Welcome to mirror list, hosted at ThFree Co, Russian Federation.

github.com/torch/nn.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
path: root/lib
diff options
context:
space:
mode:
authorSoumith Chintala <soumith@gmail.com>2017-01-24 20:17:17 +0300
committerGitHub <noreply@github.com>2017-01-24 20:17:17 +0300
commitea7ec8b63dce65a352a773b6538953067dae5ac1 (patch)
treee44eb475d69aa1ef58e8bfdfa12844c9499dcad9 /lib
parent5d3372d68a454054b07c5cb34bc742cc3c4b0abb (diff)
parent896c627805b3c782d5462f36ab475b67f547f752 (diff)
Merge pull request #1109 from pavanky/api
Using accreal instead of real in the API
Diffstat (limited to 'lib')
-rw-r--r--lib/THNN/generic/ELU.c8
-rw-r--r--lib/THNN/generic/HardShrink.c6
-rw-r--r--lib/THNN/generic/HardTanh.c15
-rw-r--r--lib/THNN/generic/LeakyReLU.c6
-rw-r--r--lib/THNN/generic/Linear.c3
-rw-r--r--lib/THNN/generic/LookupTable.c9
-rw-r--r--lib/THNN/generic/MarginCriterion.c10
-rw-r--r--lib/THNN/generic/MultiMarginCriterion.c6
-rw-r--r--lib/THNN/generic/PReLU.c3
-rw-r--r--lib/THNN/generic/RReLU.c18
-rw-r--r--lib/THNN/generic/SoftPlus.c14
-rw-r--r--lib/THNN/generic/SoftShrink.c8
-rw-r--r--lib/THNN/generic/SparseLinear.c18
-rw-r--r--lib/THNN/generic/SpatialConvolutionLocal.c64
-rw-r--r--lib/THNN/generic/SpatialConvolutionMM.c7
-rw-r--r--lib/THNN/generic/SpatialConvolutionMap.c14
-rw-r--r--lib/THNN/generic/SpatialDilatedConvolution.c3
-rw-r--r--lib/THNN/generic/SpatialFullConvolution.c7
-rw-r--r--lib/THNN/generic/SpatialFullConvolutionMap.c14
-rw-r--r--lib/THNN/generic/SpatialSubSampling.c11
-rw-r--r--lib/THNN/generic/Sqrt.c7
-rw-r--r--lib/THNN/generic/THNN.h114
-rw-r--r--lib/THNN/generic/TemporalConvolution.c45
-rw-r--r--lib/THNN/generic/TemporalSubSampling.c7
-rw-r--r--lib/THNN/generic/Threshold.c12
-rw-r--r--lib/THNN/generic/VolumetricConvolution.c3
-rw-r--r--lib/THNN/generic/VolumetricConvolutionMM.c3
-rw-r--r--lib/THNN/generic/VolumetricDilatedConvolution.c3
-rw-r--r--lib/THNN/generic/VolumetricFullConvolution.c3
29 files changed, 254 insertions, 187 deletions
diff --git a/lib/THNN/generic/ELU.c b/lib/THNN/generic/ELU.c
index 784a203..ddcfb97 100644
--- a/lib/THNN/generic/ELU.c
+++ b/lib/THNN/generic/ELU.c
@@ -6,9 +6,10 @@ void THNN_(ELU_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
- real alpha,
+ accreal alpha_,
bool inplace)
-{
+{
+ real alpha = TH_CONVERT_ACCREAL_TO_REAL(alpha_);
if(inplace) {
TH_TENSOR_APPLY(real, input,
if(*input_data <= 0) {
@@ -30,9 +31,10 @@ void THNN_(ELU_updateGradInput)(
THTensor *gradOutput,
THTensor *gradInput,
THTensor *output,
- real alpha,
+ accreal alpha_,
bool inplace)
{
+ real alpha = TH_CONVERT_ACCREAL_TO_REAL(alpha_);
THNN_CHECK_NELEMENT(input, gradOutput);
if(inplace) {
TH_TENSOR_APPLY2(real, gradOutput, real, output,
diff --git a/lib/THNN/generic/HardShrink.c b/lib/THNN/generic/HardShrink.c
index 50d272c..aaae85b 100644
--- a/lib/THNN/generic/HardShrink.c
+++ b/lib/THNN/generic/HardShrink.c
@@ -6,8 +6,9 @@ void THNN_(HardShrink_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
- real lambda)
+ accreal lambda_)
{
+ real lambda = TH_CONVERT_ACCREAL_TO_REAL(lambda_);
THTensor_(resizeAs)(output, input);
TH_TENSOR_APPLY2(real, output, real, input,
@@ -25,8 +26,9 @@ void THNN_(HardShrink_updateGradInput)(
THTensor *input,
THTensor *gradOutput,
THTensor *gradInput,
- real lambda)
+ accreal lambda_)
{
+ real lambda = TH_CONVERT_ACCREAL_TO_REAL(lambda_);
THNN_CHECK_NELEMENT(input, gradOutput);
THTensor_(resizeAs)(gradInput, input);
TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, input,
diff --git a/lib/THNN/generic/HardTanh.c b/lib/THNN/generic/HardTanh.c
index 57ef1be..b38a946 100644
--- a/lib/THNN/generic/HardTanh.c
+++ b/lib/THNN/generic/HardTanh.c
@@ -6,15 +6,17 @@ void THNN_(HardTanh_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
- real min_val,
- real max_val,
+ accreal min_val_,
+ accreal max_val_,
bool inplace)
{
+ real min_val = TH_CONVERT_ACCREAL_TO_REAL(min_val_);
+ real max_val = TH_CONVERT_ACCREAL_TO_REAL(max_val_);
if (inplace)
THTensor_(set)(output, input);
else
THTensor_(resizeAs)(output, input);
-
+
if (input->nDimension == 1 || !THTensor_(isContiguous)(input) || !THTensor_(isContiguous)(output))
{
if (inplace)
@@ -68,10 +70,13 @@ void THNN_(HardTanh_updateGradInput)(
THTensor *input,
THTensor *gradOutput,
THTensor *gradInput,
- real min_val,
- real max_val,
+ accreal min_val_,
+ accreal max_val_,
bool inplace)
{
+ real min_val = TH_CONVERT_ACCREAL_TO_REAL(min_val_);
+ real max_val = TH_CONVERT_ACCREAL_TO_REAL(max_val_);
+
THNN_CHECK_NELEMENT(input, gradOutput);
if (inplace)
THTensor_(set)(gradInput, gradOutput);
diff --git a/lib/THNN/generic/LeakyReLU.c b/lib/THNN/generic/LeakyReLU.c
index a4d9677..074047d 100644
--- a/lib/THNN/generic/LeakyReLU.c
+++ b/lib/THNN/generic/LeakyReLU.c
@@ -6,9 +6,10 @@ void THNN_(LeakyReLU_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
- real negval,
+ accreal negval_,
bool inplace)
{
+ real negval = TH_CONVERT_ACCREAL_TO_REAL(negval_);
if (inplace)
{
TH_TENSOR_APPLY(real, input,
@@ -31,9 +32,10 @@ void THNN_(LeakyReLU_updateGradInput)(
THTensor *input,
THTensor *gradOutput,
THTensor *gradInput,
- real negval,
+ accreal negval_,
bool inplace)
{
+ real negval = TH_CONVERT_ACCREAL_TO_REAL(negval_);
THNN_CHECK_NELEMENT(input, gradOutput);
if (inplace)
{
diff --git a/lib/THNN/generic/Linear.c b/lib/THNN/generic/Linear.c
index 933bc4b..faef421 100644
--- a/lib/THNN/generic/Linear.c
+++ b/lib/THNN/generic/Linear.c
@@ -87,8 +87,9 @@ void THNN_(Linear_accGradParameters)(
THTensor *gradWeight,
THTensor *gradBias,
THTensor *addBuffer,
- real scale)
+ accreal scale_)
{
+ real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
long dim = THTensor_(nDimension)(input);
if (dim == 1) {
THTensor_(addr)(gradWeight,1,gradWeight,scale,gradOutput,input);
diff --git a/lib/THNN/generic/LookupTable.c b/lib/THNN/generic/LookupTable.c
index b460f38..46bc2c3 100644
--- a/lib/THNN/generic/LookupTable.c
+++ b/lib/THNN/generic/LookupTable.c
@@ -32,8 +32,9 @@ void THNN_(LookupTable_accGradParameters)(
THIndexTensor *indices,
bool scaleGradByFreq,
int paddingValue,
- real scale)
+ accreal ascale)
{
+ real scale = TH_CONVERT_ACCREAL_TO_REAL(ascale);
ptrdiff_t i;
THInteger_t *count_data = NULL;
@@ -163,9 +164,11 @@ void THNN_(LookupTable_renorm)(
THNNState *state,
THIndexTensor *idx,
THTensor *weight,
- real maxNorm,
- real normType)
+ accreal maxNorm_,
+ accreal normType_)
{
+ real maxNorm = TH_CONVERT_ACCREAL_TO_REAL(maxNorm_);
+ real normType = TH_CONVERT_ACCREAL_TO_REAL(normType_);
if (!THTensor_(isContiguous)(weight))
THError("weight must be contiguous");
if (!THIndexTensor_(isContiguous)(idx))
diff --git a/lib/THNN/generic/MarginCriterion.c b/lib/THNN/generic/MarginCriterion.c
index 1675860..d6d9b60 100644
--- a/lib/THNN/generic/MarginCriterion.c
+++ b/lib/THNN/generic/MarginCriterion.c
@@ -8,10 +8,11 @@ void THNN_(MarginCriterion_updateOutput)(
THTensor *target,
THTensor *output,
bool sizeAverage,
- real margin)
+ accreal margin_)
{
+ real margin = TH_CONVERT_ACCREAL_TO_REAL(margin_);
THNN_CHECK_NELEMENT(input, target);
- THNN_CHECK_DIM_SIZE(output, 1, 0, 1);
+ THNN_CHECK_DIM_SIZE(output, 1, 0, 1);
real sum = 0;
TH_TENSOR_APPLY2(real, input, real, target,
@@ -31,9 +32,10 @@ void THNN_(MarginCriterion_updateGradInput)(
THTensor *target,
THTensor *gradInput,
bool sizeAverage,
- real margin)
+ accreal margin_)
{
- THNN_CHECK_NELEMENT(input, target);
+ real margin = TH_CONVERT_ACCREAL_TO_REAL(margin_);
+ THNN_CHECK_NELEMENT(input, target);
real norm = (sizeAverage ? 1./((real)THTensor_(nElement)(input)) : 1.);
THTensor_(resizeAs)(gradInput, input);
diff --git a/lib/THNN/generic/MultiMarginCriterion.c b/lib/THNN/generic/MultiMarginCriterion.c
index af83e89..2f8f8ff 100644
--- a/lib/THNN/generic/MultiMarginCriterion.c
+++ b/lib/THNN/generic/MultiMarginCriterion.c
@@ -11,8 +11,9 @@ void THNN_(MultiMarginCriterion_updateOutput)(
bool sizeAverage,
int p,
THTensor *weights,
- real margin)
+ accreal margin_)
{
+ real margin = TH_CONVERT_ACCREAL_TO_REAL(margin_);
real *input_data, *weights_data;
THIndex_t *target_data;
long nframe, dim;
@@ -90,8 +91,9 @@ void THNN_(MultiMarginCriterion_updateGradInput)(
bool sizeAverage,
int p,
THTensor *weights,
- real margin)
+ accreal margin_)
{
+ real margin = TH_CONVERT_ACCREAL_TO_REAL(margin_);
real *input_data;
real *gradInput_data;
THIndex_t *target_data;
diff --git a/lib/THNN/generic/PReLU.c b/lib/THNN/generic/PReLU.c
index 3d2ebfc..174f514 100644
--- a/lib/THNN/generic/PReLU.c
+++ b/lib/THNN/generic/PReLU.c
@@ -165,8 +165,9 @@ void THNN_(PReLU_accGradParameters)(
THTensor *gradWeightBuf,
THTensor *gradWeightBuf2,
THIndex_t nOutputPlane,
- real scale)
+ accreal scale_)
{
+ real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
THNN_CHECK_NELEMENT(input, gradOutput);
real *gradWeight_data = THTensor_(data)(gradWeight);
diff --git a/lib/THNN/generic/RReLU.c b/lib/THNN/generic/RReLU.c
index cdb9dca..8fd46d3 100644
--- a/lib/THNN/generic/RReLU.c
+++ b/lib/THNN/generic/RReLU.c
@@ -7,12 +7,14 @@ void THNN_(RReLU_updateOutput)(
THTensor *input,
THTensor *output,
THTensor *noise,
- real lower,
- real upper,
+ accreal lower_,
+ accreal upper_,
bool train,
bool inplace,
THGenerator *generator)
{
+ real lower = TH_CONVERT_ACCREAL_TO_REAL(lower_);
+ real upper = TH_CONVERT_ACCREAL_TO_REAL(upper_);
if (train)
{
// get default random generator
@@ -72,7 +74,7 @@ void THNN_(RReLU_updateOutput)(
*output_data = *input_data * r;
);
}
- }
+ }
}
void THNN_(RReLU_updateGradInput)(
@@ -81,11 +83,13 @@ void THNN_(RReLU_updateGradInput)(
THTensor *gradOutput,
THTensor *gradInput,
THTensor *noise,
- real lower,
- real upper,
+ accreal lower_,
+ accreal upper_,
bool train,
bool inplace)
{
+ real lower = TH_CONVERT_ACCREAL_TO_REAL(lower_);
+ real upper = TH_CONVERT_ACCREAL_TO_REAL(upper_);
THNN_CHECK_NELEMENT(input, gradOutput);
if (train && upper - lower > 1E-6) // e.g. if upper == lower, RReLU behaves like LeakyReLU
{
@@ -99,10 +103,10 @@ void THNN_(RReLU_updateGradInput)(
{
THTensor_(resizeAs)(gradInput, input);
THTensor_(cmul)(gradInput, gradOutput, noise);
- }
+ }
}
else
- {
+ {
// use constant factor for negative input values
const real negSlope = (lower + upper) / 2;
if (inplace)
diff --git a/lib/THNN/generic/SoftPlus.c b/lib/THNN/generic/SoftPlus.c
index 7305238..6491e66 100644
--- a/lib/THNN/generic/SoftPlus.c
+++ b/lib/THNN/generic/SoftPlus.c
@@ -6,9 +6,11 @@ void THNN_(SoftPlus_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
- real beta,
- real threshold)
+ accreal beta_,
+ accreal threshold_)
{
+ real beta = TH_CONVERT_ACCREAL_TO_REAL(beta_);
+ real threshold = TH_CONVERT_ACCREAL_TO_REAL(threshold_);
THTensor_(resizeAs)(output, input);
// f(x) = 1/beta * log(1 + exp(beta * x))
@@ -23,12 +25,14 @@ void THNN_(SoftPlus_updateGradInput)(
THTensor *gradOutput,
THTensor *gradInput,
THTensor *output,
- real beta,
- real threshold)
+ accreal beta_,
+ accreal threshold_)
{
+ real beta = TH_CONVERT_ACCREAL_TO_REAL(beta_);
+ real threshold = TH_CONVERT_ACCREAL_TO_REAL(threshold_);
THNN_CHECK_NELEMENT(input, gradOutput);
THTensor_(resizeAs)(gradInput, output);
-
+
// d/dx[log(1+exp(k*x))/k] = exp(kx) / (exp(kx) + 1)
// SINCE
// y = (1/k)*log(1+exp(k*x)) --> x = (1/k)*log(exp(k*y)-1)
diff --git a/lib/THNN/generic/SoftShrink.c b/lib/THNN/generic/SoftShrink.c
index 28dcce0..e779508 100644
--- a/lib/THNN/generic/SoftShrink.c
+++ b/lib/THNN/generic/SoftShrink.c
@@ -6,10 +6,11 @@ void THNN_(SoftShrink_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
- real lambda)
+ accreal lambda_)
{
+ real lambda = TH_CONVERT_ACCREAL_TO_REAL(lambda_);
THTensor_(resizeAs)(output, input);
-
+
TH_TENSOR_APPLY2(real, output, real, input,
if ((*input_data) > lambda)
*output_data = *input_data - lambda;
@@ -25,8 +26,9 @@ void THNN_(SoftShrink_updateGradInput)(
THTensor *input,
THTensor *gradOutput,
THTensor *gradInput,
- real lambda)
+ accreal lambda_)
{
+ real lambda = TH_CONVERT_ACCREAL_TO_REAL(lambda_);
THNN_CHECK_NELEMENT(input, gradOutput);
THTensor_(resizeAs)(gradInput, input);
TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, input,
diff --git a/lib/THNN/generic/SparseLinear.c b/lib/THNN/generic/SparseLinear.c
index 807280e..0c52541 100644
--- a/lib/THNN/generic/SparseLinear.c
+++ b/lib/THNN/generic/SparseLinear.c
@@ -167,9 +167,11 @@ void THNN_(SparseLinear_accGradParameters)(
THTensor *gradBias,
THTensor *weight,
THTensor *bias,
- real weightDecay,
- real scale)
+ accreal weightDecay_,
+ accreal scale_)
{
+ real weightDecay = TH_CONVERT_ACCREAL_TO_REAL(weightDecay_);
+ real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
long h, i, col, hp0, hp1;
long outDim = THTensor_(size)(weight, 0);
long inDim = THTensor_(size)(weight, 1);
@@ -243,9 +245,11 @@ void THNN_(SparseLinear_legacyAccGradParameters)(
THTensor *gradBias,
THTensor *weight,
THTensor *bias,
- real weightDecay,
- real scale)
+ accreal weightDecay_,
+ accreal scale_)
{
+ real weightDecay = TH_CONVERT_ACCREAL_TO_REAL(weightDecay_);
+ real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
long h, i;
long outDim = THTensor_(size)(weight, 0);
long inDim = THTensor_(size)(weight, 1);
@@ -308,8 +312,9 @@ void THNN_(SparseLinear_updateParameters)(
THTensor *gradWeight,
THTensor *gradBias,
THTensor *lastInput,
- real learningRate)
+ accreal learningRate_)
{
+ real learningRate = TH_CONVERT_ACCREAL_TO_REAL(learningRate_);
long h, i;
long outDim = weight->size[0];
long inDim = weight->size[1];
@@ -381,8 +386,9 @@ void THNN_(SparseLinear_legacyUpdateParameters)(
THTensor *gradWeight,
THTensor *gradBias,
THTensor *lastInput,
- real learningRate)
+ accreal learningRate_)
{
+ real learningRate = TH_CONVERT_ACCREAL_TO_REAL(learningRate_);
long h, i;
long outDim = weight->size[0];
long inDim = weight->size[1];
diff --git a/lib/THNN/generic/SpatialConvolutionLocal.c b/lib/THNN/generic/SpatialConvolutionLocal.c
index efba30e..06b57f3 100644
--- a/lib/THNN/generic/SpatialConvolutionLocal.c
+++ b/lib/THNN/generic/SpatialConvolutionLocal.c
@@ -4,8 +4,8 @@
static inline void THNN_(SpatialConvolutionLocal_shapeCheck)(
THTensor *input, THTensor *gradOutput,
- THTensor *weight, THTensor *bias,
- int kH, int kW, int dH,
+ THTensor *weight, THTensor *bias,
+ int kH, int kW, int dH,
int dW, int padH, int padW,
long inputHeight, long inputWidth,
long outputHeight, long outputWidth) {
@@ -39,7 +39,7 @@ static inline void THNN_(SpatialConvolutionLocal_shapeCheck)(
}
THNN_CHECK_DIM_SIZE(input, ndim, dimf, nInputPlane);
-
+
if (gradOutput != NULL) {
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimf, nOutputPlane);
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, outputHeight);
@@ -56,8 +56,8 @@ static int THNN_(view_weight_local)(THTensor **_weight)
long s1 = weight->size[0] * weight->size[1];
long s2 = weight->size[2];
long s3 = weight->size[3] * weight->size[4] * weight->size[5];
- *_weight = THTensor_(newWithStorage3d)(weight->storage,
- weight->storageOffset,
+ *_weight = THTensor_(newWithStorage3d)(weight->storage,
+ weight->storageOffset,
s1, -1, s2, -1, s3, -1);
return 1;
}
@@ -75,8 +75,8 @@ static void THNN_(SpatialConvolutionLocal_updateOutput_frame)
long i;
THTensor *output3d, *finput3d;
- THNN_(unfolded_copy)(finput, input, kW, kH, dW, dH, padW, padH,
- nInputPlane, inputWidth, inputHeight,
+ THNN_(unfolded_copy)(finput, input, kW, kH, dW, dH, padW, padH,
+ nInputPlane, inputWidth, inputHeight,
outputWidth, outputHeight);
THTensor_(copy)(output, bias);
@@ -86,7 +86,7 @@ static void THNN_(SpatialConvolutionLocal_updateOutput_frame)
outputHeight * outputWidth, 1,
nOutputPlane, outputHeight * outputWidth,
1, nOutputPlane * outputHeight * outputWidth);
-
+
finput3d = THTensor_(newWithStorage3d)
(finput->storage, finput->storageOffset,
outputHeight * outputWidth, 1,
@@ -94,10 +94,10 @@ static void THNN_(SpatialConvolutionLocal_updateOutput_frame)
1, kW * kH * nInputPlane * outputHeight * outputWidth);
// weight: oH*oW x nOutputPlane x nInputPlane*kH*kW
- // finput3d: oH*oW x nInputPlane*kH*kW x 1
+ // finput3d: oH*oW x nInputPlane*kH*kW x 1
THTensor_(baddbmm)(output3d, 1.0, output3d, 1.0, weight, finput3d);
// output3d: oH*oW x nOutputPlane x 1
-
+
THTensor_(free)(output3d);
THTensor_(free)(finput3d);
}
@@ -120,10 +120,10 @@ void THNN_(SpatialConvolutionLocal_updateOutput)(
THNN_(SpatialConvolutionLocal_shapeCheck)
(input, NULL, weight, bias, kH, kW, dH, dW, padH, padW,
- inputHeight, inputWidth, outputHeight, outputWidth);
+ inputHeight, inputWidth, outputHeight, outputWidth);
input = THTensor_(newContiguous)(input);
-
+
long nInputPlane = THTensor_(size)(weight, 2)/ (kW * kH);
long nOutputPlane = THTensor_(size)(weight, 1);
@@ -174,7 +174,7 @@ void THNN_(SpatialConvolutionLocal_updateOutput)(
static void THNN_(SpatialConvolutionLocal_updateGradInput_frame)
(THTensor *gradInput, THTensor *gradOutput,
THTensor *weight, THTensor *fgradInput,
- int kW, int kH, int dW, int dH, int padW, int padH,
+ int kW, int kH, int dW, int dH, int padW, int padH,
long nInputPlane, long inputWidth, long inputHeight,
long nOutputPlane, long outputWidth, long outputHeight)
{
@@ -188,17 +188,17 @@ static void THNN_(SpatialConvolutionLocal_updateGradInput_frame)
kW*kH*nInputPlane, outputHeight*outputWidth,
1, kW*kH*nInputPlane*outputHeight*outputWidth);
// weight: oH*oW x nInputPlane*kH*kW x nOutputPlane
- // gradOutput3d: oH*oW x nOutputPlane x 1
+ // gradOutput3d: oH*oW x nOutputPlane x 1
THTensor_(baddbmm)(fgradInput3d, 0.0, fgradInput3d, 1.0, weight, gradOutput3d);
- // fgradInput3d: oH*oW x nInputPlane*kH*kW x 1
-
+ // fgradInput3d: oH*oW x nInputPlane*kH*kW x 1
+
THTensor_(free)(gradOutput3d);
THTensor_(free)(fgradInput3d);
-
+
THTensor_(zero)(gradInput);
-
- THNN_(unfolded_acc)(fgradInput, gradInput, kW, kH, dW, dH, padW, padH,
- nInputPlane, inputWidth, inputHeight,
+
+ THNN_(unfolded_acc)(fgradInput, gradInput, kW, kH, dW, dH, padW, padH,
+ nInputPlane, inputWidth, inputHeight,
outputWidth, outputHeight);
}
@@ -235,8 +235,8 @@ void THNN_(SpatialConvolutionLocal_updateGradInput)(
if(input->nDimension == 3)
{
THNN_(SpatialConvolutionLocal_updateGradInput_frame)
- (gradInput, gradOutput, weight,
- fgradInput, kW, kH, dW, dH, padW, padH,
+ (gradInput, gradOutput, weight,
+ fgradInput, kW, kH, dW, dH, padW, padH,
nInputPlane, inputWidth, inputHeight,
nOutputPlane, outputWidth, outputHeight);
}
@@ -253,8 +253,8 @@ void THNN_(SpatialConvolutionLocal_updateGradInput)(
THTensor *fgradInput_t = THTensor_(newSelect)(fgradInput, 0, t);
THNN_(SpatialConvolutionLocal_updateGradInput_frame)
- (gradInput_t, gradOutput_t, weight, fgradInput_t,
- kW, kH, dW, dH, padW, padH,
+ (gradInput_t, gradOutput_t, weight, fgradInput_t,
+ kW, kH, dW, dH, padW, padH,
nInputPlane, inputWidth, inputHeight,
nOutputPlane, outputWidth, outputHeight);
@@ -275,12 +275,12 @@ void THNN_(SpatialConvolutionLocal_updateGradInput)(
static void THNN_(SpatialConvolutionLocal_accGradParameters_frame)
(THTensor *gradOutput, THTensor *gradWeight, THTensor *gradBias,
- THTensor *finput, real scale,
- int kW, int kH, int dW, int dH, int padW, int padH,
+ THTensor *finput, real scale,
+ int kW, int kH, int dW, int dH, int padW, int padH,
long nInputPlane, long inputWidth, long inputHeight,
long nOutputPlane, long outputWidth, long outputHeight)
{
-
+
THTensor *gradOutput3d, *finput3d;
gradOutput3d = THTensor_(newWithStorage3d)(gradOutput->storage, gradOutput->storageOffset,
outputHeight*outputWidth, 1,
@@ -290,7 +290,7 @@ static void THNN_(SpatialConvolutionLocal_accGradParameters_frame)
outputHeight*outputWidth, 1,
1, kW*kH*nInputPlane*outputHeight*outputWidth,
kW*kH*nInputPlane, outputHeight*outputWidth);
- // gradOutput3d: oH*oW x nOutputPlane x 1
+ // gradOutput3d: oH*oW x nOutputPlane x 1
// finput3d: oH*oW x 1 x kW*kH*nInputPlane
THTensor_(baddbmm)(gradWeight, 1.0, gradWeight, scale, gradOutput3d, finput3d);
// gradWeight: oH*oW x nOutputPlane x kW*kH*nInputPlane
@@ -314,9 +314,9 @@ void THNN_(SpatialConvolutionLocal_accGradParameters)(
int padW, int padH,
long inputWidth, long inputHeight,
long outputWidth, long outputHeight,
- real scale)
+ accreal scale_)
{
-
+ real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
int freeWeight = THNN_(view_weight_local)(&gradWeight);
THNN_(SpatialConvolutionLocal_shapeCheck)
@@ -332,7 +332,7 @@ void THNN_(SpatialConvolutionLocal_accGradParameters)(
if(input->nDimension == 3)
{
THNN_(SpatialConvolutionLocal_accGradParameters_frame)
- (gradOutput, gradWeight, gradBias, finput, scale,
+ (gradOutput, gradWeight, gradBias, finput, scale,
kW, kH, dW, dH, padW, padH,
nInputPlane, inputWidth, inputHeight,
nOutputPlane, outputWidth, outputHeight);
@@ -348,7 +348,7 @@ void THNN_(SpatialConvolutionLocal_accGradParameters)(
THTensor *finput_t = THTensor_(newSelect)(finput, 0, t);
THNN_(SpatialConvolutionLocal_accGradParameters_frame)
- (gradOutput_t, gradWeight, gradBias, finput_t, scale,
+ (gradOutput_t, gradWeight, gradBias, finput_t, scale,
kW, kH, dW, dH, padW, padH,
nInputPlane, inputWidth, inputHeight,
nOutputPlane, outputWidth, outputHeight);
diff --git a/lib/THNN/generic/SpatialConvolutionMM.c b/lib/THNN/generic/SpatialConvolutionMM.c
index 83635c1..c9c22bc 100644
--- a/lib/THNN/generic/SpatialConvolutionMM.c
+++ b/lib/THNN/generic/SpatialConvolutionMM.c
@@ -4,7 +4,7 @@
static inline void THNN_(SpatialConvolutionMM_shapeCheck)(
THTensor *input, THTensor *gradOutput,
- THTensor *weight, THTensor *bias,
+ THTensor *weight, THTensor *bias,
int kH, int kW, int dH, int dW, int padH, int padW) {
THArgCheck(kW > 0 && kH > 0, 9,
@@ -45,7 +45,7 @@ static inline void THNN_(SpatialConvolutionMM_shapeCheck)(
nInputPlane,inputHeight,inputWidth,nOutputPlane,outputHeight,outputWidth);
THNN_CHECK_DIM_SIZE(input, ndim, dimf, nInputPlane);
-
+
if (gradOutput != NULL) {
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimf, nOutputPlane);
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, outputHeight);
@@ -336,8 +336,9 @@ void THNN_(SpatialConvolutionMM_accGradParameters)(
int dH,
int padW,
int padH,
- real scale)
+ accreal scale_)
{
+ real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
int freeWeight = 0;
if (gradWeight->nDimension == 4) {
diff --git a/lib/THNN/generic/SpatialConvolutionMap.c b/lib/THNN/generic/SpatialConvolutionMap.c
index 82886c2..750b212 100644
--- a/lib/THNN/generic/SpatialConvolutionMap.c
+++ b/lib/THNN/generic/SpatialConvolutionMap.c
@@ -175,10 +175,18 @@ void THNN_(SpatialConvolutionMap_updateGradInput)(
}
void THNN_(SpatialConvolutionMap_accGradParameters)(
- THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradWeight, THTensor *gradBias,
- THTensor *connTable, int nInputPlane, int nOutputPlane,
- int dW, int dH, real scale)
+ THNNState *state,
+ THTensor *input,
+ THTensor *gradOutput,
+ THTensor *gradWeight,
+ THTensor *gradBias,
+ THTensor *connTable,
+ int nInputPlane,
+ int nOutputPlane,
+ int dW, int dH,
+ accreal scale_)
{
+ real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
THArgCheck(
gradWeight != NULL && gradWeight->nDimension == 3
&& connTable != NULL && connTable->size[0] == gradWeight->size[0], 5,
diff --git a/lib/THNN/generic/SpatialDilatedConvolution.c b/lib/THNN/generic/SpatialDilatedConvolution.c
index 8b18910..d345f7a 100644
--- a/lib/THNN/generic/SpatialDilatedConvolution.c
+++ b/lib/THNN/generic/SpatialDilatedConvolution.c
@@ -289,8 +289,9 @@ void THNN_(SpatialDilatedConvolution_accGradParameters)(
int dW, int dH,
int padW, int padH,
int dilationW, int dilationH,
- real scale)
+ accreal scale_)
{
+ real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
THNN_(SpatialDilatedConvolution_shapeCheck)
(input, gradOutput, gradWeight, gradBias, kH, kW, dH, dW, padH, padW,
dilationH, dilationW);
diff --git a/lib/THNN/generic/SpatialFullConvolution.c b/lib/THNN/generic/SpatialFullConvolution.c
index 4adcca6..e2a835d 100644
--- a/lib/THNN/generic/SpatialFullConvolution.c
+++ b/lib/THNN/generic/SpatialFullConvolution.c
@@ -59,7 +59,7 @@ static void THNN_(col2im)(const real* data_col, const int channels,
static inline void THNN_(SpatialFullConvolution_shapeCheck)(
THTensor *input, THTensor *gradOutput,
- THTensor *weight, THTensor *bias,
+ THTensor *weight, THTensor *bias,
int kH, int kW, int dH, int dW, int padH, int padW, int adjH, int adjW) {
THArgCheck(kW > 0 && kH > 0, 9,
@@ -103,7 +103,7 @@ static inline void THNN_(SpatialFullConvolution_shapeCheck)(
nInputPlane,inputHeight,inputWidth,nOutputPlane,outputHeight,outputWidth);
THNN_CHECK_DIM_SIZE(input, ndim, dimf, nInputPlane);
-
+
if (gradOutput != NULL) {
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimf, nOutputPlane);
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, outputHeight);
@@ -342,8 +342,9 @@ void THNN_(SpatialFullConvolution_accGradParameters)(
int dW, int dH,
int padW, int padH,
int adjW, int adjH,
- real scale)
+ accreal scale_)
{
+ real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
THNN_(SpatialFullConvolution_shapeCheck)
(input, gradOutput, gradWeight, gradBias, kH, kW, dH, dW, padH, padW, adjH, adjW);
diff --git a/lib/THNN/generic/SpatialFullConvolutionMap.c b/lib/THNN/generic/SpatialFullConvolutionMap.c
index 1bd3455..e98dea0 100644
--- a/lib/THNN/generic/SpatialFullConvolutionMap.c
+++ b/lib/THNN/generic/SpatialFullConvolutionMap.c
@@ -147,10 +147,18 @@ void THNN_(SpatialFullConvolutionMap_updateGradInput)(
}
void THNN_(SpatialFullConvolutionMap_accGradParameters)(
- THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradWeight, THTensor *gradBias,
- THTensor *connTable, int nInputPlane, int nOutputPlane,
- int dW, int dH, real scale)
+ THNNState *state,
+ THTensor *input,
+ THTensor *gradOutput,
+ THTensor *gradWeight,
+ THTensor *gradBias,
+ THTensor *connTable,
+ int nInputPlane,
+ int nOutputPlane,
+ int dW, int dH,
+ accreal scale_)
{
+ real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
THArgCheck(
gradWeight != NULL && gradWeight->nDimension == 3
&& connTable != NULL && connTable->size[0] == gradWeight->size[0], 5,
diff --git a/lib/THNN/generic/SpatialSubSampling.c b/lib/THNN/generic/SpatialSubSampling.c
index 3674f2c..3f01540 100644
--- a/lib/THNN/generic/SpatialSubSampling.c
+++ b/lib/THNN/generic/SpatialSubSampling.c
@@ -40,7 +40,7 @@ void THNN_(SpatialSubSampling_updateOutput)(
int kW, int kH,
int dW, int dH)
{
-
+
real *weight_data = THTensor_(data)(weight);
real *bias_data = THTensor_(data)(bias);
real *output_data;
@@ -76,11 +76,11 @@ void THNN_(SpatialSubSampling_updateOutput)(
THTensor_(resize3d)(output, nInputPlane, outputHeight, outputWidth);
else
THTensor_(resize4d)(output, input->size[0], nInputPlane, outputHeight, outputWidth);
-
+
input = THTensor_(newContiguous)(input);
input_data = THTensor_(data)(input);
output_data = THTensor_(data)(output);
-
+
#pragma omp parallel for private(k)
for(k = 0; k < nInputPlane; k++)
{
@@ -97,7 +97,7 @@ void THNN_(SpatialSubSampling_updateOutput)(
long i;
for(i = 0; i < outputWidth*outputHeight; i++)
ptr_output[i] = z;
-
+
for(yy = 0; yy < outputHeight; yy++)
{
for(xx = 0; xx < outputWidth; xx++)
@@ -214,8 +214,9 @@ void THNN_(SpatialSubSampling_accGradParameters)(
THTensor *gradBias,
int kW, int kH,
int dW, int dH,
- real scale)
+ accreal scale_)
{
+ real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
THNN_(SpatialSubSampling_shapeCheck)(input, gradOutput, gradWeight, kW, kH);
long nbatch = 1;
diff --git a/lib/THNN/generic/Sqrt.c b/lib/THNN/generic/Sqrt.c
index 24cd51a..174884e 100644
--- a/lib/THNN/generic/Sqrt.c
+++ b/lib/THNN/generic/Sqrt.c
@@ -6,8 +6,9 @@ void THNN_(Sqrt_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
- real eps)
+ accreal eps_)
{
+ real eps = TH_CONVERT_ACCREAL_TO_REAL(eps_);
THTensor_(resizeAs)(output, input);
THTensor_(sqrt)(output, input);
}
@@ -22,8 +23,8 @@ void THNN_(Sqrt_updateGradInput)(
THNN_CHECK_SHAPE(output, gradOutput);
THTensor_(resizeAs)(gradInput, input);
- if (output->nDimension == 1 ||
- !THTensor_(isContiguous)(output) ||
+ if (output->nDimension == 1 ||
+ !THTensor_(isContiguous)(output) ||
!THTensor_(isContiguous)(gradOutput) ||
!THTensor_(isContiguous)(gradInput))
{
diff --git a/lib/THNN/generic/THNN.h b/lib/THNN/generic/THNN.h
index 447289b..d4b7a51 100644
--- a/lib/THNN/generic/THNN.h
+++ b/lib/THNN/generic/THNN.h
@@ -78,7 +78,7 @@ TH_API void THNN_(ELU_updateOutput)(
THNNState *state, // library's state
THTensor *input, // input tensor
THTensor *output, // [OUT] ELU output
- real alpha, // an ELU parameter (as in paper)
+ accreal alpha, // an ELU parameter (as in paper)
bool inplace); // if true, modifies gradOutput and sets gradInput onto it (no additional memory is allocated)
TH_API void THNN_(ELU_updateGradInput)(
THNNState *state, // library's state
@@ -86,7 +86,7 @@ TH_API void THNN_(ELU_updateGradInput)(
THTensor *gradOutput, // gradient w.r.t. output
THTensor *gradInput, // [OUT] gradient w.r.t. input
THTensor *output, // output from a forward pass
- real alpha, // an ELU parameter (as in paper)
+ accreal alpha, // an ELU parameter (as in paper)
bool inplace); // if true, modifies gradOutput and sets gradInput onto it (no additional memory is allocated)
TH_API void THNN_(DistKLDivCriterion_updateOutput)(
@@ -119,30 +119,30 @@ TH_API void THNN_(HardShrink_updateOutput)(
THNNState *state, // library's state
THTensor *input, // input tensor
THTensor *output, // [OUT] output tensor
- real lambda); // HardShrink parameter
+ accreal lambda); // HardShrink parameter
TH_API void THNN_(HardShrink_updateGradInput)(
THNNState *state, // library's state
THTensor *input, // input tensor
THTensor *gradOutput, // gradient w.r.t. module's output
THTensor *gradInput, // [OUT] gradient w.r.t. input
- real lambda); // HardShrink parameter
+ accreal lambda); // HardShrink parameter
// HardTanh clamps the values to the interval [min_val; max_val].
TH_API void THNN_(HardTanh_updateOutput)(
THNNState *state, // library's state
THTensor *input, // input tensor
THTensor *output, // [OUT] output tensor
- real min_val, // lower threshold
- real max_val,
- bool inplace); // upper threshold
+ accreal min_val, // lower threshold
+ accreal max_val, // upper threshold
+ bool inplace);
TH_API void THNN_(HardTanh_updateGradInput)(
THNNState *state, // library's state
THTensor *input, // input tensor
THTensor *gradOutput, // gradient w.r.t. module's output
THTensor *gradInput, // [OUT] gradient w.r.t. the input
- real min_val, // lower threshold
- real max_val,
- bool inplace); // upper threshold
+ accreal min_val, // lower threshold
+ accreal max_val, // upper threshold
+ bool inplace);
TH_API void THNN_(L1Cost_updateOutput)(
THNNState *state, // library's state
@@ -158,14 +158,14 @@ TH_API void THNN_(LeakyReLU_updateOutput)(
THNNState *state, // library's state
THTensor *input, // [MODIFIED] input tensor
THTensor *output, // [OUT] output tensor
- real negval, // negative part slope
+ accreal negval, // negative part slope
bool inplace); // if true, modifies the input tensor and sets the output tensor on it (no additional memory is allocated)
TH_API void THNN_(LeakyReLU_updateGradInput)(
THNNState *state, // library's state
THTensor *input, // input tensor
THTensor *gradOutput, // [MODIFIED] gradient w.r.t. module's output
THTensor *gradInput, // [OUT] gradient w.r.t. the input
- real negval, // negative part slope
+ accreal negval, // negative part slope
bool inplace); // if true, modifies gradOutput and sets gradInput onto it (no additional memory is allocated)
TH_API void THNN_(LogSigmoid_updateOutput)(
@@ -201,14 +201,14 @@ TH_API void THNN_(LookupTable_accGradParameters)(
THIndexTensor *indices, // [OPTIONAL]
bool scaleGradByFreq,
int paddingValue,
- real scale);
+ accreal scale);
TH_API void THNN_(LookupTable_renorm)(
THNNState *state, // library's state
- THIndexTensor *idx, // vector that contains row indices (modified in function)
+ THIndexTensor *idx, // vector containing row indices (modified in function)
THTensor *weight, // 2D tensor whose rows will be renormalized
- real maxNorm, // maximum norm
- real normType); // the norm type (e.g., normType=2, then it's 2-norm)
+ accreal maxNorm, // maximum norm
+ accreal normType); // the norm type (e.g., normType=2, then it's 2-norm)
TH_API void THNN_(MarginCriterion_updateOutput)(
THNNState *state, // library's state
@@ -216,14 +216,15 @@ TH_API void THNN_(MarginCriterion_updateOutput)(
THTensor *target, // target tensor (should contain only 1s and -1s)
THTensor *output, // [OUT] a one-element tensor containing the loss
bool sizeAverage, // if true, the loss is normalized by **total number of elements**
- real margin); // a margin that is required for the loss to be 0
+ accreal margin); // a margin that is required for the loss to be 0
+
TH_API void THNN_(MarginCriterion_updateGradInput)(
THNNState *state, // library's state
THTensor *input, // input tensor
   THTensor *target, // target tensor (should contain only 1s and -1s)
THTensor *gradInput, // [OUT] gradient w.r.t. module's input
bool sizeAverage, // if true, the gradient is normalized by **total number of elements**
- real margin); // a margin that is required for the loss to be 0
+ accreal margin); // a margin that is required for the loss to be 0
TH_API void THNN_(SoftMarginCriterion_updateOutput)(
THNNState *state,
@@ -275,7 +276,7 @@ TH_API void THNN_(MultiMarginCriterion_updateOutput)(
bool sizeAverage,
int p,
THTensor* weights, // [OPTIONAL]
- real margin);
+ accreal margin);
TH_API void THNN_(MultiMarginCriterion_updateGradInput)(
THNNState *state,
THTensor *input,
@@ -284,7 +285,7 @@ TH_API void THNN_(MultiMarginCriterion_updateGradInput)(
bool sizeAverage,
int p,
THTensor *weights, // [OPTIONAL]
- real margin);
+ accreal margin);
TH_API void THNN_(PReLU_updateOutput)(
THNNState *state,
@@ -309,7 +310,7 @@ TH_API void THNN_(PReLU_accGradParameters)(
THTensor *gradWeightBuf,
THTensor *gradWeightBuf2,
THIndex_t nOutputPlane,
- real scale);
+ accreal scale);
TH_API void THNN_(Linear_updateOutput)(
THNNState *state,
@@ -334,15 +335,15 @@ TH_API void THNN_(Linear_accGradParameters)(
THTensor *gradWeight,
THTensor *gradBias,
THTensor *addBuffer,
- real scale);
+ accreal scale);
TH_API void THNN_(RReLU_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
THTensor *noise,
- real lower,
- real upper,
+ accreal lower,
+ accreal upper,
bool train,
bool inplace,
THGenerator *generator);
@@ -352,8 +353,8 @@ TH_API void THNN_(RReLU_updateGradInput)(
THTensor *gradOutput,
THTensor *gradInput,
THTensor *noise,
- real lower,
- real upper,
+ accreal lower,
+ accreal upper,
bool train,
bool inplace);
@@ -396,28 +397,28 @@ TH_API void THNN_(SoftPlus_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
- real beta,
- real threshold);
+ accreal beta,
+ accreal threshold);
TH_API void THNN_(SoftPlus_updateGradInput)(
THNNState *state,
THTensor *input,
THTensor *gradOutput,
THTensor *gradInput,
THTensor *output,
- real beta,
- real threshold);
+ accreal beta,
+ accreal threshold);
TH_API void THNN_(SoftShrink_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
- real lambda);
+ accreal lambda);
TH_API void THNN_(SoftShrink_updateGradInput)(
THNNState *state,
THTensor *input,
THTensor *gradOutput,
THTensor *gradInput,
- real lambda);
+ accreal lambda);
TH_API void THNN_(SparseLinear_updateOutput)(
THNNState *state,
@@ -433,8 +434,8 @@ TH_API void THNN_(SparseLinear_accGradParameters)(
THTensor *gradBias,
THTensor *weight,
THTensor *bias,
- real weightDecay,
- real scale);
+ accreal weightDecay,
+ accreal scale);
TH_API void THNN_(SparseLinear_zeroGradParameters)(
THNNState *state,
THTensor *gradWeight,
@@ -447,7 +448,7 @@ TH_API void THNN_(SparseLinear_updateParameters)(
THTensor *gradWeight,
THTensor *gradBias,
THTensor *lastInput,
- real learningRate);
+ accreal learningRate);
TH_API void THNN_(SparseLinear_legacyUpdateOutput)(
THNNState *state,
THTensor *input,
@@ -462,8 +463,8 @@ TH_API void THNN_(SparseLinear_legacyAccGradParameters)(
THTensor *gradBias,
THTensor *weight,
THTensor *bias,
- real weightDecay,
- real scale);
+ accreal weightDecay,
+ accreal scale);
TH_API void THNN_(SparseLinear_legacyZeroGradParameters)(
THNNState *state,
THTensor *gradWeight,
@@ -476,13 +477,13 @@ TH_API void THNN_(SparseLinear_legacyUpdateParameters)(
THTensor *gradWeight,
THTensor *gradBias,
THTensor *lastInput,
- real learningRate);
+ accreal learningRate);
TH_API void THNN_(Sqrt_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
- real eps);
+ accreal eps);
TH_API void THNN_(Sqrt_updateGradInput)(
THNNState *state,
THTensor *input,
@@ -515,16 +516,16 @@ TH_API void THNN_(Threshold_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
- real threshold,
- real val,
+ accreal threshold,
+ accreal val,
bool inplace);
TH_API void THNN_(Threshold_updateGradInput)(
THNNState *state,
THTensor *input,
THTensor *gradOutput,
THTensor *gradInput,
- real threshold,
- real val,
+ accreal threshold,
+ accreal val,
bool inplace);
TH_API void THNN_(TemporalConvolution_updateOutput)(
@@ -550,7 +551,7 @@ TH_API void THNN_(TemporalConvolution_accGradParameters)(
THTensor *gradWeight,
THTensor *gradBias,
int kW, int dW,
- real scale);
+ accreal scale);
TH_API void THNN_(TemporalMaxPooling_updateOutput)(
THNNState *state,
THTensor *input,
@@ -586,7 +587,7 @@ TH_API void THNN_(TemporalSubSampling_accGradParameters)(
THTensor *gradWeight,
THTensor *gradBias,
int kW, int dW,
- real scale);
+ accreal scale);
TH_API void THNN_(BatchNormalization_updateOutput)(
THNNState *state,
@@ -648,7 +649,7 @@ TH_API void THNN_(SpatialConvolutionMap_accGradParameters)(
int nInputPlane, // number of input planes
int nOutputPlane, // number of output planes
int dW, int dH, // stride
- real scale); // scaling factor
+ accreal scale); // scaling factor
TH_API void THNN_(SpatialConvolutionMM_updateOutput)(
THNNState *state,
@@ -683,7 +684,7 @@ TH_API void THNN_(SpatialConvolutionMM_accGradParameters)(
int kW, int kH,
int dW, int dH,
int padW, int padH,
- real scale);
+ accreal scale);
TH_API void THNN_(SpatialConvolutionLocal_updateOutput)(
THNNState *state,
@@ -724,7 +725,7 @@ TH_API void THNN_(SpatialConvolutionLocal_accGradParameters)(
int padW, int padH,
long inputWidth, long inputHeight,
long outputWidth, long outputHeight,
- real scale);
+ accreal scale);
TH_API void THNN_(SpatialAdaptiveMaxPooling_updateOutput)(
THNNState *state,
@@ -811,7 +812,7 @@ TH_API void THNN_(SpatialFullConvolution_accGradParameters)(
int dW, int dH,
int padW, int padH,
int adjW, int adjH,
- real scale);
+ accreal scale);
TH_API void THNN_(SpatialFullConvolutionMap_updateOutput)(
THNNState *state, // library state
@@ -844,7 +845,7 @@ TH_API void THNN_(SpatialFullConvolutionMap_accGradParameters)(
int nInputPlane, // number of input planes
int nOutputPlane, // number of output planes
int dW, int dH, // stride
- real scale); // scaling factor
+ accreal scale); // scaling factor
TH_API void THNN_(SpatialDilatedConvolution_updateOutput)(
THNNState *state,
@@ -883,7 +884,7 @@ TH_API void THNN_(SpatialDilatedConvolution_accGradParameters)(
int dW, int dH,
int padW, int padH,
int dilationW, int dilationH,
- real scale);
+ accreal scale);
TH_API void THNN_(SpatialMaxPooling_updateOutput)(
THNNState *state,
@@ -965,7 +966,7 @@ TH_API void THNN_(SpatialSubSampling_accGradParameters)(
THTensor *gradBias,
int kW, int kH,
int dW, int dH,
- real scale);
+ accreal scale);
TH_API void THNN_(SpatialUpSamplingNearest_updateOutput)(
THNNState *state,
@@ -1058,7 +1059,7 @@ TH_API void THNN_(VolumetricConvolution_accGradParameters)(
THTensor *fgradInput,
int dT, int dW, int dH,
int pT, int pW, int pH,
- real scale);
+ accreal scale);
TH_API void THNN_(VolumetricConvolutionMM_updateOutput)(
THNNState *state,
@@ -1091,7 +1092,7 @@ TH_API void THNN_(VolumetricConvolutionMM_accGradParameters)(
int kT, int kW, int kH,
int dT, int dW, int dH,
int pT, int pW, int pH,
- real scale);
+ accreal scale);
TH_API void THNN_(VolumetricFullConvolution_updateOutput)(
THNNState *state, // library state
@@ -1126,7 +1127,7 @@ TH_API void THNN_(VolumetricFullConvolution_accGradParameters)(
int dT, int dW, int dH, // stride
int pT, int pW, int pH, // padding
int aT, int aW, int aH, // extra output adjustment
- real scale); // scaling factor
+ accreal scale); // scaling factor
TH_API void THNN_(VolumetricDilatedConvolution_updateOutput)(
THNNState *state,
@@ -1165,7 +1166,7 @@ TH_API void THNN_(VolumetricDilatedConvolution_accGradParameters)(
int dT, int dW, int dH,
int padT, int padW, int padH,
int dilationT, int dilationW, int dilationH,
- real scale);
+ accreal scale);
TH_API void THNN_(VolumetricMaxPooling_updateOutput)(
THNNState *state,
@@ -1273,5 +1274,4 @@ TH_API void THNN_(VolumetricReplicationPadding_updateGradInput)(
int pleft, int pright,
int ptop, int pbottom,
int pfront, int pback);
-
#endif
diff --git a/lib/THNN/generic/TemporalConvolution.c b/lib/THNN/generic/TemporalConvolution.c
index 14297ad..a107da2 100644
--- a/lib/THNN/generic/TemporalConvolution.c
+++ b/lib/THNN/generic/TemporalConvolution.c
@@ -48,11 +48,11 @@ void THNN_(TemporalConvolution_updateOutput)(
THTensor *outputWindow, *inputWindow;
int nInputFrame, nOutputFrame;
long k, i;
-
+
int dimS = 0; // sequence dimension
int dimF = 1; // feature dimension
-
- if (input->nDimension == 3)
+
+ if (input->nDimension == 3)
{
dimS = 1;
dimF = 2;
@@ -93,7 +93,7 @@ void THNN_(TemporalConvolution_updateOutput)(
nFrame, inputFrameStride*input->size[1],
kW*input->size[1], 1);
- THTensor_(setStorage2d)(outputWindow, output->storage,
+ THTensor_(setStorage2d)(outputWindow, output->storage,
output->storageOffset + k*output->size[1],
nFrame, outputFrameStride*output->size[1],
output->size[1], 1);
@@ -108,18 +108,18 @@ void THNN_(TemporalConvolution_updateOutput)(
THTensor *outputSample = THTensor_(new)();
THTensor *inputSample = THTensor_(new)();
int nBatchFrame = input->size[0];
-
+
THTensor_(resize3d)(output,
nBatchFrame,
nOutputFrame,
outputFrameSize);
-
+
for(i = 0; i < nBatchFrame; i++)
{
THTensor_(select)(outputSample, output, 0, i);
THTensor_(select)(inputSample, input, 0, i);
long nOutputSampleFrame = nOutputFrame;
-
+
/* bias first */
for(k = 0; k < nOutputFrame; k++)
{
@@ -140,7 +140,7 @@ void THNN_(TemporalConvolution_updateOutput)(
nFrame, inputFrameStride*inputSample->size[1],
kW*inputSample->size[1], 1);
- THTensor_(setStorage2d)(outputWindow, outputSample->storage,
+ THTensor_(setStorage2d)(outputWindow, outputSample->storage,
outputSample->storageOffset + k*outputSample->size[1],
nFrame, outputFrameStride*outputSample->size[1],
outputSample->size[1], 1);
@@ -175,11 +175,11 @@ void THNN_(TemporalConvolution_updateGradInput)(
THTensor *gradOutputWindow;
THTensor *gradInputWindow;
long k, i;
-
+
int dimS = 0; // sequence dimension
int dimF = 1; // feature dimension
-
- if (gradOutput->nDimension == 3)
+
+ if (gradOutput->nDimension == 3)
{
dimS = 1;
dimF = 2;
@@ -227,13 +227,13 @@ void THNN_(TemporalConvolution_updateGradInput)(
THTensor *gradOutputSample = THTensor_(new)();
THTensor *gradInputSample = THTensor_(new)();
int nBatchFrame = input->size[0];
-
+
for(i = 0; i < nBatchFrame; i++)
{
THTensor_(select)(gradOutputSample, gradOutput, 0, i);
THTensor_(select)(gradInputSample, gradInput, 0, i);
int nOutputSampleFrame = nOutputFrame;
-
+
/* ouch */
for(k = 0; nOutputSampleFrame > 0; k++)
{
@@ -274,19 +274,20 @@ void THNN_(TemporalConvolution_accGradParameters)(
THTensor *gradBias,
int kW,
int dW,
- real scale)
+ accreal scale_)
{
+ real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
long nInputFrame;
long nOutputFrame;
THTensor *gradOutputWindow;
THTensor *inputWindow;
long k, i;
-
+
int dimS = 0; // sequence dimension
int dimF = 1; // feature dimension
-
- if (gradOutput->nDimension == 3)
+
+ if (gradOutput->nDimension == 3)
{
dimS = 1;
dimF = 2;
@@ -301,7 +302,7 @@ void THNN_(TemporalConvolution_accGradParameters)(
gradOutput = THTensor_(newContiguous)(gradOutput);
gradOutputWindow = THTensor_(new)();
inputWindow = THTensor_(new)();
-
+
if (input->nDimension == 2)
{
/* bias first */
@@ -324,7 +325,7 @@ void THNN_(TemporalConvolution_accGradParameters)(
nFrame, inputFrameStride*input->size[1],
kW*input->size[1], 1);
- THTensor_(setStorage2d)(gradOutputWindow, gradOutput->storage,
+ THTensor_(setStorage2d)(gradOutputWindow, gradOutput->storage,
gradOutput->storageOffset + k*gradOutput->size[1],
nFrame, outputFrameStride*gradOutput->size[1],
gradOutput->size[1], 1);
@@ -339,13 +340,13 @@ void THNN_(TemporalConvolution_accGradParameters)(
THTensor *gradOutputSample = THTensor_(new)();
THTensor *inputSample = THTensor_(new)();
int nBatchFrame = input->size[0];
-
+
for(i = 0; i < nBatchFrame; i++)
{
THTensor_(select)(gradOutputSample, gradOutput, 0, i);
THTensor_(select)(inputSample, input, 0, i);
int nOutputSampleFrame = nOutputFrame;
-
+
/* bias first */
for(k = 0; k < nOutputFrame; k++)
{
@@ -366,7 +367,7 @@ void THNN_(TemporalConvolution_accGradParameters)(
nFrame, inputFrameStride*inputSample->size[1],
kW*inputSample->size[1], 1);
- THTensor_(setStorage2d)(gradOutputWindow, gradOutputSample->storage,
+ THTensor_(setStorage2d)(gradOutputWindow, gradOutputSample->storage,
gradOutputSample->storageOffset + k*gradOutputSample->size[1],
nFrame, outputFrameStride*gradOutputSample->size[1],
gradOutputSample->size[1], 1);
diff --git a/lib/THNN/generic/TemporalSubSampling.c b/lib/THNN/generic/TemporalSubSampling.c
index bfc7d30..8728d14 100644
--- a/lib/THNN/generic/TemporalSubSampling.c
+++ b/lib/THNN/generic/TemporalSubSampling.c
@@ -51,7 +51,7 @@ void THNN_(TemporalSubSampling_updateOutput)(
THTensor *outputFrame, *inputWindow;
int nInputFrame, nOutputFrame;
long k;
-
+
THNN_(TemporalSubSampling_shapeCheck)(state, input, NULL, kW, dW, &inputFrameSize);
outputFrame = THTensor_(new)();
@@ -63,7 +63,7 @@ void THNN_(TemporalSubSampling_updateOutput)(
THTensor_(resize2d)(output,
nOutputFrame,
inputFrameSize);
-
+
for(k = 0; k < nOutputFrame; k++)
{
THTensor_(narrow)(inputWindow, input, 0, k*dW, kW);
@@ -124,8 +124,9 @@ void THNN_(TemporalSubSampling_accGradParameters)(
THTensor *gradBias,
int kW,
int dW,
- real scale)
+ accreal scale_)
{
+ real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
THTensor *gradOutputFrame;
THTensor *inputWindow, *buffer;
long k;
diff --git a/lib/THNN/generic/Threshold.c b/lib/THNN/generic/Threshold.c
index dd2a698..949c7a0 100644
--- a/lib/THNN/generic/Threshold.c
+++ b/lib/THNN/generic/Threshold.c
@@ -6,10 +6,12 @@ void THNN_(Threshold_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
- real threshold,
- real val,
+ accreal threshold_,
+ accreal val_,
bool inplace)
{
+ real threshold = TH_CONVERT_ACCREAL_TO_REAL(threshold_);
+ real val = TH_CONVERT_ACCREAL_TO_REAL(val_);
if (inplace)
{
TH_TENSOR_APPLY(real, input,
@@ -32,10 +34,12 @@ void THNN_(Threshold_updateGradInput)(
THTensor *input,
THTensor *gradOutput,
THTensor *gradInput,
- real threshold,
- real val,
+ accreal threshold_,
+ accreal val_,
bool inplace)
{
+ real threshold = TH_CONVERT_ACCREAL_TO_REAL(threshold_);
+ real val = TH_CONVERT_ACCREAL_TO_REAL(val_);
THNN_CHECK_NELEMENT(input, gradOutput);
if (inplace)
{
diff --git a/lib/THNN/generic/VolumetricConvolution.c b/lib/THNN/generic/VolumetricConvolution.c
index 4fd8ac3..bcd1a0f 100644
--- a/lib/THNN/generic/VolumetricConvolution.c
+++ b/lib/THNN/generic/VolumetricConvolution.c
@@ -170,8 +170,9 @@ void THNN_(VolumetricConvolution_accGradParameters)(
int pT,
int pW,
int pH,
- real scale)
+ accreal scale_)
{
+ real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
THArgCheck(pT != 0 || pW != 0 || pH != 0, 9, "padding not supported by CPU backend"); // sharing signature with CUDA version
THNN_ARGCHECK(gradWeight->nDimension == 5, 4, gradWeight,
diff --git a/lib/THNN/generic/VolumetricConvolutionMM.c b/lib/THNN/generic/VolumetricConvolutionMM.c
index 4085e2b..f8d9eb2 100644
--- a/lib/THNN/generic/VolumetricConvolutionMM.c
+++ b/lib/THNN/generic/VolumetricConvolutionMM.c
@@ -575,8 +575,9 @@ void THNN_(VolumetricConvolutionMM_accGradParameters)(
int kT, int kW, int kH,
int dT, int dW, int dH,
int pT, int pW, int pH,
- real scale)
+ accreal scale_)
{
+ real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
int freeWeight;
int nOutputPlane = (int)gradWeight->size[0];
diff --git a/lib/THNN/generic/VolumetricDilatedConvolution.c b/lib/THNN/generic/VolumetricDilatedConvolution.c
index d2d5c88..e31ff2b 100644
--- a/lib/THNN/generic/VolumetricDilatedConvolution.c
+++ b/lib/THNN/generic/VolumetricDilatedConvolution.c
@@ -299,8 +299,9 @@ void THNN_(VolumetricDilatedConvolution_accGradParameters)(
int dT, int dW, int dH,
int padT, int padW, int padH,
int dilationT, int dilationW, int dilationH,
- real scale)
+ accreal scale_)
{
+ real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
THNN_(VolumetricDilatedConvolution_shapeCheck)(
input, gradOutput, gradWeight, gradBias,
kT, kH, kW, dT, dH, dW, padT, padH, padW,
diff --git a/lib/THNN/generic/VolumetricFullConvolution.c b/lib/THNN/generic/VolumetricFullConvolution.c
index b6ef1cd..61c3a44 100644
--- a/lib/THNN/generic/VolumetricFullConvolution.c
+++ b/lib/THNN/generic/VolumetricFullConvolution.c
@@ -402,8 +402,9 @@ void THNN_(VolumetricFullConvolution_accGradParameters)(
int dT, int dW, int dH, // stride
int pT, int pW, int pH, // padding
int aT, int aW, int aH, // extra output adjustment
- real scale)
+ accreal scale_)
{
+ real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
// number of input & output planes and kernel size is indirectly defined by the gradWeight tensor
THNN_(VolumetricFullConvolution_shapeCheck)(
input, gradOutput, gradWeight, gradBias,