github.com/torch/nn.git
author     Soumith Chintala <soumith@gmail.com>    2017-01-26 00:26:42 +0300
committer  GitHub <noreply@github.com>             2017-01-26 00:26:42 +0300
commit     7a07afe545b4deae5919d9dc268bfac3d37398c7 (patch)
tree       ea2cb89e44de8e73d298e52b9226c48a2b313e60
parent     ea7ec8b63dce65a352a773b6538953067dae5ac1 (diff)
Revert "Using accreal instead of real in the API"revert-1109-api
-rw-r--r--  THNN.lua                                         |   7
-rw-r--r--  lib/THNN/generic/ELU.c                           |   8
-rw-r--r--  lib/THNN/generic/HardShrink.c                    |   6
-rw-r--r--  lib/THNN/generic/HardTanh.c                      |  15
-rw-r--r--  lib/THNN/generic/LeakyReLU.c                     |   6
-rw-r--r--  lib/THNN/generic/Linear.c                        |   3
-rw-r--r--  lib/THNN/generic/LookupTable.c                   |   9
-rw-r--r--  lib/THNN/generic/MarginCriterion.c               |  10
-rw-r--r--  lib/THNN/generic/MultiMarginCriterion.c          |   6
-rw-r--r--  lib/THNN/generic/PReLU.c                         |   3
-rw-r--r--  lib/THNN/generic/RReLU.c                         |  18
-rw-r--r--  lib/THNN/generic/SoftPlus.c                      |  14
-rw-r--r--  lib/THNN/generic/SoftShrink.c                    |   8
-rw-r--r--  lib/THNN/generic/SparseLinear.c                  |  18
-rw-r--r--  lib/THNN/generic/SpatialConvolutionLocal.c       |  64
-rw-r--r--  lib/THNN/generic/SpatialConvolutionMM.c          |   7
-rw-r--r--  lib/THNN/generic/SpatialConvolutionMap.c         |  14
-rw-r--r--  lib/THNN/generic/SpatialDilatedConvolution.c     |   3
-rw-r--r--  lib/THNN/generic/SpatialFullConvolution.c        |   7
-rw-r--r--  lib/THNN/generic/SpatialFullConvolutionMap.c     |  14
-rw-r--r--  lib/THNN/generic/SpatialSubSampling.c            |  11
-rw-r--r--  lib/THNN/generic/Sqrt.c                          |   7
-rw-r--r--  lib/THNN/generic/THNN.h                          | 114
-rw-r--r--  lib/THNN/generic/TemporalConvolution.c           |  45
-rw-r--r--  lib/THNN/generic/TemporalSubSampling.c           |   7
-rw-r--r--  lib/THNN/generic/Threshold.c                     |  12
-rw-r--r--  lib/THNN/generic/VolumetricConvolution.c         |   3
-rw-r--r--  lib/THNN/generic/VolumetricConvolutionMM.c       |   3
-rw-r--r--  lib/THNN/generic/VolumetricDilatedConvolution.c  |   3
-rw-r--r--  lib/THNN/generic/VolumetricFullConvolution.c     |   3
30 files changed, 187 insertions(+), 261 deletions(-)
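
Note: every hunk below follows the same pattern. The change being reverted had switched the scalar arguments of the THNN C API from real (the tensor element type) to accreal (the wider accumulation type) and inserted a TH_CONVERT_ACCREAL_TO_REAL(...) narrowing at the top of each function body; this revert restores the plain real signatures and drops those conversions. A minimal sketch of the before/after, written out for the float backend (the _before/_after names and the trimmed parameter lists are illustrative, not the actual generated symbols):

/* Sketch only. In the TH "generic" expansion for the float backend,
 *   real    -> float   (tensor element type)
 *   accreal -> double  (wider accumulation type)
 * and TH_CONVERT_ACCREAL_TO_REAL is essentially a narrowing conversion back to real. */

/* Before this revert: scalars crossed the API as accreal. */
void THNN_FloatELU_updateOutput_before(/* state, input, output, */ double alpha_, int inplace)
{
  float alpha = (float)alpha_;  /* what TH_CONVERT_ACCREAL_TO_REAL amounts to here */
  /* ... apply ELU elementwise using alpha ... */
  (void)alpha; (void)inplace;
}

/* After this revert: scalars are passed as real directly, matching the hunks below. */
void THNN_FloatELU_updateOutput_after(/* state, input, output, */ float alpha, int inplace)
{
  /* ... apply ELU elementwise using alpha ... */
  (void)alpha; (void)inplace;
}
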
diff --git a/THNN.lua b/THNN.lua
index 60430f0..9100239 100644
--- a/THNN.lua
+++ b/THNN.lua
@@ -73,13 +73,6 @@ local replacements =
}
}
--- gsub(s, 'real', 'float') changes accreal to accfloat.
--- typedef accfloat ahead of time.
-ffi.cdef("typedef double accfloat;")
--- gsub(s, 'real', 'double') changes accreal to accfloat.
--- typedef accdouble ahead of time
-ffi.cdef("typedef double accdouble;")
-
for i=1,#replacements do
local r = replacements[i]
local s = preprocessed
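
Note: the two ffi.cdef typedefs removed from THNN.lua above existed only to support the accreal signatures. The binding builds its FFI declarations by textually substituting 'real' with 'float' or 'double' in THNN.h, which also turns "accreal" into "accfloat" / "accdouble", so those two names had to be declared as types ahead of time. With accreal gone from the header they are no longer needed. For reference, the removed cdef strings declared the equivalent of the following (a sketch; the binding passes these strings to LuaJIT's FFI rather than compiling C):

/* "accreal" becomes "accfloat" / "accdouble" after the real->float and
 * real->double substitutions; both were mapped to double. */
typedef double accfloat;
typedef double accdouble;
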
diff --git a/lib/THNN/generic/ELU.c b/lib/THNN/generic/ELU.c
index ddcfb97..784a203 100644
--- a/lib/THNN/generic/ELU.c
+++ b/lib/THNN/generic/ELU.c
@@ -6,10 +6,9 @@ void THNN_(ELU_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
- accreal alpha_,
+ real alpha,
bool inplace)
-{
- real alpha = TH_CONVERT_ACCREAL_TO_REAL(alpha_);
+{
if(inplace) {
TH_TENSOR_APPLY(real, input,
if(*input_data <= 0) {
@@ -31,10 +30,9 @@ void THNN_(ELU_updateGradInput)(
THTensor *gradOutput,
THTensor *gradInput,
THTensor *output,
- accreal alpha_,
+ real alpha,
bool inplace)
{
- real alpha = TH_CONVERT_ACCREAL_TO_REAL(alpha_);
THNN_CHECK_NELEMENT(input, gradOutput);
if(inplace) {
TH_TENSOR_APPLY2(real, gradOutput, real, output,
diff --git a/lib/THNN/generic/HardShrink.c b/lib/THNN/generic/HardShrink.c
index aaae85b..50d272c 100644
--- a/lib/THNN/generic/HardShrink.c
+++ b/lib/THNN/generic/HardShrink.c
@@ -6,9 +6,8 @@ void THNN_(HardShrink_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
- accreal lambda_)
+ real lambda)
{
- real lambda = TH_CONVERT_ACCREAL_TO_REAL(lambda_);
THTensor_(resizeAs)(output, input);
TH_TENSOR_APPLY2(real, output, real, input,
@@ -26,9 +25,8 @@ void THNN_(HardShrink_updateGradInput)(
THTensor *input,
THTensor *gradOutput,
THTensor *gradInput,
- accreal lambda_)
+ real lambda)
{
- real lambda = TH_CONVERT_ACCREAL_TO_REAL(lambda_);
THNN_CHECK_NELEMENT(input, gradOutput);
THTensor_(resizeAs)(gradInput, input);
TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, input,
diff --git a/lib/THNN/generic/HardTanh.c b/lib/THNN/generic/HardTanh.c
index b38a946..57ef1be 100644
--- a/lib/THNN/generic/HardTanh.c
+++ b/lib/THNN/generic/HardTanh.c
@@ -6,17 +6,15 @@ void THNN_(HardTanh_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
- accreal min_val_,
- accreal max_val_,
+ real min_val,
+ real max_val,
bool inplace)
{
- real min_val = TH_CONVERT_ACCREAL_TO_REAL(min_val_);
- real max_val = TH_CONVERT_ACCREAL_TO_REAL(max_val_);
if (inplace)
THTensor_(set)(output, input);
else
THTensor_(resizeAs)(output, input);
-
+
if (input->nDimension == 1 || !THTensor_(isContiguous)(input) || !THTensor_(isContiguous)(output))
{
if (inplace)
@@ -70,13 +68,10 @@ void THNN_(HardTanh_updateGradInput)(
THTensor *input,
THTensor *gradOutput,
THTensor *gradInput,
- accreal min_val_,
- accreal max_val_,
+ real min_val,
+ real max_val,
bool inplace)
{
- real min_val = TH_CONVERT_ACCREAL_TO_REAL(min_val_);
- real max_val = TH_CONVERT_ACCREAL_TO_REAL(max_val_);
-
THNN_CHECK_NELEMENT(input, gradOutput);
if (inplace)
THTensor_(set)(gradInput, gradOutput);
diff --git a/lib/THNN/generic/LeakyReLU.c b/lib/THNN/generic/LeakyReLU.c
index 074047d..a4d9677 100644
--- a/lib/THNN/generic/LeakyReLU.c
+++ b/lib/THNN/generic/LeakyReLU.c
@@ -6,10 +6,9 @@ void THNN_(LeakyReLU_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
- accreal negval_,
+ real negval,
bool inplace)
{
- real negval = TH_CONVERT_ACCREAL_TO_REAL(negval_);
if (inplace)
{
TH_TENSOR_APPLY(real, input,
@@ -32,10 +31,9 @@ void THNN_(LeakyReLU_updateGradInput)(
THTensor *input,
THTensor *gradOutput,
THTensor *gradInput,
- accreal negval_,
+ real negval,
bool inplace)
{
- real negval = TH_CONVERT_ACCREAL_TO_REAL(negval_);
THNN_CHECK_NELEMENT(input, gradOutput);
if (inplace)
{
diff --git a/lib/THNN/generic/Linear.c b/lib/THNN/generic/Linear.c
index faef421..933bc4b 100644
--- a/lib/THNN/generic/Linear.c
+++ b/lib/THNN/generic/Linear.c
@@ -87,9 +87,8 @@ void THNN_(Linear_accGradParameters)(
THTensor *gradWeight,
THTensor *gradBias,
THTensor *addBuffer,
- accreal scale_)
+ real scale)
{
- real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
long dim = THTensor_(nDimension)(input);
if (dim == 1) {
THTensor_(addr)(gradWeight,1,gradWeight,scale,gradOutput,input);
diff --git a/lib/THNN/generic/LookupTable.c b/lib/THNN/generic/LookupTable.c
index 46bc2c3..b460f38 100644
--- a/lib/THNN/generic/LookupTable.c
+++ b/lib/THNN/generic/LookupTable.c
@@ -32,9 +32,8 @@ void THNN_(LookupTable_accGradParameters)(
THIndexTensor *indices,
bool scaleGradByFreq,
int paddingValue,
- accreal ascale)
+ real scale)
{
- real scale = TH_CONVERT_ACCREAL_TO_REAL(ascale);
ptrdiff_t i;
THInteger_t *count_data = NULL;
@@ -164,11 +163,9 @@ void THNN_(LookupTable_renorm)(
THNNState *state,
THIndexTensor *idx,
THTensor *weight,
- accreal maxNorm_,
- accreal normType_)
+ real maxNorm,
+ real normType)
{
- real maxNorm = TH_CONVERT_ACCREAL_TO_REAL(maxNorm_);
- real normType = TH_CONVERT_ACCREAL_TO_REAL(normType_);
if (!THTensor_(isContiguous)(weight))
THError("weight must be contiguous");
if (!THIndexTensor_(isContiguous)(idx))
diff --git a/lib/THNN/generic/MarginCriterion.c b/lib/THNN/generic/MarginCriterion.c
index d6d9b60..1675860 100644
--- a/lib/THNN/generic/MarginCriterion.c
+++ b/lib/THNN/generic/MarginCriterion.c
@@ -8,11 +8,10 @@ void THNN_(MarginCriterion_updateOutput)(
THTensor *target,
THTensor *output,
bool sizeAverage,
- accreal margin_)
+ real margin)
{
- real margin = TH_CONVERT_ACCREAL_TO_REAL(margin_);
THNN_CHECK_NELEMENT(input, target);
- THNN_CHECK_DIM_SIZE(output, 1, 0, 1);
+ THNN_CHECK_DIM_SIZE(output, 1, 0, 1);
real sum = 0;
TH_TENSOR_APPLY2(real, input, real, target,
@@ -32,10 +31,9 @@ void THNN_(MarginCriterion_updateGradInput)(
THTensor *target,
THTensor *gradInput,
bool sizeAverage,
- accreal margin_)
+ real margin)
{
- real margin = TH_CONVERT_ACCREAL_TO_REAL(margin_);
- THNN_CHECK_NELEMENT(input, target);
+ THNN_CHECK_NELEMENT(input, target);
real norm = (sizeAverage ? 1./((real)THTensor_(nElement)(input)) : 1.);
THTensor_(resizeAs)(gradInput, input);
diff --git a/lib/THNN/generic/MultiMarginCriterion.c b/lib/THNN/generic/MultiMarginCriterion.c
index 2f8f8ff..af83e89 100644
--- a/lib/THNN/generic/MultiMarginCriterion.c
+++ b/lib/THNN/generic/MultiMarginCriterion.c
@@ -11,9 +11,8 @@ void THNN_(MultiMarginCriterion_updateOutput)(
bool sizeAverage,
int p,
THTensor *weights,
- accreal margin_)
+ real margin)
{
- real margin = TH_CONVERT_ACCREAL_TO_REAL(margin_);
real *input_data, *weights_data;
THIndex_t *target_data;
long nframe, dim;
@@ -91,9 +90,8 @@ void THNN_(MultiMarginCriterion_updateGradInput)(
bool sizeAverage,
int p,
THTensor *weights,
- accreal margin_)
+ real margin)
{
- real margin = TH_CONVERT_ACCREAL_TO_REAL(margin_);
real *input_data;
real *gradInput_data;
THIndex_t *target_data;
diff --git a/lib/THNN/generic/PReLU.c b/lib/THNN/generic/PReLU.c
index 174f514..3d2ebfc 100644
--- a/lib/THNN/generic/PReLU.c
+++ b/lib/THNN/generic/PReLU.c
@@ -165,9 +165,8 @@ void THNN_(PReLU_accGradParameters)(
THTensor *gradWeightBuf,
THTensor *gradWeightBuf2,
THIndex_t nOutputPlane,
- accreal scale_)
+ real scale)
{
- real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
THNN_CHECK_NELEMENT(input, gradOutput);
real *gradWeight_data = THTensor_(data)(gradWeight);
diff --git a/lib/THNN/generic/RReLU.c b/lib/THNN/generic/RReLU.c
index 8fd46d3..cdb9dca 100644
--- a/lib/THNN/generic/RReLU.c
+++ b/lib/THNN/generic/RReLU.c
@@ -7,14 +7,12 @@ void THNN_(RReLU_updateOutput)(
THTensor *input,
THTensor *output,
THTensor *noise,
- accreal lower_,
- accreal upper_,
+ real lower,
+ real upper,
bool train,
bool inplace,
THGenerator *generator)
{
- real lower = TH_CONVERT_ACCREAL_TO_REAL(lower_);
- real upper = TH_CONVERT_ACCREAL_TO_REAL(upper_);
if (train)
{
// get default random generator
@@ -74,7 +72,7 @@ void THNN_(RReLU_updateOutput)(
*output_data = *input_data * r;
);
}
- }
+ }
}
void THNN_(RReLU_updateGradInput)(
@@ -83,13 +81,11 @@ void THNN_(RReLU_updateGradInput)(
THTensor *gradOutput,
THTensor *gradInput,
THTensor *noise,
- accreal lower_,
- accreal upper_,
+ real lower,
+ real upper,
bool train,
bool inplace)
{
- real lower = TH_CONVERT_ACCREAL_TO_REAL(lower_);
- real upper = TH_CONVERT_ACCREAL_TO_REAL(upper_);
THNN_CHECK_NELEMENT(input, gradOutput);
if (train && upper - lower > 1E-6) // e.g. if upper == lower, RReLU behaves like LeakyReLU
{
@@ -103,10 +99,10 @@ void THNN_(RReLU_updateGradInput)(
{
THTensor_(resizeAs)(gradInput, input);
THTensor_(cmul)(gradInput, gradOutput, noise);
- }
+ }
}
else
- {
+ {
// use constant factor for negative input values
const real negSlope = (lower + upper) / 2;
if (inplace)
diff --git a/lib/THNN/generic/SoftPlus.c b/lib/THNN/generic/SoftPlus.c
index 6491e66..7305238 100644
--- a/lib/THNN/generic/SoftPlus.c
+++ b/lib/THNN/generic/SoftPlus.c
@@ -6,11 +6,9 @@ void THNN_(SoftPlus_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
- accreal beta_,
- accreal threshold_)
+ real beta,
+ real threshold)
{
- real beta = TH_CONVERT_ACCREAL_TO_REAL(beta_);
- real threshold = TH_CONVERT_ACCREAL_TO_REAL(threshold_);
THTensor_(resizeAs)(output, input);
// f(x) = 1/beta * log(1 + exp(beta * x))
@@ -25,14 +23,12 @@ void THNN_(SoftPlus_updateGradInput)(
THTensor *gradOutput,
THTensor *gradInput,
THTensor *output,
- accreal beta_,
- accreal threshold_)
+ real beta,
+ real threshold)
{
- real beta = TH_CONVERT_ACCREAL_TO_REAL(beta_);
- real threshold = TH_CONVERT_ACCREAL_TO_REAL(threshold_);
THNN_CHECK_NELEMENT(input, gradOutput);
THTensor_(resizeAs)(gradInput, output);
-
+
// d/dx[log(1+exp(k*x))/k] = exp(kx) / (exp(kx) + 1)
// SINCE
// y = (1/k)*log(1+exp(k*x)) --> x = (1/k)*log(exp(k*y)-1)
diff --git a/lib/THNN/generic/SoftShrink.c b/lib/THNN/generic/SoftShrink.c
index e779508..28dcce0 100644
--- a/lib/THNN/generic/SoftShrink.c
+++ b/lib/THNN/generic/SoftShrink.c
@@ -6,11 +6,10 @@ void THNN_(SoftShrink_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
- accreal lambda_)
+ real lambda)
{
- real lambda = TH_CONVERT_ACCREAL_TO_REAL(lambda_);
THTensor_(resizeAs)(output, input);
-
+
TH_TENSOR_APPLY2(real, output, real, input,
if ((*input_data) > lambda)
*output_data = *input_data - lambda;
@@ -26,9 +25,8 @@ void THNN_(SoftShrink_updateGradInput)(
THTensor *input,
THTensor *gradOutput,
THTensor *gradInput,
- accreal lambda_)
+ real lambda)
{
- real lambda = TH_CONVERT_ACCREAL_TO_REAL(lambda_);
THNN_CHECK_NELEMENT(input, gradOutput);
THTensor_(resizeAs)(gradInput, input);
TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, input,
diff --git a/lib/THNN/generic/SparseLinear.c b/lib/THNN/generic/SparseLinear.c
index 0c52541..807280e 100644
--- a/lib/THNN/generic/SparseLinear.c
+++ b/lib/THNN/generic/SparseLinear.c
@@ -167,11 +167,9 @@ void THNN_(SparseLinear_accGradParameters)(
THTensor *gradBias,
THTensor *weight,
THTensor *bias,
- accreal weightDecay_,
- accreal scale_)
+ real weightDecay,
+ real scale)
{
- real weightDecay = TH_CONVERT_ACCREAL_TO_REAL(weightDecay_);
- real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
long h, i, col, hp0, hp1;
long outDim = THTensor_(size)(weight, 0);
long inDim = THTensor_(size)(weight, 1);
@@ -245,11 +243,9 @@ void THNN_(SparseLinear_legacyAccGradParameters)(
THTensor *gradBias,
THTensor *weight,
THTensor *bias,
- accreal weightDecay_,
- accreal scale_)
+ real weightDecay,
+ real scale)
{
- real weightDecay = TH_CONVERT_ACCREAL_TO_REAL(weightDecay_);
- real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
long h, i;
long outDim = THTensor_(size)(weight, 0);
long inDim = THTensor_(size)(weight, 1);
@@ -312,9 +308,8 @@ void THNN_(SparseLinear_updateParameters)(
THTensor *gradWeight,
THTensor *gradBias,
THTensor *lastInput,
- accreal learningRate_)
+ real learningRate)
{
- real learningRate = TH_CONVERT_ACCREAL_TO_REAL(learningRate_);
long h, i;
long outDim = weight->size[0];
long inDim = weight->size[1];
@@ -386,9 +381,8 @@ void THNN_(SparseLinear_legacyUpdateParameters)(
THTensor *gradWeight,
THTensor *gradBias,
THTensor *lastInput,
- accreal learningRate_)
+ real learningRate)
{
- real learningRate = TH_CONVERT_ACCREAL_TO_REAL(learningRate_);
long h, i;
long outDim = weight->size[0];
long inDim = weight->size[1];
diff --git a/lib/THNN/generic/SpatialConvolutionLocal.c b/lib/THNN/generic/SpatialConvolutionLocal.c
index 06b57f3..efba30e 100644
--- a/lib/THNN/generic/SpatialConvolutionLocal.c
+++ b/lib/THNN/generic/SpatialConvolutionLocal.c
@@ -4,8 +4,8 @@
static inline void THNN_(SpatialConvolutionLocal_shapeCheck)(
THTensor *input, THTensor *gradOutput,
- THTensor *weight, THTensor *bias,
- int kH, int kW, int dH,
+ THTensor *weight, THTensor *bias,
+ int kH, int kW, int dH,
int dW, int padH, int padW,
long inputHeight, long inputWidth,
long outputHeight, long outputWidth) {
@@ -39,7 +39,7 @@ static inline void THNN_(SpatialConvolutionLocal_shapeCheck)(
}
THNN_CHECK_DIM_SIZE(input, ndim, dimf, nInputPlane);
-
+
if (gradOutput != NULL) {
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimf, nOutputPlane);
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, outputHeight);
@@ -56,8 +56,8 @@ static int THNN_(view_weight_local)(THTensor **_weight)
long s1 = weight->size[0] * weight->size[1];
long s2 = weight->size[2];
long s3 = weight->size[3] * weight->size[4] * weight->size[5];
- *_weight = THTensor_(newWithStorage3d)(weight->storage,
- weight->storageOffset,
+ *_weight = THTensor_(newWithStorage3d)(weight->storage,
+ weight->storageOffset,
s1, -1, s2, -1, s3, -1);
return 1;
}
@@ -75,8 +75,8 @@ static void THNN_(SpatialConvolutionLocal_updateOutput_frame)
long i;
THTensor *output3d, *finput3d;
- THNN_(unfolded_copy)(finput, input, kW, kH, dW, dH, padW, padH,
- nInputPlane, inputWidth, inputHeight,
+ THNN_(unfolded_copy)(finput, input, kW, kH, dW, dH, padW, padH,
+ nInputPlane, inputWidth, inputHeight,
outputWidth, outputHeight);
THTensor_(copy)(output, bias);
@@ -86,7 +86,7 @@ static void THNN_(SpatialConvolutionLocal_updateOutput_frame)
outputHeight * outputWidth, 1,
nOutputPlane, outputHeight * outputWidth,
1, nOutputPlane * outputHeight * outputWidth);
-
+
finput3d = THTensor_(newWithStorage3d)
(finput->storage, finput->storageOffset,
outputHeight * outputWidth, 1,
@@ -94,10 +94,10 @@ static void THNN_(SpatialConvolutionLocal_updateOutput_frame)
1, kW * kH * nInputPlane * outputHeight * outputWidth);
// weight: oH*oW x nOutputPlane x nInputPlane*kH*kW
- // finput3d: oH*oW x nInputPlane*kH*kW x 1
+ // finput3d: oH*oW x nInputPlane*kH*kW x 1
THTensor_(baddbmm)(output3d, 1.0, output3d, 1.0, weight, finput3d);
// output3d: oH*oW x nOutputPlane x 1
-
+
THTensor_(free)(output3d);
THTensor_(free)(finput3d);
}
@@ -120,10 +120,10 @@ void THNN_(SpatialConvolutionLocal_updateOutput)(
THNN_(SpatialConvolutionLocal_shapeCheck)
(input, NULL, weight, bias, kH, kW, dH, dW, padH, padW,
- inputHeight, inputWidth, outputHeight, outputWidth);
+ inputHeight, inputWidth, outputHeight, outputWidth);
input = THTensor_(newContiguous)(input);
-
+
long nInputPlane = THTensor_(size)(weight, 2)/ (kW * kH);
long nOutputPlane = THTensor_(size)(weight, 1);
@@ -174,7 +174,7 @@ void THNN_(SpatialConvolutionLocal_updateOutput)(
static void THNN_(SpatialConvolutionLocal_updateGradInput_frame)
(THTensor *gradInput, THTensor *gradOutput,
THTensor *weight, THTensor *fgradInput,
- int kW, int kH, int dW, int dH, int padW, int padH,
+ int kW, int kH, int dW, int dH, int padW, int padH,
long nInputPlane, long inputWidth, long inputHeight,
long nOutputPlane, long outputWidth, long outputHeight)
{
@@ -188,17 +188,17 @@ static void THNN_(SpatialConvolutionLocal_updateGradInput_frame)
kW*kH*nInputPlane, outputHeight*outputWidth,
1, kW*kH*nInputPlane*outputHeight*outputWidth);
// weight: oH*oW x nInputPlane*kH*kW x nOutputPlane
- // gradOutput3d: oH*oW x nOutputPlane x 1
+ // gradOutput3d: oH*oW x nOutputPlane x 1
THTensor_(baddbmm)(fgradInput3d, 0.0, fgradInput3d, 1.0, weight, gradOutput3d);
- // fgradInput3d: oH*oW x nInputPlane*kH*kW x 1
-
+ // fgradInput3d: oH*oW x nInputPlane*kH*kW x 1
+
THTensor_(free)(gradOutput3d);
THTensor_(free)(fgradInput3d);
-
+
THTensor_(zero)(gradInput);
-
- THNN_(unfolded_acc)(fgradInput, gradInput, kW, kH, dW, dH, padW, padH,
- nInputPlane, inputWidth, inputHeight,
+
+ THNN_(unfolded_acc)(fgradInput, gradInput, kW, kH, dW, dH, padW, padH,
+ nInputPlane, inputWidth, inputHeight,
outputWidth, outputHeight);
}
@@ -235,8 +235,8 @@ void THNN_(SpatialConvolutionLocal_updateGradInput)(
if(input->nDimension == 3)
{
THNN_(SpatialConvolutionLocal_updateGradInput_frame)
- (gradInput, gradOutput, weight,
- fgradInput, kW, kH, dW, dH, padW, padH,
+ (gradInput, gradOutput, weight,
+ fgradInput, kW, kH, dW, dH, padW, padH,
nInputPlane, inputWidth, inputHeight,
nOutputPlane, outputWidth, outputHeight);
}
@@ -253,8 +253,8 @@ void THNN_(SpatialConvolutionLocal_updateGradInput)(
THTensor *fgradInput_t = THTensor_(newSelect)(fgradInput, 0, t);
THNN_(SpatialConvolutionLocal_updateGradInput_frame)
- (gradInput_t, gradOutput_t, weight, fgradInput_t,
- kW, kH, dW, dH, padW, padH,
+ (gradInput_t, gradOutput_t, weight, fgradInput_t,
+ kW, kH, dW, dH, padW, padH,
nInputPlane, inputWidth, inputHeight,
nOutputPlane, outputWidth, outputHeight);
@@ -275,12 +275,12 @@ void THNN_(SpatialConvolutionLocal_updateGradInput)(
static void THNN_(SpatialConvolutionLocal_accGradParameters_frame)
(THTensor *gradOutput, THTensor *gradWeight, THTensor *gradBias,
- THTensor *finput, real scale,
- int kW, int kH, int dW, int dH, int padW, int padH,
+ THTensor *finput, real scale,
+ int kW, int kH, int dW, int dH, int padW, int padH,
long nInputPlane, long inputWidth, long inputHeight,
long nOutputPlane, long outputWidth, long outputHeight)
{
-
+
THTensor *gradOutput3d, *finput3d;
gradOutput3d = THTensor_(newWithStorage3d)(gradOutput->storage, gradOutput->storageOffset,
outputHeight*outputWidth, 1,
@@ -290,7 +290,7 @@ static void THNN_(SpatialConvolutionLocal_accGradParameters_frame)
outputHeight*outputWidth, 1,
1, kW*kH*nInputPlane*outputHeight*outputWidth,
kW*kH*nInputPlane, outputHeight*outputWidth);
- // gradOutput3d: oH*oW x nOutputPlane x 1
+ // gradOutput3d: oH*oW x nOutputPlane x 1
// finput3d: oH*oW x 1 x kW*kH*nInputPlane
THTensor_(baddbmm)(gradWeight, 1.0, gradWeight, scale, gradOutput3d, finput3d);
// gradWeight: oH*oW x nOutputPlane x kW*kH*nInputPlane
@@ -314,9 +314,9 @@ void THNN_(SpatialConvolutionLocal_accGradParameters)(
int padW, int padH,
long inputWidth, long inputHeight,
long outputWidth, long outputHeight,
- accreal scale_)
+ real scale)
{
- real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
+
int freeWeight = THNN_(view_weight_local)(&gradWeight);
THNN_(SpatialConvolutionLocal_shapeCheck)
@@ -332,7 +332,7 @@ void THNN_(SpatialConvolutionLocal_accGradParameters)(
if(input->nDimension == 3)
{
THNN_(SpatialConvolutionLocal_accGradParameters_frame)
- (gradOutput, gradWeight, gradBias, finput, scale,
+ (gradOutput, gradWeight, gradBias, finput, scale,
kW, kH, dW, dH, padW, padH,
nInputPlane, inputWidth, inputHeight,
nOutputPlane, outputWidth, outputHeight);
@@ -348,7 +348,7 @@ void THNN_(SpatialConvolutionLocal_accGradParameters)(
THTensor *finput_t = THTensor_(newSelect)(finput, 0, t);
THNN_(SpatialConvolutionLocal_accGradParameters_frame)
- (gradOutput_t, gradWeight, gradBias, finput_t, scale,
+ (gradOutput_t, gradWeight, gradBias, finput_t, scale,
kW, kH, dW, dH, padW, padH,
nInputPlane, inputWidth, inputHeight,
nOutputPlane, outputWidth, outputHeight);
diff --git a/lib/THNN/generic/SpatialConvolutionMM.c b/lib/THNN/generic/SpatialConvolutionMM.c
index c9c22bc..83635c1 100644
--- a/lib/THNN/generic/SpatialConvolutionMM.c
+++ b/lib/THNN/generic/SpatialConvolutionMM.c
@@ -4,7 +4,7 @@
static inline void THNN_(SpatialConvolutionMM_shapeCheck)(
THTensor *input, THTensor *gradOutput,
- THTensor *weight, THTensor *bias,
+ THTensor *weight, THTensor *bias,
int kH, int kW, int dH, int dW, int padH, int padW) {
THArgCheck(kW > 0 && kH > 0, 9,
@@ -45,7 +45,7 @@ static inline void THNN_(SpatialConvolutionMM_shapeCheck)(
nInputPlane,inputHeight,inputWidth,nOutputPlane,outputHeight,outputWidth);
THNN_CHECK_DIM_SIZE(input, ndim, dimf, nInputPlane);
-
+
if (gradOutput != NULL) {
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimf, nOutputPlane);
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, outputHeight);
@@ -336,9 +336,8 @@ void THNN_(SpatialConvolutionMM_accGradParameters)(
int dH,
int padW,
int padH,
- accreal scale_)
+ real scale)
{
- real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
int freeWeight = 0;
if (gradWeight->nDimension == 4) {
diff --git a/lib/THNN/generic/SpatialConvolutionMap.c b/lib/THNN/generic/SpatialConvolutionMap.c
index 750b212..82886c2 100644
--- a/lib/THNN/generic/SpatialConvolutionMap.c
+++ b/lib/THNN/generic/SpatialConvolutionMap.c
@@ -175,18 +175,10 @@ void THNN_(SpatialConvolutionMap_updateGradInput)(
}
void THNN_(SpatialConvolutionMap_accGradParameters)(
- THNNState *state,
- THTensor *input,
- THTensor *gradOutput,
- THTensor *gradWeight,
- THTensor *gradBias,
- THTensor *connTable,
- int nInputPlane,
- int nOutputPlane,
- int dW, int dH,
- accreal scale_)
+ THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradWeight, THTensor *gradBias,
+ THTensor *connTable, int nInputPlane, int nOutputPlane,
+ int dW, int dH, real scale)
{
- real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
THArgCheck(
gradWeight != NULL && gradWeight->nDimension == 3
&& connTable != NULL && connTable->size[0] == gradWeight->size[0], 5,
diff --git a/lib/THNN/generic/SpatialDilatedConvolution.c b/lib/THNN/generic/SpatialDilatedConvolution.c
index d345f7a..8b18910 100644
--- a/lib/THNN/generic/SpatialDilatedConvolution.c
+++ b/lib/THNN/generic/SpatialDilatedConvolution.c
@@ -289,9 +289,8 @@ void THNN_(SpatialDilatedConvolution_accGradParameters)(
int dW, int dH,
int padW, int padH,
int dilationW, int dilationH,
- accreal scale_)
+ real scale)
{
- real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
THNN_(SpatialDilatedConvolution_shapeCheck)
(input, gradOutput, gradWeight, gradBias, kH, kW, dH, dW, padH, padW,
dilationH, dilationW);
diff --git a/lib/THNN/generic/SpatialFullConvolution.c b/lib/THNN/generic/SpatialFullConvolution.c
index e2a835d..4adcca6 100644
--- a/lib/THNN/generic/SpatialFullConvolution.c
+++ b/lib/THNN/generic/SpatialFullConvolution.c
@@ -59,7 +59,7 @@ static void THNN_(col2im)(const real* data_col, const int channels,
static inline void THNN_(SpatialFullConvolution_shapeCheck)(
THTensor *input, THTensor *gradOutput,
- THTensor *weight, THTensor *bias,
+ THTensor *weight, THTensor *bias,
int kH, int kW, int dH, int dW, int padH, int padW, int adjH, int adjW) {
THArgCheck(kW > 0 && kH > 0, 9,
@@ -103,7 +103,7 @@ static inline void THNN_(SpatialFullConvolution_shapeCheck)(
nInputPlane,inputHeight,inputWidth,nOutputPlane,outputHeight,outputWidth);
THNN_CHECK_DIM_SIZE(input, ndim, dimf, nInputPlane);
-
+
if (gradOutput != NULL) {
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimf, nOutputPlane);
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, outputHeight);
@@ -342,9 +342,8 @@ void THNN_(SpatialFullConvolution_accGradParameters)(
int dW, int dH,
int padW, int padH,
int adjW, int adjH,
- accreal scale_)
+ real scale)
{
- real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
THNN_(SpatialFullConvolution_shapeCheck)
(input, gradOutput, gradWeight, gradBias, kH, kW, dH, dW, padH, padW, adjH, adjW);
diff --git a/lib/THNN/generic/SpatialFullConvolutionMap.c b/lib/THNN/generic/SpatialFullConvolutionMap.c
index e98dea0..1bd3455 100644
--- a/lib/THNN/generic/SpatialFullConvolutionMap.c
+++ b/lib/THNN/generic/SpatialFullConvolutionMap.c
@@ -147,18 +147,10 @@ void THNN_(SpatialFullConvolutionMap_updateGradInput)(
}
void THNN_(SpatialFullConvolutionMap_accGradParameters)(
- THNNState *state,
- THTensor *input,
- THTensor *gradOutput,
- THTensor *gradWeight,
- THTensor *gradBias,
- THTensor *connTable,
- int nInputPlane,
- int nOutputPlane,
- int dW, int dH,
- accreal scale_)
+ THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradWeight, THTensor *gradBias,
+ THTensor *connTable, int nInputPlane, int nOutputPlane,
+ int dW, int dH, real scale)
{
- real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
THArgCheck(
gradWeight != NULL && gradWeight->nDimension == 3
&& connTable != NULL && connTable->size[0] == gradWeight->size[0], 5,
diff --git a/lib/THNN/generic/SpatialSubSampling.c b/lib/THNN/generic/SpatialSubSampling.c
index 3f01540..3674f2c 100644
--- a/lib/THNN/generic/SpatialSubSampling.c
+++ b/lib/THNN/generic/SpatialSubSampling.c
@@ -40,7 +40,7 @@ void THNN_(SpatialSubSampling_updateOutput)(
int kW, int kH,
int dW, int dH)
{
-
+
real *weight_data = THTensor_(data)(weight);
real *bias_data = THTensor_(data)(bias);
real *output_data;
@@ -76,11 +76,11 @@ void THNN_(SpatialSubSampling_updateOutput)(
THTensor_(resize3d)(output, nInputPlane, outputHeight, outputWidth);
else
THTensor_(resize4d)(output, input->size[0], nInputPlane, outputHeight, outputWidth);
-
+
input = THTensor_(newContiguous)(input);
input_data = THTensor_(data)(input);
output_data = THTensor_(data)(output);
-
+
#pragma omp parallel for private(k)
for(k = 0; k < nInputPlane; k++)
{
@@ -97,7 +97,7 @@ void THNN_(SpatialSubSampling_updateOutput)(
long i;
for(i = 0; i < outputWidth*outputHeight; i++)
ptr_output[i] = z;
-
+
for(yy = 0; yy < outputHeight; yy++)
{
for(xx = 0; xx < outputWidth; xx++)
@@ -214,9 +214,8 @@ void THNN_(SpatialSubSampling_accGradParameters)(
THTensor *gradBias,
int kW, int kH,
int dW, int dH,
- accreal scale_)
+ real scale)
{
- real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
THNN_(SpatialSubSampling_shapeCheck)(input, gradOutput, gradWeight, kW, kH);
long nbatch = 1;
diff --git a/lib/THNN/generic/Sqrt.c b/lib/THNN/generic/Sqrt.c
index 174884e..24cd51a 100644
--- a/lib/THNN/generic/Sqrt.c
+++ b/lib/THNN/generic/Sqrt.c
@@ -6,9 +6,8 @@ void THNN_(Sqrt_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
- accreal eps_)
+ real eps)
{
- real eps = TH_CONVERT_ACCREAL_TO_REAL(eps_);
THTensor_(resizeAs)(output, input);
THTensor_(sqrt)(output, input);
}
@@ -23,8 +22,8 @@ void THNN_(Sqrt_updateGradInput)(
THNN_CHECK_SHAPE(output, gradOutput);
THTensor_(resizeAs)(gradInput, input);
- if (output->nDimension == 1 ||
- !THTensor_(isContiguous)(output) ||
+ if (output->nDimension == 1 ||
+ !THTensor_(isContiguous)(output) ||
!THTensor_(isContiguous)(gradOutput) ||
!THTensor_(isContiguous)(gradInput))
{
diff --git a/lib/THNN/generic/THNN.h b/lib/THNN/generic/THNN.h
index d4b7a51..447289b 100644
--- a/lib/THNN/generic/THNN.h
+++ b/lib/THNN/generic/THNN.h
@@ -78,7 +78,7 @@ TH_API void THNN_(ELU_updateOutput)(
THNNState *state, // library's state
THTensor *input, // input tensor
THTensor *output, // [OUT] ELU output
- accreal alpha, // an ELU parameter (as in paper)
+ real alpha, // an ELU parameter (as in paper)
bool inplace); // if true, modifies gradOutput and sets gradInput onto it (no additional memory is allocated)
TH_API void THNN_(ELU_updateGradInput)(
THNNState *state, // library's state
@@ -86,7 +86,7 @@ TH_API void THNN_(ELU_updateGradInput)(
THTensor *gradOutput, // gradient w.r.t. output
THTensor *gradInput, // [OUT] gradient w.r.t. input
THTensor *output, // output from a forward pass
- accreal alpha, // an ELU parameter (as in paper)
+ real alpha, // an ELU parameter (as in paper)
bool inplace); // if true, modifies gradOutput and sets gradInput onto it (no additional memory is allocated)
TH_API void THNN_(DistKLDivCriterion_updateOutput)(
@@ -119,30 +119,30 @@ TH_API void THNN_(HardShrink_updateOutput)(
THNNState *state, // library's state
THTensor *input, // input tensor
THTensor *output, // [OUT] output tensor
- accreal lambda); // HardShrink parameter
+ real lambda); // HardShrink parameter
TH_API void THNN_(HardShrink_updateGradInput)(
THNNState *state, // library's state
THTensor *input, // input tensor
THTensor *gradOutput, // gradient w.r.t. module's output
THTensor *gradInput, // [OUT] gradient w.r.t. input
- accreal lambda); // HardShrink parameter
+ real lambda); // HardShrink parameter
// HardTanh clamps the values to the interval [min_val; max_val].
TH_API void THNN_(HardTanh_updateOutput)(
THNNState *state, // library's state
THTensor *input, // input tensor
THTensor *output, // [OUT] output tensor
- accreal min_val, // lower threshold
- accreal max_val, // upper threshold
- bool inplace);
+ real min_val, // lower threshold
+ real max_val,
+ bool inplace); // upper threshold
TH_API void THNN_(HardTanh_updateGradInput)(
THNNState *state, // library's state
THTensor *input, // input tensor
THTensor *gradOutput, // gradient w.r.t. module's output
THTensor *gradInput, // [OUT] gradient w.r.t. the input
- accreal min_val, // lower threshold
- accreal max_val, // upper threshold
- bool inplace);
+ real min_val, // lower threshold
+ real max_val,
+ bool inplace); // upper threshold
TH_API void THNN_(L1Cost_updateOutput)(
THNNState *state, // library's state
@@ -158,14 +158,14 @@ TH_API void THNN_(LeakyReLU_updateOutput)(
THNNState *state, // library's state
THTensor *input, // [MODIFIED] input tensor
THTensor *output, // [OUT] output tensor
- accreal negval, // negative part slope
+ real negval, // negative part slope
bool inplace); // if true, modifies the input tensor and sets the output tensor on it (no additional memory is allocated)
TH_API void THNN_(LeakyReLU_updateGradInput)(
THNNState *state, // library's state
THTensor *input, // input tensor
THTensor *gradOutput, // [MODIFIED] gradient w.r.t. module's output
THTensor *gradInput, // [OUT] gradient w.r.t. the input
- accreal negval, // negative part slope
+ real negval, // negative part slope
bool inplace); // if true, modifies gradOutput and sets gradInput onto it (no additional memory is allocated)
TH_API void THNN_(LogSigmoid_updateOutput)(
@@ -201,14 +201,14 @@ TH_API void THNN_(LookupTable_accGradParameters)(
THIndexTensor *indices, // [OPTIONAL]
bool scaleGradByFreq,
int paddingValue,
- accreal scale);
+ real scale);
TH_API void THNN_(LookupTable_renorm)(
THNNState *state, // library's state
- THIndexTensor *idx, // vector containing row indices (modified in function)
+ THIndexTensor *idx, // vector that contains row indices (modified in function)
THTensor *weight, // 2D tensor whose rows will be renormalized
- accreal maxNorm, // maximum norm
- accreal normType); // the norm type (e.g., normType=2, then it's 2-norm)
+ real maxNorm, // maximum norm
+ real normType); // the norm type (e.g., normType=2, then it's 2-norm)
TH_API void THNN_(MarginCriterion_updateOutput)(
THNNState *state, // library's state
@@ -216,15 +216,14 @@ TH_API void THNN_(MarginCriterion_updateOutput)(
THTensor *target, // target tensor (should contain only 1s and -1s)
THTensor *output, // [OUT] a one-element tensor containing the loss
bool sizeAverage, // if true, the loss is normalized by **total number of elements**
- accreal margin); // a margin that is required for the loss to be 0
-
+ real margin); // a margin that is required for the loss to be 0
TH_API void THNN_(MarginCriterion_updateGradInput)(
THNNState *state, // library's state
THTensor *input, // input tensor
THTensor *target, // target tensor (should contin only 1s and -1s)
THTensor *gradInput, // [OUT] gradient w.r.t. module's input
bool sizeAverage, // if true, the gradient is normalized by **total number of elements**
- accreal margin); // a margin that is required for the loss to be 0
+ real margin); // a margin that is required for the loss to be 0
TH_API void THNN_(SoftMarginCriterion_updateOutput)(
THNNState *state,
@@ -276,7 +275,7 @@ TH_API void THNN_(MultiMarginCriterion_updateOutput)(
bool sizeAverage,
int p,
THTensor* weights, // [OPTIONAL]
- accreal margin);
+ real margin);
TH_API void THNN_(MultiMarginCriterion_updateGradInput)(
THNNState *state,
THTensor *input,
@@ -285,7 +284,7 @@ TH_API void THNN_(MultiMarginCriterion_updateGradInput)(
bool sizeAverage,
int p,
THTensor *weights, // [OPTIONAL]
- accreal margin);
+ real margin);
TH_API void THNN_(PReLU_updateOutput)(
THNNState *state,
@@ -310,7 +309,7 @@ TH_API void THNN_(PReLU_accGradParameters)(
THTensor *gradWeightBuf,
THTensor *gradWeightBuf2,
THIndex_t nOutputPlane,
- accreal scale);
+ real scale);
TH_API void THNN_(Linear_updateOutput)(
THNNState *state,
@@ -335,15 +334,15 @@ TH_API void THNN_(Linear_accGradParameters)(
THTensor *gradWeight,
THTensor *gradBias,
THTensor *addBuffer,
- accreal scale);
+ real scale);
TH_API void THNN_(RReLU_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
THTensor *noise,
- accreal lower,
- accreal upper,
+ real lower,
+ real upper,
bool train,
bool inplace,
THGenerator *generator);
@@ -353,8 +352,8 @@ TH_API void THNN_(RReLU_updateGradInput)(
THTensor *gradOutput,
THTensor *gradInput,
THTensor *noise,
- accreal lower,
- accreal upper,
+ real lower,
+ real upper,
bool train,
bool inplace);
@@ -397,28 +396,28 @@ TH_API void THNN_(SoftPlus_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
- accreal beta,
- accreal threshold);
+ real beta,
+ real threshold);
TH_API void THNN_(SoftPlus_updateGradInput)(
THNNState *state,
THTensor *input,
THTensor *gradOutput,
THTensor *gradInput,
THTensor *output,
- accreal beta,
- accreal threshold);
+ real beta,
+ real threshold);
TH_API void THNN_(SoftShrink_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
- accreal lambda);
+ real lambda);
TH_API void THNN_(SoftShrink_updateGradInput)(
THNNState *state,
THTensor *input,
THTensor *gradOutput,
THTensor *gradInput,
- accreal lambda);
+ real lambda);
TH_API void THNN_(SparseLinear_updateOutput)(
THNNState *state,
@@ -434,8 +433,8 @@ TH_API void THNN_(SparseLinear_accGradParameters)(
THTensor *gradBias,
THTensor *weight,
THTensor *bias,
- accreal weightDecay,
- accreal scale);
+ real weightDecay,
+ real scale);
TH_API void THNN_(SparseLinear_zeroGradParameters)(
THNNState *state,
THTensor *gradWeight,
@@ -448,7 +447,7 @@ TH_API void THNN_(SparseLinear_updateParameters)(
THTensor *gradWeight,
THTensor *gradBias,
THTensor *lastInput,
- accreal learningRate);
+ real learningRate);
TH_API void THNN_(SparseLinear_legacyUpdateOutput)(
THNNState *state,
THTensor *input,
@@ -463,8 +462,8 @@ TH_API void THNN_(SparseLinear_legacyAccGradParameters)(
THTensor *gradBias,
THTensor *weight,
THTensor *bias,
- accreal weightDecay,
- accreal scale);
+ real weightDecay,
+ real scale);
TH_API void THNN_(SparseLinear_legacyZeroGradParameters)(
THNNState *state,
THTensor *gradWeight,
@@ -477,13 +476,13 @@ TH_API void THNN_(SparseLinear_legacyUpdateParameters)(
THTensor *gradWeight,
THTensor *gradBias,
THTensor *lastInput,
- accreal learningRate);
+ real learningRate);
TH_API void THNN_(Sqrt_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
- accreal eps);
+ real eps);
TH_API void THNN_(Sqrt_updateGradInput)(
THNNState *state,
THTensor *input,
@@ -516,16 +515,16 @@ TH_API void THNN_(Threshold_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
- accreal threshold,
- accreal val,
+ real threshold,
+ real val,
bool inplace);
TH_API void THNN_(Threshold_updateGradInput)(
THNNState *state,
THTensor *input,
THTensor *gradOutput,
THTensor *gradInput,
- accreal threshold,
- accreal val,
+ real threshold,
+ real val,
bool inplace);
TH_API void THNN_(TemporalConvolution_updateOutput)(
@@ -551,7 +550,7 @@ TH_API void THNN_(TemporalConvolution_accGradParameters)(
THTensor *gradWeight,
THTensor *gradBias,
int kW, int dW,
- accreal scale);
+ real scale);
TH_API void THNN_(TemporalMaxPooling_updateOutput)(
THNNState *state,
THTensor *input,
@@ -587,7 +586,7 @@ TH_API void THNN_(TemporalSubSampling_accGradParameters)(
THTensor *gradWeight,
THTensor *gradBias,
int kW, int dW,
- accreal scale);
+ real scale);
TH_API void THNN_(BatchNormalization_updateOutput)(
THNNState *state,
@@ -649,7 +648,7 @@ TH_API void THNN_(SpatialConvolutionMap_accGradParameters)(
int nInputPlane, // number of input planes
int nOutputPlane, // number of output planes
int dW, int dH, // stride
- accreal scale); // scaling factor
+ real scale); // scaling factor
TH_API void THNN_(SpatialConvolutionMM_updateOutput)(
THNNState *state,
@@ -684,7 +683,7 @@ TH_API void THNN_(SpatialConvolutionMM_accGradParameters)(
int kW, int kH,
int dW, int dH,
int padW, int padH,
- accreal scale);
+ real scale);
TH_API void THNN_(SpatialConvolutionLocal_updateOutput)(
THNNState *state,
@@ -725,7 +724,7 @@ TH_API void THNN_(SpatialConvolutionLocal_accGradParameters)(
int padW, int padH,
long inputWidth, long inputHeight,
long outputWidth, long outputHeight,
- accreal scale);
+ real scale);
TH_API void THNN_(SpatialAdaptiveMaxPooling_updateOutput)(
THNNState *state,
@@ -812,7 +811,7 @@ TH_API void THNN_(SpatialFullConvolution_accGradParameters)(
int dW, int dH,
int padW, int padH,
int adjW, int adjH,
- accreal scale);
+ real scale);
TH_API void THNN_(SpatialFullConvolutionMap_updateOutput)(
THNNState *state, // library state
@@ -845,7 +844,7 @@ TH_API void THNN_(SpatialFullConvolutionMap_accGradParameters)(
int nInputPlane, // number of input planes
int nOutputPlane, // number of output planes
int dW, int dH, // stride
- accreal scale); // scaling factor
+ real scale); // scaling factor
TH_API void THNN_(SpatialDilatedConvolution_updateOutput)(
THNNState *state,
@@ -884,7 +883,7 @@ TH_API void THNN_(SpatialDilatedConvolution_accGradParameters)(
int dW, int dH,
int padW, int padH,
int dilationW, int dilationH,
- accreal scale);
+ real scale);
TH_API void THNN_(SpatialMaxPooling_updateOutput)(
THNNState *state,
@@ -966,7 +965,7 @@ TH_API void THNN_(SpatialSubSampling_accGradParameters)(
THTensor *gradBias,
int kW, int kH,
int dW, int dH,
- accreal scale);
+ real scale);
TH_API void THNN_(SpatialUpSamplingNearest_updateOutput)(
THNNState *state,
@@ -1059,7 +1058,7 @@ TH_API void THNN_(VolumetricConvolution_accGradParameters)(
THTensor *fgradInput,
int dT, int dW, int dH,
int pT, int pW, int pH,
- accreal scale);
+ real scale);
TH_API void THNN_(VolumetricConvolutionMM_updateOutput)(
THNNState *state,
@@ -1092,7 +1091,7 @@ TH_API void THNN_(VolumetricConvolutionMM_accGradParameters)(
int kT, int kW, int kH,
int dT, int dW, int dH,
int pT, int pW, int pH,
- accreal scale);
+ real scale);
TH_API void THNN_(VolumetricFullConvolution_updateOutput)(
THNNState *state, // library state
@@ -1127,7 +1126,7 @@ TH_API void THNN_(VolumetricFullConvolution_accGradParameters)(
int dT, int dW, int dH, // stride
int pT, int pW, int pH, // padding
int aT, int aW, int aH, // extra output adjustment
- accreal scale); // scaling factor
+ real scale); // scaling factor
TH_API void THNN_(VolumetricDilatedConvolution_updateOutput)(
THNNState *state,
@@ -1166,7 +1165,7 @@ TH_API void THNN_(VolumetricDilatedConvolution_accGradParameters)(
int dT, int dW, int dH,
int padT, int padW, int padH,
int dilationT, int dilationW, int dilationH,
- accreal scale);
+ real scale);
TH_API void THNN_(VolumetricMaxPooling_updateOutput)(
THNNState *state,
@@ -1274,4 +1273,5 @@ TH_API void THNN_(VolumetricReplicationPadding_updateGradInput)(
int pleft, int pright,
int ptop, int pbottom,
int pfront, int pback);
+
#endif
diff --git a/lib/THNN/generic/TemporalConvolution.c b/lib/THNN/generic/TemporalConvolution.c
index a107da2..14297ad 100644
--- a/lib/THNN/generic/TemporalConvolution.c
+++ b/lib/THNN/generic/TemporalConvolution.c
@@ -48,11 +48,11 @@ void THNN_(TemporalConvolution_updateOutput)(
THTensor *outputWindow, *inputWindow;
int nInputFrame, nOutputFrame;
long k, i;
-
+
int dimS = 0; // sequence dimension
int dimF = 1; // feature dimension
-
- if (input->nDimension == 3)
+
+ if (input->nDimension == 3)
{
dimS = 1;
dimF = 2;
@@ -93,7 +93,7 @@ void THNN_(TemporalConvolution_updateOutput)(
nFrame, inputFrameStride*input->size[1],
kW*input->size[1], 1);
- THTensor_(setStorage2d)(outputWindow, output->storage,
+ THTensor_(setStorage2d)(outputWindow, output->storage,
output->storageOffset + k*output->size[1],
nFrame, outputFrameStride*output->size[1],
output->size[1], 1);
@@ -108,18 +108,18 @@ void THNN_(TemporalConvolution_updateOutput)(
THTensor *outputSample = THTensor_(new)();
THTensor *inputSample = THTensor_(new)();
int nBatchFrame = input->size[0];
-
+
THTensor_(resize3d)(output,
nBatchFrame,
nOutputFrame,
outputFrameSize);
-
+
for(i = 0; i < nBatchFrame; i++)
{
THTensor_(select)(outputSample, output, 0, i);
THTensor_(select)(inputSample, input, 0, i);
long nOutputSampleFrame = nOutputFrame;
-
+
/* bias first */
for(k = 0; k < nOutputFrame; k++)
{
@@ -140,7 +140,7 @@ void THNN_(TemporalConvolution_updateOutput)(
nFrame, inputFrameStride*inputSample->size[1],
kW*inputSample->size[1], 1);
- THTensor_(setStorage2d)(outputWindow, outputSample->storage,
+ THTensor_(setStorage2d)(outputWindow, outputSample->storage,
outputSample->storageOffset + k*outputSample->size[1],
nFrame, outputFrameStride*outputSample->size[1],
outputSample->size[1], 1);
@@ -175,11 +175,11 @@ void THNN_(TemporalConvolution_updateGradInput)(
THTensor *gradOutputWindow;
THTensor *gradInputWindow;
long k, i;
-
+
int dimS = 0; // sequence dimension
int dimF = 1; // feature dimension
-
- if (gradOutput->nDimension == 3)
+
+ if (gradOutput->nDimension == 3)
{
dimS = 1;
dimF = 2;
@@ -227,13 +227,13 @@ void THNN_(TemporalConvolution_updateGradInput)(
THTensor *gradOutputSample = THTensor_(new)();
THTensor *gradInputSample = THTensor_(new)();
int nBatchFrame = input->size[0];
-
+
for(i = 0; i < nBatchFrame; i++)
{
THTensor_(select)(gradOutputSample, gradOutput, 0, i);
THTensor_(select)(gradInputSample, gradInput, 0, i);
int nOutputSampleFrame = nOutputFrame;
-
+
/* ouch */
for(k = 0; nOutputSampleFrame > 0; k++)
{
@@ -274,20 +274,19 @@ void THNN_(TemporalConvolution_accGradParameters)(
THTensor *gradBias,
int kW,
int dW,
- accreal scale_)
+ real scale)
{
- real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
long nInputFrame;
long nOutputFrame;
THTensor *gradOutputWindow;
THTensor *inputWindow;
long k, i;
-
+
int dimS = 0; // sequence dimension
int dimF = 1; // feature dimension
-
- if (gradOutput->nDimension == 3)
+
+ if (gradOutput->nDimension == 3)
{
dimS = 1;
dimF = 2;
@@ -302,7 +301,7 @@ void THNN_(TemporalConvolution_accGradParameters)(
gradOutput = THTensor_(newContiguous)(gradOutput);
gradOutputWindow = THTensor_(new)();
inputWindow = THTensor_(new)();
-
+
if (input->nDimension == 2)
{
/* bias first */
@@ -325,7 +324,7 @@ void THNN_(TemporalConvolution_accGradParameters)(
nFrame, inputFrameStride*input->size[1],
kW*input->size[1], 1);
- THTensor_(setStorage2d)(gradOutputWindow, gradOutput->storage,
+ THTensor_(setStorage2d)(gradOutputWindow, gradOutput->storage,
gradOutput->storageOffset + k*gradOutput->size[1],
nFrame, outputFrameStride*gradOutput->size[1],
gradOutput->size[1], 1);
@@ -340,13 +339,13 @@ void THNN_(TemporalConvolution_accGradParameters)(
THTensor *gradOutputSample = THTensor_(new)();
THTensor *inputSample = THTensor_(new)();
int nBatchFrame = input->size[0];
-
+
for(i = 0; i < nBatchFrame; i++)
{
THTensor_(select)(gradOutputSample, gradOutput, 0, i);
THTensor_(select)(inputSample, input, 0, i);
int nOutputSampleFrame = nOutputFrame;
-
+
/* bias first */
for(k = 0; k < nOutputFrame; k++)
{
@@ -367,7 +366,7 @@ void THNN_(TemporalConvolution_accGradParameters)(
nFrame, inputFrameStride*inputSample->size[1],
kW*inputSample->size[1], 1);
- THTensor_(setStorage2d)(gradOutputWindow, gradOutputSample->storage,
+ THTensor_(setStorage2d)(gradOutputWindow, gradOutputSample->storage,
gradOutputSample->storageOffset + k*gradOutputSample->size[1],
nFrame, outputFrameStride*gradOutputSample->size[1],
gradOutputSample->size[1], 1);
diff --git a/lib/THNN/generic/TemporalSubSampling.c b/lib/THNN/generic/TemporalSubSampling.c
index 8728d14..bfc7d30 100644
--- a/lib/THNN/generic/TemporalSubSampling.c
+++ b/lib/THNN/generic/TemporalSubSampling.c
@@ -51,7 +51,7 @@ void THNN_(TemporalSubSampling_updateOutput)(
THTensor *outputFrame, *inputWindow;
int nInputFrame, nOutputFrame;
long k;
-
+
THNN_(TemporalSubSampling_shapeCheck)(state, input, NULL, kW, dW, &inputFrameSize);
outputFrame = THTensor_(new)();
@@ -63,7 +63,7 @@ void THNN_(TemporalSubSampling_updateOutput)(
THTensor_(resize2d)(output,
nOutputFrame,
inputFrameSize);
-
+
for(k = 0; k < nOutputFrame; k++)
{
THTensor_(narrow)(inputWindow, input, 0, k*dW, kW);
@@ -124,9 +124,8 @@ void THNN_(TemporalSubSampling_accGradParameters)(
THTensor *gradBias,
int kW,
int dW,
- accreal scale_)
+ real scale)
{
- real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
THTensor *gradOutputFrame;
THTensor *inputWindow, *buffer;
long k;
diff --git a/lib/THNN/generic/Threshold.c b/lib/THNN/generic/Threshold.c
index 949c7a0..dd2a698 100644
--- a/lib/THNN/generic/Threshold.c
+++ b/lib/THNN/generic/Threshold.c
@@ -6,12 +6,10 @@ void THNN_(Threshold_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
- accreal threshold_,
- accreal val_,
+ real threshold,
+ real val,
bool inplace)
{
- real threshold = TH_CONVERT_ACCREAL_TO_REAL(threshold_);
- real val = TH_CONVERT_ACCREAL_TO_REAL(val_);
if (inplace)
{
TH_TENSOR_APPLY(real, input,
@@ -34,12 +32,10 @@ void THNN_(Threshold_updateGradInput)(
THTensor *input,
THTensor *gradOutput,
THTensor *gradInput,
- accreal threshold_,
- accreal val_,
+ real threshold,
+ real val,
bool inplace)
{
- real threshold = TH_CONVERT_ACCREAL_TO_REAL(threshold_);
- real val = TH_CONVERT_ACCREAL_TO_REAL(val_);
THNN_CHECK_NELEMENT(input, gradOutput);
if (inplace)
{
diff --git a/lib/THNN/generic/VolumetricConvolution.c b/lib/THNN/generic/VolumetricConvolution.c
index bcd1a0f..4fd8ac3 100644
--- a/lib/THNN/generic/VolumetricConvolution.c
+++ b/lib/THNN/generic/VolumetricConvolution.c
@@ -170,9 +170,8 @@ void THNN_(VolumetricConvolution_accGradParameters)(
int pT,
int pW,
int pH,
- accreal scale_)
+ real scale)
{
- real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
THArgCheck(pT != 0 || pW != 0 || pH != 0, 9, "padding not supported by CPU backend"); // sharing signature with CUDA version
THNN_ARGCHECK(gradWeight->nDimension == 5, 4, gradWeight,
diff --git a/lib/THNN/generic/VolumetricConvolutionMM.c b/lib/THNN/generic/VolumetricConvolutionMM.c
index f8d9eb2..4085e2b 100644
--- a/lib/THNN/generic/VolumetricConvolutionMM.c
+++ b/lib/THNN/generic/VolumetricConvolutionMM.c
@@ -575,9 +575,8 @@ void THNN_(VolumetricConvolutionMM_accGradParameters)(
int kT, int kW, int kH,
int dT, int dW, int dH,
int pT, int pW, int pH,
- accreal scale_)
+ real scale)
{
- real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
int freeWeight;
int nOutputPlane = (int)gradWeight->size[0];
diff --git a/lib/THNN/generic/VolumetricDilatedConvolution.c b/lib/THNN/generic/VolumetricDilatedConvolution.c
index e31ff2b..d2d5c88 100644
--- a/lib/THNN/generic/VolumetricDilatedConvolution.c
+++ b/lib/THNN/generic/VolumetricDilatedConvolution.c
@@ -299,9 +299,8 @@ void THNN_(VolumetricDilatedConvolution_accGradParameters)(
int dT, int dW, int dH,
int padT, int padW, int padH,
int dilationT, int dilationW, int dilationH,
- accreal scale_)
+ real scale)
{
- real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
THNN_(VolumetricDilatedConvolution_shapeCheck)(
input, gradOutput, gradWeight, gradBias,
kT, kH, kW, dT, dH, dW, padT, padH, padW,
diff --git a/lib/THNN/generic/VolumetricFullConvolution.c b/lib/THNN/generic/VolumetricFullConvolution.c
index 61c3a44..b6ef1cd 100644
--- a/lib/THNN/generic/VolumetricFullConvolution.c
+++ b/lib/THNN/generic/VolumetricFullConvolution.c
@@ -402,9 +402,8 @@ void THNN_(VolumetricFullConvolution_accGradParameters)(
int dT, int dW, int dH, // stride
int pT, int pW, int pH, // padding
int aT, int aW, int aH, // extra output adjustment
- accreal scale_)
+ real scale)
{
- real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
// number of input & output planes and kernel size is indirectly defined by the gradWeight tensor
THNN_(VolumetricFullConvolution_shapeCheck)(
input, gradOutput, gradWeight, gradBias,