github.com/torch/nn.git
author    Adam Paszke <adam.paszke@gmail.com>    2017-04-19 23:21:25 +0300
committer Adam Paszke <adam.paszke@gmail.com>    2017-04-20 00:01:31 +0300
commit    a4e32dccc3fe4981c72f66f758105ad721ff5f23 (patch)
tree      169736e066b56482250b39e2feb94b217121891a
parent    0c35de7a71b9c91108f02eb042af7a7cf657e484 (diff)
Add more newContiguous calls and checks
 lib/THNN/generic/PReLU.c                        |  7
 lib/THNN/generic/SparseLinear.c                 |  8
 lib/THNN/generic/SpatialConvolutionLocal.c      | 85
 lib/THNN/generic/SpatialConvolutionMM.c         | 56
 lib/THNN/generic/SpatialConvolutionMap.c        | 30
 lib/THNN/generic/SpatialDilatedConvolution.c    |  8
 lib/THNN/generic/SpatialFullConvolution.c       |  8
 lib/THNN/generic/SpatialFullConvolutionMap.c    |  2
 lib/THNN/generic/SpatialSubSampling.c           |  2
 lib/THNN/generic/TemporalConvolution.c          |  3
 lib/THNN/generic/TemporalRowConvolution.c       |  2
 lib/THNN/generic/TemporalSubSampling.c          |  3
 lib/THNN/generic/VolumetricDilatedConvolution.c |  2
 lib/THNN/generic/VolumetricFullConvolution.c    |  8
 14 files changed, 135 insertions(+), 89 deletions(-)
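
The commit applies one convention across all of the files below: read-only operands (input, weight, bias, gradOutput) are wrapped in THTensor_(newContiguous), which returns a new reference and copies only when the tensor is not already contiguous, and that reference is freed at the end of the function; tensors that are written or accumulated in place (output, gradWeight, gradBias) are instead required to be contiguous up front with THArgCheck. A minimal sketch of the convention, using a hypothetical op name "MyOp" and illustrative argument indices:

void THNN_(MyOp_updateOutput)(
          THNNState *state,
          THTensor *input,
          THTensor *output,
          THTensor *weight,
          THTensor *bias)            /* bias may be NULL */
{
  /* Read-only operands: take a contiguous reference (a copy is made only
     if the tensor is non-contiguous) and release it when done. */
  input  = THTensor_(newContiguous)(input);
  weight = THTensor_(newContiguous)(weight);
  bias   = bias ? THTensor_(newContiguous)(bias) : bias;

  /* Tensors written in place are not copied; they are required to be
     contiguous up front (argument index is illustrative). */
  THArgCheck(THTensor_(isContiguous)(output), 3, "output must be contiguous");

  /* ... kernel working on THTensor_(data)(input), etc. ... */

  /* Every newContiguous reference must be freed, or the copy leaks. */
  THTensor_(free)(input);
  THTensor_(free)(weight);
  if (bias) THTensor_(free)(bias);
}
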
diff --git a/lib/THNN/generic/PReLU.c b/lib/THNN/generic/PReLU.c
index 8ddee29..488322f 100644
--- a/lib/THNN/generic/PReLU.c
+++ b/lib/THNN/generic/PReLU.c
@@ -82,6 +82,7 @@ void THNN_(PReLU_updateGradInput)(
{
input = THTensor_(newContiguous)(input);
gradOutput = THTensor_(newContiguous)(gradOutput);
+ weight = THTensor_(newContiguous)(weight);
const real *input_data = THTensor_(data)(input);
const real *gradOutput_data = THTensor_(data)(gradOutput);
const real *weight_data = THTensor_(data)(weight);
@@ -126,6 +127,7 @@ void THNN_(PReLU_updateGradInput)(
}
THTensor_(free)(input);
THTensor_(free)(gradOutput);
+ THTensor_(free)(weight);
}
}
@@ -143,10 +145,10 @@ void THNN_(PReLU_accGradParameters)(
{
real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
THNN_CHECK_NELEMENT(input, gradOutput);
- real *gradWeight_data = THTensor_(data)(gradWeight);
if (nOutputPlane == 0)
{
+ real *gradWeight_data = THTensor_(data)(gradWeight);
real sum = 0;
TH_TENSOR_APPLY2(real, input, real, gradOutput,
if ((*input_data) <= 0)
@@ -156,8 +158,10 @@ void THNN_(PReLU_accGradParameters)(
}
else
{
+ THArgCheck(THTensor_(isContiguous)(gradWeight), 6, "gradWeight needs to be contiguous");
input = THTensor_(newContiguous)(input);
gradOutput = THTensor_(newContiguous)(gradOutput);
+ weight = THTensor_(newContiguous)(weight);
long bs = 1, ks = 1;
{
long input_ndim = THTensor_(nDimension)(input);
@@ -196,6 +200,7 @@ void THNN_(PReLU_accGradParameters)(
}
THTensor_(free)(input);
THTensor_(free)(gradOutput);
+ THTensor_(free)(weight);
}
}
diff --git a/lib/THNN/generic/SparseLinear.c b/lib/THNN/generic/SparseLinear.c
index 0c52541..d9cec8c 100644
--- a/lib/THNN/generic/SparseLinear.c
+++ b/lib/THNN/generic/SparseLinear.c
@@ -62,6 +62,8 @@ void THNN_(SparseLinear_updateOutput)(
THLongTensor * csr = THLongTensor_newWithSize1d(batchSize+1);
THLongTensor_zero(csr);
+ weight = THTensor_(newContiguous)(weight);
+
//#pragma omp parallel for private(i, h, hp0, hp1) schedule(static) if (nnz > 10000)
for (i=0; i<nnz; i++) {
hp0 = (long)(THNN_(get2d)(input, i, 0)) - 1;
@@ -106,6 +108,7 @@ void THNN_(SparseLinear_updateOutput)(
}
THTensor_(free)(output_row);
THLongTensor_free(csr);
+ THTensor_(free)(weight);
}
void THNN_(SparseLinear_legacyUpdateOutput)(
@@ -123,6 +126,8 @@ void THNN_(SparseLinear_legacyUpdateOutput)(
THArgCheck(THTensor_(isContiguous)(output), 3, "output must be contiguous");
THArgCheck(THNN_(checkSize1D)(bias, outDim), 5, "bias size wrong");
+ weight = THTensor_(newContiguous)(weight);
+
long batchSize = THTensor_(size)(input, 0);
long nnz = THTensor_(size)(input, 1);
THTensor_(resize2d)(output, batchSize, outDim);
@@ -157,6 +162,7 @@ void THNN_(SparseLinear_legacyUpdateOutput)(
THTensor_(cadd)(output_row, bias, 1.0, output_row);
}
THTensor_(free)(output_row);
+ THTensor_(free)(weight);
}
void THNN_(SparseLinear_accGradParameters)(
@@ -189,6 +195,7 @@ void THNN_(SparseLinear_accGradParameters)(
THLongTensor* csc = THLongTensor_newWithSize1d(inDim+1);
THLongTensor_zero(csc);
+ weight = THTensor_(newContiguous)(weight);
#pragma omp parallel for private(i, h, hp0, hp1) schedule(static) if (nnz > 10000)
for (i = 0; i < nnz; i++) {
@@ -235,6 +242,7 @@ void THNN_(SparseLinear_accGradParameters)(
if (weightDecay != 0) {
THTensor_(cadd)(gradWeight, gradWeight, weightDecay, weight);
}
+ THTensor_(free)(weight);
}
void THNN_(SparseLinear_legacyAccGradParameters)(
diff --git a/lib/THNN/generic/SpatialConvolutionLocal.c b/lib/THNN/generic/SpatialConvolutionLocal.c
index 0531df3..6db5a5d 100644
--- a/lib/THNN/generic/SpatialConvolutionLocal.c
+++ b/lib/THNN/generic/SpatialConvolutionLocal.c
@@ -3,17 +3,17 @@
#else
static inline void THNN_(SpatialConvolutionLocal_shapeCheck)(
- THTensor *input, THTensor *gradOutput,
- THTensor *weight, THTensor *bias,
- int kH, int kW, int dH,
- int dW, int padH, int padW,
- long inputHeight, long inputWidth,
- long outputHeight, long outputWidth) {
+ THTensor *input, THTensor *gradOutput,
+ THTensor *weight, THTensor *bias,
+ int kH, int kW, int dH,
+ int dW, int padH, int padW,
+ long inputHeight, long inputWidth,
+ long outputHeight, long outputWidth) {
THArgCheck(kW > 0 && kH > 0, 9,
- "kernel size should be greater than zero, but got kH: %d kW: %d", kH, kW);
+ "kernel size should be greater than zero, but got kH: %d kW: %d", kH, kW);
THArgCheck(dW > 0 && dH > 0, 11,
- "stride should be greater than zero, but got dH: %d dW: %d", dH, dW);
+ "stride should be greater than zero, but got dH: %d dW: %d", dH, dW);
int ndim = input->nDimension;
int dimf = 0;
@@ -27,7 +27,7 @@ static inline void THNN_(SpatialConvolutionLocal_shapeCheck)(
}
THNN_ARGCHECK(ndim == 3 || ndim == 4, 2, input,
- "3D or 4D input tensor expected but got: %s");
+ "3D or 4D input tensor expected but got: %s");
long nInputPlane = weight->size[2] / (kH * kW);
long nOutputPlane = weight->size[1];
@@ -47,21 +47,22 @@ static inline void THNN_(SpatialConvolutionLocal_shapeCheck)(
}
}
-static int THNN_(view_weight_local)(THTensor **_weight)
+static THTensor* THNN_(view_weight_local)(THTensor *_weight)
{
- THTensor *weight = *_weight;
+ THTensor *weight = THTensor_(newContiguous)(_weight);
THArgCheck(weight->nDimension == 3 || weight->nDimension == 6, 4,
"weight tensor should be 3D or 6D - got %dD", weight->nDimension);
if (weight->nDimension == 6) {
long s1 = weight->size[0] * weight->size[1];
long s2 = weight->size[2];
long s3 = weight->size[3] * weight->size[4] * weight->size[5];
- *_weight = THTensor_(newWithStorage3d)(weight->storage,
- weight->storageOffset,
- s1, -1, s2, -1, s3, -1);
- return 1;
+ THTensor *old_weight = weight;
+ weight = THTensor_(newWithStorage3d)(weight->storage,
+ weight->storageOffset,
+ s1, -1, s2, -1, s3, -1);
+ THTensor_(free)(old_weight);
}
- return 0;
+ return weight;
}
static void THNN_(SpatialConvolutionLocal_updateOutput_frame)
@@ -76,8 +77,8 @@ static void THNN_(SpatialConvolutionLocal_updateOutput_frame)
THTensor *output3d, *finput3d;
THNN_(unfolded_copy)(finput, input, kW, kH, dW, dH, padW, padH,
- nInputPlane, inputWidth, inputHeight,
- outputWidth, outputHeight);
+ nInputPlane, inputWidth, inputHeight,
+ outputWidth, outputHeight);
THTensor_(copy)(output, bias);
@@ -116,7 +117,7 @@ void THNN_(SpatialConvolutionLocal_updateOutput)(
long inputWidth, long inputHeight,
long outputWidth, long outputHeight)
{
- int freeWeight = THNN_(view_weight_local)(&weight);
+ weight = THNN_(view_weight_local)(weight);
THNN_(SpatialConvolutionLocal_shapeCheck)
(input, NULL, weight, bias, kH, kW, dH, dW, padH, padW,
@@ -154,10 +155,10 @@ void THNN_(SpatialConvolutionLocal_updateOutput)(
THTensor *finput_t = THTensor_(newSelect)(finput, 0, t);
THNN_(SpatialConvolutionLocal_updateOutput_frame)
- (input_t, output_t, weight, bias, finput_t,
- kW, kH, dW, dH, padW, padH,
- nInputPlane, inputWidth, inputHeight,
- nOutputPlane, outputWidth, outputHeight);
+ (input_t, output_t, weight, bias, finput_t,
+ kW, kH, dW, dH, padW, padH,
+ nInputPlane, inputWidth, inputHeight,
+ nOutputPlane, outputWidth, outputHeight);
THTensor_(free)(input_t);
THTensor_(free)(output_t);
@@ -166,8 +167,7 @@ void THNN_(SpatialConvolutionLocal_updateOutput)(
}
THTensor_(free)(input);
- if (freeWeight)
- THTensor_(free)(weight);
+ THTensor_(free)(weight);
}
@@ -198,8 +198,8 @@ static void THNN_(SpatialConvolutionLocal_updateGradInput_frame)
THTensor_(zero)(gradInput);
THNN_(unfolded_acc)(fgradInput, gradInput, kW, kH, dW, dH, padW, padH,
- nInputPlane, inputWidth, inputHeight,
- outputWidth, outputHeight);
+ nInputPlane, inputWidth, inputHeight,
+ outputWidth, outputHeight);
}
@@ -217,7 +217,7 @@ void THNN_(SpatialConvolutionLocal_updateGradInput)(
long inputWidth, long inputHeight,
long outputWidth, long outputHeight)
{
- int freeWeight = THNN_(view_weight_local)(&weight);
+ weight = THNN_(view_weight_local)(weight);
THNN_(SpatialConvolutionLocal_shapeCheck)
(input, gradOutput, weight, NULL, kH, kW, dH, dW, padH, padW,
@@ -255,10 +255,10 @@ void THNN_(SpatialConvolutionLocal_updateGradInput)(
THTensor *fgradInput_t = THTensor_(newSelect)(fgradInput, 0, t);
THNN_(SpatialConvolutionLocal_updateGradInput_frame)
- (gradInput_t, gradOutput_t, tweight, fgradInput_t,
- kW, kH, dW, dH, padW, padH,
- nInputPlane, inputWidth, inputHeight,
- nOutputPlane, outputWidth, outputHeight);
+ (gradInput_t, gradOutput_t, tweight, fgradInput_t,
+ kW, kH, dW, dH, padW, padH,
+ nInputPlane, inputWidth, inputHeight,
+ nOutputPlane, outputWidth, outputHeight);
THTensor_(free)(gradInput_t);
THTensor_(free)(gradOutput_t);
@@ -269,9 +269,7 @@ void THNN_(SpatialConvolutionLocal_updateGradInput)(
THTensor_(free)(tweight);
THTensor_(free)(input);
THTensor_(free)(gradOutput);
- if (freeWeight)
- THTensor_(free)(weight);
-
+ THTensor_(free)(weight);
}
static void THNN_(SpatialConvolutionLocal_accGradParameters_frame)
@@ -317,8 +315,10 @@ void THNN_(SpatialConvolutionLocal_accGradParameters)(
long outputWidth, long outputHeight,
accreal scale_)
{
+ THArgCheck(THTensor_(isContiguous)(gradWeight), 4, "gradWeight needs to be contiguous");
+ THArgCheck(THTensor_(isContiguous)(gradBias), 5, "gradBias needs to be contiguous");
real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
- int freeWeight = THNN_(view_weight_local)(&gradWeight);
+ gradWeight = THNN_(view_weight_local)(gradWeight);
THNN_(SpatialConvolutionLocal_shapeCheck)
(input, gradOutput, gradWeight, gradBias, kH, kW, dH, dW, padH, padW,
@@ -349,10 +349,10 @@ void THNN_(SpatialConvolutionLocal_accGradParameters)(
THTensor *finput_t = THTensor_(newSelect)(finput, 0, t);
THNN_(SpatialConvolutionLocal_accGradParameters_frame)
- (gradOutput_t, gradWeight, gradBias, finput_t, scale,
- kW, kH, dW, dH, padW, padH,
- nInputPlane, inputWidth, inputHeight,
- nOutputPlane, outputWidth, outputHeight);
+ (gradOutput_t, gradWeight, gradBias, finput_t, scale,
+ kW, kH, dW, dH, padW, padH,
+ nInputPlane, inputWidth, inputHeight,
+ nOutputPlane, outputWidth, outputHeight);
THTensor_(free)(gradOutput_t);
THTensor_(free)(finput_t);
@@ -361,10 +361,7 @@ void THNN_(SpatialConvolutionLocal_accGradParameters)(
THTensor_(free)(input);
THTensor_(free)(gradOutput);
-
- if (freeWeight)
- THTensor_(free)(gradWeight);
-
+ THTensor_(free)(gradWeight);
}
#endif
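
Note on the view_weight_local refactor above: the helper no longer reshapes the caller's tensor in place and returns a flag saying whether a temporary was created; it always returns a reference the caller owns (a contiguous copy of the weight, additionally flattened from 6D to 3D when needed), so every call site ends with an unconditional free. A sketch of the resulting caller-side pattern (comments are explanatory, not part of the diff):

  weight = THNN_(view_weight_local)(weight);   /* always a new 3D reference */
  /* ... shape check and per-frame computation using weight ... */
  THTensor_(free)(weight);                     /* unconditional; replaces the old freeWeight flag */
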
diff --git a/lib/THNN/generic/SpatialConvolutionMM.c b/lib/THNN/generic/SpatialConvolutionMM.c
index 15a7627..631f6af 100644
--- a/lib/THNN/generic/SpatialConvolutionMM.c
+++ b/lib/THNN/generic/SpatialConvolutionMM.c
@@ -53,6 +53,19 @@ static inline void THNN_(SpatialConvolutionMM_shapeCheck)(
}
}
+static THTensor* THNN_(view_weight_MM2d)(THTensor *weight) {
+ weight = THTensor_(newContiguous)(weight);
+ if (weight->nDimension == 4) {
+ long s1 = weight->size[0];
+ long s2 = weight->size[1] * weight->size[2] * weight->size[3];
+ THTensor *old_weight = weight;
+ weight = THTensor_(newWithStorage2d)(weight->storage, weight->storageOffset,
+ s1, -1, s2, -1);
+ THTensor_(free)(old_weight);
+ }
+ return weight;
+}
+
static void THNN_(SpatialConvolutionMM_updateOutput_frame)(
THTensor *input,
THTensor *output,
@@ -111,15 +124,7 @@ void THNN_(SpatialConvolutionMM_updateOutput)(
int padW,
int padH)
{
- int freeWeight = 0;
-
- if (weight->nDimension == 4) {
- long s1 = weight->size[0];
- long s2 = weight->size[1] * weight->size[2] * weight->size[3];
- weight = THTensor_(newWithStorage2d)(weight->storage, weight->storageOffset,
- s1, -1, s2, -1);
- freeWeight = 1;
- }
+ weight = THNN_(view_weight_MM2d)(weight);
THNN_(SpatialConvolutionMM_shapeCheck)
(input, NULL, weight, bias, kH, kW, dH, dW, padH, padW);
@@ -182,8 +187,7 @@ void THNN_(SpatialConvolutionMM_updateOutput)(
}
THTensor_(free)(input);
- if (freeWeight)
- THTensor_(free)(weight);
+ THTensor_(free)(weight);
}
static void THNN_(SpatialConvolutionMM_updateGradInput_frame)(
@@ -228,15 +232,7 @@ void THNN_(SpatialConvolutionMM_updateGradInput)(
int padW,
int padH)
{
- int freeWeight = 0;
-
- if (weight->nDimension == 4) {
- long s1 = weight->size[0];
- long s2 = weight->size[1] * weight->size[2] * weight->size[3];
- weight = THTensor_(newWithStorage2d)(weight->storage, weight->storageOffset,
- s1, -1, s2, -1);
- freeWeight = 1;
- }
+ weight = THNN_(view_weight_MM2d)(weight);
THNN_(SpatialConvolutionMM_shapeCheck)
(input, gradOutput, weight, NULL, kH, kW, dH, dW, padH, padW);
@@ -285,8 +281,7 @@ void THNN_(SpatialConvolutionMM_updateGradInput)(
THTensor_(free)(tweight);
THTensor_(free)(input);
THTensor_(free)(gradOutput);
- if (freeWeight)
- THTensor_(free)(weight);
+ THTensor_(free)(weight);
}
static void THNN_(SpatialConvolutionMM_accGradParameters_frame)(
@@ -338,17 +333,11 @@ void THNN_(SpatialConvolutionMM_accGradParameters)(
int padH,
accreal scale_)
{
+ THArgCheck(THTensor_(isContiguous)(gradWeight), 4, "gradWeight needs to be contiguous");
+ THArgCheck(THTensor_(isContiguous)(gradBias), 5, "gradBias needs to be contiguous");
+
real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
- int freeWeight = 0;
-
- if (gradWeight->nDimension == 4) {
- long s1 = gradWeight->size[0];
- long s2 = gradWeight->size[1] * gradWeight->size[2] * gradWeight->size[3];
- gradWeight = THTensor_(newWithStorage2d)(gradWeight->storage,
- gradWeight->storageOffset,
- s1, -1, s2, -1);
- freeWeight = 1;
- }
+ gradWeight = THNN_(view_weight_MM2d)(gradWeight);
THNN_(SpatialConvolutionMM_shapeCheck)
(input, gradOutput, gradWeight, gradBias, kH, kW, dH, dW, padH, padW);
@@ -381,8 +370,7 @@ void THNN_(SpatialConvolutionMM_accGradParameters)(
THTensor_(free)(input);
THTensor_(free)(gradOutput);
- if (freeWeight)
- THTensor_(free)(gradWeight);
+ THTensor_(free)(gradWeight);
}
#endif
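
view_weight_MM2d plays the same role for the MM path: it replaces three duplicated copies of the 4D-to-2D flatten and, because it begins with newContiguous, also guarantees the matrix-multiply kernels see contiguous weight data. A sketch of what it returns for a conventional 4D convolution weight (shape names are illustrative):

  /* weight: nOutputPlane x nInputPlane x kH x kW (4D case) */
  THTensor *w2d = THNN_(view_weight_MM2d)(weight);
  /* w2d is nOutputPlane x (nInputPlane*kH*kW), shares storage with the
     contiguous reference created inside the helper, and must be freed: */
  THTensor_(free)(w2d);
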
diff --git a/lib/THNN/generic/SpatialConvolutionMap.c b/lib/THNN/generic/SpatialConvolutionMap.c
index 750b212..142a035 100644
--- a/lib/THNN/generic/SpatialConvolutionMap.c
+++ b/lib/THNN/generic/SpatialConvolutionMap.c
@@ -13,10 +13,6 @@ void THNN_(SpatialConvolutionMap_updateOutput)(
"3D weight tensor expected (connTable:size(%d) x kH x kW)", TH_INDEX_BASE
);
- real *weight_data = THTensor_(data)(weight);
- real *bias_data = THTensor_(data)(bias);
- real *connTable_data = THTensor_(data)(connTable);
-
int dimw = 2;
int dimh = 1;
int dimc = 0;
@@ -51,10 +47,16 @@ void THNN_(SpatialConvolutionMap_updateOutput)(
/* contiguous */
input = THTensor_(newContiguous)(input);
output = THTensor_(newContiguous)(output);
+ weight = THTensor_(newContiguous)(weight);
+ bias = bias ? THTensor_(newContiguous)(bias) : bias;
+ connTable = THTensor_(newContiguous)(connTable);
/* get raw pointers */
real *input_data = THTensor_(data)(input);
real *output_data = THTensor_(data)(output);
+ real *weight_data = THTensor_(data)(weight);
+ real *bias_data = THTensor_(data)(bias);
+ real *connTable_data = THTensor_(data)(connTable);
long p;
#pragma omp parallel for private(p)
@@ -96,6 +98,9 @@ void THNN_(SpatialConvolutionMap_updateOutput)(
/* clean up */
THTensor_(free)(input);
THTensor_(free)(output);
+ THTensor_(free)(weight);
+ if (bias) THTensor_(free)(bias);
+ THTensor_(free)(connTable);
}
void THNN_(SpatialConvolutionMap_updateGradInput)(
@@ -109,9 +114,6 @@ void THNN_(SpatialConvolutionMap_updateGradInput)(
"3D weight tensor expected (connTable:size(%d) x kH x kW)", TH_INDEX_BASE
);
- real *weight_data = THTensor_(data)(weight);
- real *connTable_data = THTensor_(data)(connTable);
-
/* and dims */
int dimw = 2;
int dimh = 1;
@@ -133,6 +135,8 @@ void THNN_(SpatialConvolutionMap_updateGradInput)(
/* contiguous */
gradInput = THTensor_(newContiguous)(gradInput);
gradOutput = THTensor_(newContiguous)(gradOutput);
+ weight = THTensor_(newContiguous)(weight);
+ connTable = THTensor_(newContiguous)(connTable);
/* Resize/Zero */
THTensor_(resizeAs)(gradInput, input);
@@ -141,6 +145,8 @@ void THNN_(SpatialConvolutionMap_updateGradInput)(
/* get raw pointers */
real *gradInput_data = THTensor_(data)(gradInput);
real *gradOutput_data = THTensor_(data)(gradOutput);
+ real *weight_data = THTensor_(data)(weight);
+ real *connTable_data = THTensor_(data)(connTable);
long p;
#pragma omp parallel for private(p)
@@ -172,6 +178,8 @@ void THNN_(SpatialConvolutionMap_updateGradInput)(
/* clean up */
THTensor_(free)(gradInput);
THTensor_(free)(gradOutput);
+ THTensor_(free)(weight);
+ THTensor_(free)(connTable);
}
void THNN_(SpatialConvolutionMap_accGradParameters)(
@@ -193,9 +201,6 @@ void THNN_(SpatialConvolutionMap_accGradParameters)(
"3D gradWeight tensor expected (connTable:size(%d) x kH x kW)", TH_INDEX_BASE
);
- real *gradWeight_data = THTensor_(data)(gradWeight);
- real *gradBias_data = THTensor_(data)(gradBias);
-
/* and dims */
int dimw = 2;
int dimh = 1;
@@ -217,10 +222,15 @@ void THNN_(SpatialConvolutionMap_accGradParameters)(
/* contiguous */
input = THTensor_(newContiguous)(input);
gradOutput = THTensor_(newContiguous)(gradOutput);
+ THArgCheck(THTensor_(isContiguous)(gradWeight), 4, "gradWeight needs to be contiguous");
+ THArgCheck(THTensor_(isContiguous)(gradBias), 5, "gradBias needs to be contiguous");
/* get raw pointers */
real *input_data = THTensor_(data)(input);
real *gradOutput_data = THTensor_(data)(gradOutput);
+ real *gradWeight_data = THTensor_(data)(gradWeight);
+ real *gradBias_data = THTensor_(data)(gradBias);
+
long k;
/* gradients wrt bias */
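
In SpatialConvolutionMap the raw-pointer extraction is also reordered: THTensor_(data) is now called only after the corresponding tensor has been made (or checked) contiguous, since the returned pointer is only safe to index linearly for a contiguous layout. Condensed from the hunks above:

  weight    = THTensor_(newContiguous)(weight);
  connTable = THTensor_(newContiguous)(connTable);
  real *weight_data    = THTensor_(data)(weight);     /* safe: now contiguous */
  real *connTable_data = THTensor_(data)(connTable);
  /* ... convolution kernel ... */
  THTensor_(free)(weight);
  THTensor_(free)(connTable);
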
diff --git a/lib/THNN/generic/SpatialDilatedConvolution.c b/lib/THNN/generic/SpatialDilatedConvolution.c
index d345f7a..b07aa3a 100644
--- a/lib/THNN/generic/SpatialDilatedConvolution.c
+++ b/lib/THNN/generic/SpatialDilatedConvolution.c
@@ -81,6 +81,8 @@ void THNN_(SpatialDilatedConvolution_updateOutput)(
int nOutputPlane = weight->size[0];
input = THTensor_(newContiguous)(input);
+ weight = THTensor_(newContiguous)(weight);
+ bias = bias ? THTensor_(newContiguous)(bias) : bias;
int batch = 1;
if (input->nDimension == 3) {
// Force batch
@@ -178,6 +180,8 @@ void THNN_(SpatialDilatedConvolution_updateOutput)(
}
THTensor_(free)(input);
+ THTensor_(free)(weight);
+ if (bias) THTensor_(free)(bias);
}
void THNN_(SpatialDilatedConvolution_updateGradInput)(
@@ -201,6 +205,7 @@ void THNN_(SpatialDilatedConvolution_updateGradInput)(
int nOutputPlane = weight->size[0];
input = THTensor_(newContiguous)(input);
+ weight = THTensor_(newContiguous)(weight);
gradOutput = THTensor_(newContiguous)(gradOutput);
int batch = 1;
if (input->nDimension == 3) {
@@ -274,6 +279,7 @@ void THNN_(SpatialDilatedConvolution_updateGradInput)(
THTensor_(free)(input);
THTensor_(free)(gradOutput);
+ THTensor_(free)(weight);
}
@@ -302,6 +308,8 @@ void THNN_(SpatialDilatedConvolution_accGradParameters)(
input = THTensor_(newContiguous)(input);
gradOutput = THTensor_(newContiguous)(gradOutput);
+ THArgCheck(THTensor_(isContiguous)(gradWeight), 4, "gradWeight needs to be contiguous");
+ THArgCheck(THTensor_(isContiguous)(gradBias), 5, "gradBias needs to be contiguous");
int batch = 1;
if (input->nDimension == 3) {
// Force batch
diff --git a/lib/THNN/generic/SpatialFullConvolution.c b/lib/THNN/generic/SpatialFullConvolution.c
index e2a835d..cbc94a9 100644
--- a/lib/THNN/generic/SpatialFullConvolution.c
+++ b/lib/THNN/generic/SpatialFullConvolution.c
@@ -131,6 +131,8 @@ void THNN_(SpatialFullConvolution_updateOutput)(
int nOutputPlane = THTensor_(size)(weight,1);
input = THTensor_(newContiguous)(input);
+ weight = THTensor_(newContiguous)(weight);
+ bias = bias ? THTensor_(newContiguous)(bias) : bias;
int batch = 1;
if (input->nDimension == 3) {
// Force batch
@@ -230,6 +232,8 @@ void THNN_(SpatialFullConvolution_updateOutput)(
}
THTensor_(free)(input);
+ THTensor_(free)(weight);
+ if (bias) THTensor_(free)(bias);
}
void THNN_(SpatialFullConvolution_updateGradInput)(
@@ -252,6 +256,7 @@ void THNN_(SpatialFullConvolution_updateGradInput)(
input = THTensor_(newContiguous)(input);
gradOutput = THTensor_(newContiguous)(gradOutput);
+ weight = THTensor_(newContiguous)(weight);
int batch = 1;
if (input->nDimension == 3) {
// Force batch
@@ -327,6 +332,7 @@ void THNN_(SpatialFullConvolution_updateGradInput)(
THTensor_(free)(input);
THTensor_(free)(gradOutput);
+ THTensor_(free)(weight);
}
@@ -353,6 +359,8 @@ void THNN_(SpatialFullConvolution_accGradParameters)(
input = THTensor_(newContiguous)(input);
gradOutput = THTensor_(newContiguous)(gradOutput);
+ THArgCheck(THTensor_(isContiguous)(gradWeight), 4, "gradWeight needs to be contiguous");
+ THArgCheck(THTensor_(isContiguous)(gradBias), 5, "gradBias needs to be contiguous");
int batch = 1;
if (input->nDimension == 3) {
// Force batch
diff --git a/lib/THNN/generic/SpatialFullConvolutionMap.c b/lib/THNN/generic/SpatialFullConvolutionMap.c
index e98dea0..6952fbe 100644
--- a/lib/THNN/generic/SpatialFullConvolutionMap.c
+++ b/lib/THNN/generic/SpatialFullConvolutionMap.c
@@ -7,6 +7,8 @@ void THNN_(SpatialFullConvolutionMap_updateOutput)(
THTensor *connTable, int nInputPlane, int nOutputPlane,
int dW, int dH)
{
+ THArgCheck(THTensor_(isContiguous)(weight), 4, "weight must be contiguous");
+ THArgCheck(!bias || THTensor_(isContiguous)(bias), 5, "bias must be contiguous");
THArgCheck(
weight != NULL && weight->nDimension == 3
&& connTable != NULL && connTable->size[0] == weight->size[0], 4,
diff --git a/lib/THNN/generic/SpatialSubSampling.c b/lib/THNN/generic/SpatialSubSampling.c
index 3f01540..4c077bc 100644
--- a/lib/THNN/generic/SpatialSubSampling.c
+++ b/lib/THNN/generic/SpatialSubSampling.c
@@ -10,6 +10,7 @@ static inline void THNN_(SpatialSubSampling_shapeCheck)(
int ndims = input->nDimension;
THNN_ARGCHECK(input->nDimension == 3 || input->nDimension == 4, 2, input,
"3D or 4D input tensor expected but got: %s");
+ THArgCheck(THTensor_(isContiguous)(weight), 4, "weight must be contiguous");
int nInputPlane = THTensor_(size)(weight, 0);
@@ -40,6 +41,7 @@ void THNN_(SpatialSubSampling_updateOutput)(
int kW, int kH,
int dW, int dH)
{
+ THArgCheck(!bias || THTensor_(isContiguous)(bias), 5, "bias must be contiguous");
real *weight_data = THTensor_(data)(weight);
real *bias_data = THTensor_(data)(bias);
diff --git a/lib/THNN/generic/TemporalConvolution.c b/lib/THNN/generic/TemporalConvolution.c
index 8b379e6..8cfd97d 100644
--- a/lib/THNN/generic/TemporalConvolution.c
+++ b/lib/THNN/generic/TemporalConvolution.c
@@ -58,6 +58,8 @@ void THNN_(TemporalConvolution_updateOutput)(
dimF = 2;
}
+ THArgCheck(THTensor_(isContiguous)(weight), 4, "weight must be contiguous");
+ THArgCheck(!bias || THTensor_(isContiguous)(bias), 5, "bias must be contiguous");
THNN_(TemporalConvolution_shapeCheck)
(state, input, kW, dW, &inputFrameSize);
input = THTensor_(newContiguous)(input);
@@ -187,6 +189,7 @@ void THNN_(TemporalConvolution_updateGradInput)(
dimF = 2;
}
+ THArgCheck(THTensor_(isContiguous)(weight), 4, "weight must be contiguous");
THNN_(TemporalConvolution_shapeCheck)(
state, input, kW, dW, NULL);
nInputFrame = input->size[dimS];
diff --git a/lib/THNN/generic/TemporalRowConvolution.c b/lib/THNN/generic/TemporalRowConvolution.c
index 1b35794..e3ae41e 100644
--- a/lib/THNN/generic/TemporalRowConvolution.c
+++ b/lib/THNN/generic/TemporalRowConvolution.c
@@ -18,6 +18,8 @@ static inline void THNN_(TemporalRowConvolution_shapeCheck)(
"stride should be greater than zero, but got dW: %d", dW);
THNN_ARGCHECK(weight->nDimension == 3, 3, weight,
"3D weight tensor expected, but got: %s");
+ THArgCheck(THTensor_(isContiguous)(weight), 4, "weight must be contiguous");
+ THArgCheck(!bias || THTensor_(isContiguous)(bias), 5, "bias must be contiguous");
if (bias != NULL) {
THNN_CHECK_DIM_SIZE(bias, 1, 0, weight->size[0]);
diff --git a/lib/THNN/generic/TemporalSubSampling.c b/lib/THNN/generic/TemporalSubSampling.c
index 8728d14..6b788df 100644
--- a/lib/THNN/generic/TemporalSubSampling.c
+++ b/lib/THNN/generic/TemporalSubSampling.c
@@ -52,6 +52,8 @@ void THNN_(TemporalSubSampling_updateOutput)(
int nInputFrame, nOutputFrame;
long k;
+ THArgCheck(THTensor_(isContiguous)(weight), 4, "weight must be contiguous");
+ THArgCheck(!bias || THTensor_(isContiguous)(bias), 4, "bias must be contiguous");
THNN_(TemporalSubSampling_shapeCheck)(state, input, NULL, kW, dW, &inputFrameSize);
outputFrame = THTensor_(new)();
@@ -91,6 +93,7 @@ void THNN_(TemporalSubSampling_updateGradInput)(
THTensor *gradInputWindow, *buffer, *kwunit;
long k;
+ THArgCheck(THTensor_(isContiguous)(weight), 4, "weight must be contiguous");
THNN_(TemporalSubSampling_shapeCheck)(state, input, gradOutput, kW, dW, NULL);
gradOutputFrame = THTensor_(new)();
diff --git a/lib/THNN/generic/VolumetricDilatedConvolution.c b/lib/THNN/generic/VolumetricDilatedConvolution.c
index e31ff2b..07c2737 100644
--- a/lib/THNN/generic/VolumetricDilatedConvolution.c
+++ b/lib/THNN/generic/VolumetricDilatedConvolution.c
@@ -20,6 +20,8 @@ static inline void THNN_(VolumetricDilatedConvolution_shapeCheck)(
THArgCheck(dilationT > 0 && dilationW > 0 && dilationH > 0, 15,
"dilation should be greater than zero, but got dilationT: %d, dilationH: %d, dilationW: %d",
dilationT, dilationH, dilationW);
+ THArgCheck(THTensor_(isContiguous)(weight), 4, "weight must be contiguous");
+ THArgCheck(!bias || THTensor_(isContiguous)(bias), 5, "bias must be contiguous");
if (bias != NULL) {
THNN_CHECK_DIM_SIZE(bias, 1, 0, weight->size[0]);
}
diff --git a/lib/THNN/generic/VolumetricFullConvolution.c b/lib/THNN/generic/VolumetricFullConvolution.c
index 62d0d74..070a926 100644
--- a/lib/THNN/generic/VolumetricFullConvolution.c
+++ b/lib/THNN/generic/VolumetricFullConvolution.c
@@ -172,6 +172,8 @@ void THNN_(VolumetricFullConvolution_updateOutput)(
const int kW = (int)weight->size[4];
input = THTensor_(newContiguous)(input);
+ weight = THTensor_(newContiguous)(weight);
+ bias = bias ? THTensor_(newContiguous)(bias) : bias;
int batch = 1;
if (input->nDimension == 4)
{
@@ -280,6 +282,8 @@ void THNN_(VolumetricFullConvolution_updateOutput)(
}
THTensor_(free)(input);
+ THTensor_(free)(weight);
+ if (bias) THTensor_(free)(bias);
}
void THNN_(VolumetricFullConvolution_updateGradInput)(
@@ -308,6 +312,7 @@ void THNN_(VolumetricFullConvolution_updateGradInput)(
const int kW = (int)weight->size[4];
input = THTensor_(newContiguous)(input);
+ weight = THTensor_(newContiguous)(weight);
gradOutput = THTensor_(newContiguous)(gradOutput);
int batch = 1;
@@ -391,6 +396,7 @@ void THNN_(VolumetricFullConvolution_updateGradInput)(
THTensor_(free)(input);
THTensor_(free)(gradOutput);
+ THTensor_(free)(weight);
}
void THNN_(VolumetricFullConvolution_accGradParameters)(
@@ -423,6 +429,8 @@ void THNN_(VolumetricFullConvolution_accGradParameters)(
input = THTensor_(newContiguous)(input);
gradOutput = THTensor_(newContiguous)(gradOutput);
+ THArgCheck(THTensor_(isContiguous)(gradWeight), 4, "gradWeight needs to be contiguous");
+ THArgCheck(THTensor_(isContiguous)(gradBias), 5, "gradBias needs to be contiguous");
int batch = 1;
if (input->nDimension == 4)