github.com/torch/nn.git
author    Gregory Chanan <gchanan@fb.com>  2016-12-02 21:04:02 +0300
committer Gregory Chanan <gchanan@fb.com>  2016-12-06 20:06:03 +0300
commit    e7d033f64f2f9c1ba9c0808beef084c60def8798 (patch)
tree      e40b0f1a71b53e2fe73f2411084321691cc84970
parent    d94416902bdc793960f06b7c784328fd6af70447 (diff)
Improve shape checks for VolumetricAveragePooling, VolumetricDilatedMaxPooling,
VolumetricMaxUnpooling, VolumetricReplicationPadding.
-rw-r--r--  lib/THNN/generic/VolumetricAveragePooling.c     | 81
-rw-r--r--  lib/THNN/generic/VolumetricMaxUnpooling.c       | 64
-rw-r--r--  lib/THNN/generic/VolumetricReplicationPadding.c | 77
3 files changed, 182 insertions(+), 40 deletions(-)
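
The patch factors the scattered argument checks of each module into a single shapeCheck helper that both updateOutput and updateGradInput call, so gradOutput is validated against the sizes the forward pass would produce. The following standalone sketch is not part of the patch: it only mirrors the arithmetic the new VolumetricAveragePooling_shapeCheck helper relies on, using plain integers in place of THTensor metadata; the function name check_avg_pool_shape and the main() driver are illustrative.

/* Standalone sketch (not part of the diff): the pooling shape rules,
 * restated with plain integers instead of THTensor sizes. */
#include <stdio.h>

static int check_avg_pool_shape(long itime, long iheight, long iwidth,
                                int kT, int kH, int kW,
                                int dT, int dH, int dW)
{
  if (kT <= 0 || kH <= 0 || kW <= 0) {
    fprintf(stderr, "kernel size should be greater than zero, "
                    "but got kT: %d kH: %d kW: %d\n", kT, kH, kW);
    return -1;
  }
  if (dT <= 0 || dH <= 0 || dW <= 0) {
    fprintf(stderr, "stride should be greater than zero, "
                    "but got dT: %d dH: %d dW: %d\n", dT, dH, dW);
    return -1;
  }
  if (itime < kT || iheight < kH || iwidth < kW) {
    fprintf(stderr, "input image (T: %ld H: %ld W: %ld) smaller than "
                    "kernel size (kT: %d kH: %d kW: %d)\n",
            itime, iheight, iwidth, kT, kH, kW);
    return -1;
  }
  /* Same floor-division output-size formula as the patch. */
  printf("output: T=%ld H=%ld W=%ld\n",
         (itime - kT) / dT + 1,
         (iheight - kH) / dH + 1,
         (iwidth - kW) / dW + 1);
  return 0;
}

int main(void)
{
  check_avg_pool_shape(8, 32, 32, 2, 2, 2, 2, 2, 2);  /* accepted: 4x16x16 */
  check_avg_pool_shape(1, 32, 32, 2, 2, 2, 2, 2, 2);  /* rejected: T < kT   */
  return 0;
}

In updateGradInput the same helper is called with the real gradOutput, and each gradOutput dimension is then compared against these computed extents via THNN_CHECK_DIM_SIZE.
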
diff --git a/lib/THNN/generic/VolumetricAveragePooling.c b/lib/THNN/generic/VolumetricAveragePooling.c
index a317cbb..91c870e 100644
--- a/lib/THNN/generic/VolumetricAveragePooling.c
+++ b/lib/THNN/generic/VolumetricAveragePooling.c
@@ -2,6 +2,70 @@
#define TH_GENERIC_FILE "generic/VolumetricAveragePooling.c"
#else
+static inline void THNN_(VolumetricAveragePooling_shapeCheck)(
+ THNNState *state,
+ THTensor *input,
+ THTensor *gradOutput,
+ int kT,
+ int kW,
+ int kH,
+ int dT,
+ int dW,
+ int dH) {
+ long nslices;
+ long itime;
+ long iheight;
+ long iwidth;
+ long otime;
+ long oheight;
+ long owidth;
+ int ndim = input->nDimension;
+ int dimN = 0;
+ int dimt = 1;
+ int dimh = 2;
+ int dimw = 3;
+
+ if (input->nDimension == 5)
+ {
+ dimN++;
+ dimt++;
+ dimh++;
+ dimw++;
+ }
+
+ THArgCheck(kT > 0 && kW > 0 && kH > 0, 5,
+ "kernel size should be greater than zero, but got kT: %d kH: %d kW: %d",
+ kT, kH, kW);
+ THArgCheck(dT > 0 && dW > 0 && dH > 0, 8,
+ "stride should be greater than zero, but got dT: %d dH: %d dW: %d",
+ dT, dH, dW);
+ THNN_ARGCHECK(input->nDimension == 4 || input->nDimension == 5, 2, input,
+ "4D or 5D (batch mode) tensor expected for input, but got: %s");
+
+ THArgCheck(input->size[dimw] >= kW && input->size[dimh] >= kH
+ && input->size[dimt] >= kT, 2,
+ "input image (T: %d H: %d W: %d) smaller than "
+ "kernel size (kT: %d kH: %d kW: %d)",
+ input->size[dimt], input->size[dimh], input->size[dimw],
+ kT, kH, kW);
+
+ /* sizes */
+ nslices = input->size[dimN];
+ itime = input->size[dimt];
+ iheight = input->size[dimh];
+ iwidth = input->size[dimw];
+ otime = (itime - kT) / dT + 1;
+ oheight = (iheight - kH) / dH + 1;
+ owidth = (iwidth - kW) / dW + 1;
+
+ if (gradOutput != NULL) {
+ THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimN, nslices);
+ THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimt, otime);
+ THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, oheight);
+ THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimw, owidth);
+ }
+}
+
static void THNN_(VolumetricAveragePooling_updateOutput_frame)(
real *input_p,
real *output_p,
@@ -81,8 +145,9 @@ void THNN_(VolumetricAveragePooling_updateOutput)(
real *input_data;
real *output_data;
- THNN_ARGCHECK(input->nDimension == 4 || input->nDimension == 5, 2, input,
- "4D or 5D (batch mode) tensor expected for input, but got: %s");
+ THNN_(VolumetricAveragePooling_shapeCheck)(
+ state, input, NULL, kT, kW, kH,
+ dT, dW, dH);
int dimN = 0;
int dimt = 1;
@@ -97,13 +162,6 @@ void THNN_(VolumetricAveragePooling_updateOutput)(
dimw++;
}
- THArgCheck(input->size[dimw] >= kW && input->size[dimh] >= kH
- && input->size[dimt] >= kT, 2,
- "input image (T: %d H: %d W: %d) smaller than "
- "kernel size (kT: %d kH: %d kW: %d)",
- input->size[dimt], input->size[dimh], input->size[dimw],
- kT, kH, kW);
-
/* sizes */
nslices = input->size[dimN];
itime = input->size[dimt];
@@ -244,7 +302,10 @@ void THNN_(VolumetricAveragePooling_updateGradInput)(
int dimh = 2;
int dimw = 3;
- // TODO: gradOutput shape check
+ THNN_(VolumetricAveragePooling_shapeCheck)(
+ state, input, gradOutput, kT, kW, kH,
+ dT, dW, dH);
+
/* get contiguous gradOutput */
gradOutput = THTensor_(newContiguous)(gradOutput);
diff --git a/lib/THNN/generic/VolumetricMaxUnpooling.c b/lib/THNN/generic/VolumetricMaxUnpooling.c
index f2f879d..45fd116 100644
--- a/lib/THNN/generic/VolumetricMaxUnpooling.c
+++ b/lib/THNN/generic/VolumetricMaxUnpooling.c
@@ -2,6 +2,51 @@
#define TH_GENERIC_FILE "generic/VolumetricMaxUnpooling.c"
#else
+static inline void THNN_(VolumetricMaxUnpooling_shapeCheck)(
+ THNNState *state,
+ THTensor *input,
+ THTensor *gradOutput,
+ THIndexTensor *indices,
+ int oT,
+ int oW,
+ int oH,
+ int dT,
+ int dW,
+ int dH,
+ int pT,
+ int pW,
+ int pH)
+{
+ THNN_ARGCHECK(input->nDimension == 4 || input->nDimension == 5, 2, input,
+ "4D or 5D (batch mode) tensor expected for input, but got: %s");
+
+ THNN_CHECK_SHAPE_INDICES(input, indices);
+
+ THArgCheck(dT > 0 && dW > 0 && dH > 0, 10,
+ "stride should be greater than zero, but got dT: %d dH: %d dW: %d",
+ dT, dH, dW);
+
+ int dimw = 3;
+ int dimh = 2;
+ int dimt = 1;
+ if (input->nDimension == 5)
+ {
+ dimt++;
+ dimw++;
+ dimh++;
+ }
+
+ if (gradOutput != NULL) {
+ if (oT != gradOutput->size[dimt] || oW != gradOutput->size[dimw] || oH != gradOutput->size[dimh])
+ {
+ THError(
+ "Inconsistent gradOutput size. oT= %d, oH= %d, oW= %d, gradOutput: %dx%dx%d",
+ oT, oH, oW, gradOutput->size[dimt], gradOutput->size[dimh], gradOutput->size[dimw]
+ );
+ }
+ }
+}
+
static void THNN_(VolumetricMaxUnpooling_updateOutput_frame)(
real *input_p,
real *output_p,
@@ -95,10 +140,9 @@ void THNN_(VolumetricMaxUnpooling_updateOutput)(
real *output_data;
THIndex_t *indices_data;
- THNN_ARGCHECK(input->nDimension == 4 || input->nDimension == 5, 2, input,
- "4D or 5D (batch mode) tensor expected for input, but got: %s");
-
- THNN_CHECK_SHAPE_INDICES(input, indices);
+ THNN_(VolumetricMaxUnpooling_shapeCheck)(
+ state, input, NULL, indices,
+ oT, oW, oH, dT, dW, dH, pT, pW, pH);
if (input->nDimension == 5)
{
@@ -252,7 +296,9 @@ void THNN_(VolumetricMaxUnpooling_updateGradInput)(
real *gradOutput_data;
THIndex_t *indices_data;
- THNN_CHECK_SHAPE_INDICES(input, indices);
+ THNN_(VolumetricMaxUnpooling_shapeCheck)(
+ state, input, gradOutput, indices,
+ oT, oW, oH, dT, dW, dH, pT, pW, pH);
// TODO: check gradOutput shape
/* get contiguous gradOutput */
@@ -277,14 +323,6 @@ void THNN_(VolumetricMaxUnpooling_updateGradInput)(
iH = input->size[dimh];
iW = input->size[dimw];
- if (oT != gradOutput->size[dimt] || oW != gradOutput->size[dimw] || oH != gradOutput->size[dimh])
- {
- THError(
- "Inconsistent gradOutput size. oT= %d, oH= %d, oW= %d, gradOutput: %dx%d",
- oT, oH, oW,gradOutput->size[dimh], gradOutput->size[dimw]
- );
- }
-
/* get raw pointers */
gradInput_data = THTensor_(data)(gradInput);
gradOutput_data = THTensor_(data)(gradOutput);
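
For VolumetricMaxUnpooling the helper also centralizes the gradOutput consistency rule that was previously checked inline in updateGradInput (and whose old error message reported only two of the three gradOutput extents): the declared output size oT x oH x oW must match gradOutput exactly. Below is a standalone sketch of that rule, not taken from the patch; the struct and function names are illustrative stand-ins for the tensor size fields.

/* Standalone sketch (not part of the diff): the gradOutput consistency rule. */
#include <stdio.h>

struct dims3 { long t, h, w; };  /* stand-in for gradOutput->size[dimt/dimh/dimw] */

static int check_unpool_grad_output(int oT, int oH, int oW, struct dims3 gradOutput)
{
  if (oT != gradOutput.t || oH != gradOutput.h || oW != gradOutput.w) {
    fprintf(stderr,
            "Inconsistent gradOutput size. oT= %d, oH= %d, oW= %d, "
            "gradOutput: %ldx%ldx%ld\n",
            oT, oH, oW, gradOutput.t, gradOutput.h, gradOutput.w);
    return -1;
  }
  return 0;
}

int main(void)
{
  struct dims3 good = { 4, 16, 16 }, bad = { 4, 16, 15 };
  check_unpool_grad_output(4, 16, 16, good);  /* passes silently       */
  check_unpool_grad_output(4, 16, 16, bad);   /* reports the mismatch  */
  return 0;
}
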
diff --git a/lib/THNN/generic/VolumetricReplicationPadding.c b/lib/THNN/generic/VolumetricReplicationPadding.c
index aebddbd..8d9dda3 100644
--- a/lib/THNN/generic/VolumetricReplicationPadding.c
+++ b/lib/THNN/generic/VolumetricReplicationPadding.c
@@ -2,6 +2,59 @@
#define TH_GENERIC_FILE "generic/VolumetricReplicationPadding.c"
#else
+static inline void THNN_(VolumetricReplicationPadding_shapeCheck)(
+ THNNState *state,
+ THTensor *input,
+ THTensor *gradOutput,
+ int pleft, int pright,
+ int ptop, int pbottom,
+ int pfront, int pback) {
+ int dimw = 3;
+ int dimh = 2;
+ int dimd = 1;
+ long idepth;
+ long iheight;
+ long iwidth;
+ long odepth;
+ long oheight;
+ long owidth;
+
+ THNN_ARGCHECK(input->nDimension == 4 || input->nDimension == 5, 2, input,
+ "4D or 5D (batch mode) tensor expected for input, but got: %s");
+
+ if (input->nDimension == 5)
+ {
+ dimw++;
+ dimh++;
+ dimd++;
+ }
+
+ /* sizes */
+ idepth = input->size[dimd];
+ iheight = input->size[dimh];
+ iwidth = input->size[dimw];
+ odepth = idepth + pfront + pback;
+ oheight = iheight + ptop + pbottom;
+ owidth = iwidth + pleft + pright;
+
+ THArgCheck(owidth >= 1 || oheight >= 1 || odepth >= 1, 2,
+ "input (D: %d H: %d, W: %d)is too small."
+ " Calculated output D: %d H: %d W: %d",
+ idepth, iheight, iwidth, odepth, oheight, owidth);
+
+ if (gradOutput != NULL) {
+ THArgCheck(owidth == THTensor_(size)(gradOutput, dimw), 3,
+ "gradOutput width unexpected. Expected: %d, Got: %d",
+ owidth, THTensor_(size)(gradOutput, dimw));
+ THArgCheck(oheight == THTensor_(size)(gradOutput, dimh), 3,
+ "gradOutput height unexpected. Expected: %d, Got: %d",
+ oheight, THTensor_(size)(gradOutput, dimh));
+ THArgCheck(odepth == THTensor_(size)(gradOutput, dimd), 3,
+ "gradOutput depth unexpected. Expected: %d, Got: %d",
+ odepth, THTensor_(size)(gradOutput, dimd));
+ }
+}
+
static void THNN_(VolumetricReplicationPadding_updateOutput_frame)(
real *input_p, real *output_p,
long nslices,
@@ -85,8 +138,9 @@ void THNN_(VolumetricReplicationPadding_updateOutput)(THNNState *state,
real *input_data;
real *output_data;
- THNN_ARGCHECK(input->nDimension == 4 || input->nDimension == 5, 2, input,
- "4D or 5D (batch mode) tensor expected for input, but got: %s");
+ THNN_(VolumetricReplicationPadding_shapeCheck)(
+ state, input, NULL, pleft, pright,
+ ptop, pbottom, pfront, pback);
if (input->nDimension == 5)
{
@@ -106,11 +160,6 @@ void THNN_(VolumetricReplicationPadding_updateOutput)(THNNState *state,
oheight = iheight + ptop + pbottom;
owidth = iwidth + pleft + pright;
- THArgCheck(owidth >= 1 || oheight >= 1 || odepth >= 1, 2,
- "input (D: %d H: %d, W: %d)is too small."
- " Calculated output D: %d H: %d W: %d",
- idepth, iheight, iwidth, odepth, oheight, owidth);
-
/* get contiguous input */
input = THTensor_(newContiguous)(input);
@@ -255,16 +304,10 @@ void THNN_(VolumetricReplicationPadding_updateGradInput)(THNNState *state,
oheight = iheight + ptop + pbottom;
owidth = iwidth + pleft + pright;
- THArgCheck(owidth == THTensor_(size)(gradOutput, dimw), 3,
- "gradOutput width unexpected. Expected: %d, Got: %d",
- owidth, THTensor_(size)(gradOutput, dimw));
- THArgCheck(oheight == THTensor_(size)(gradOutput, dimh), 3,
- "gradOutput height unexpected. Expected: %d, Got: %d",
- oheight, THTensor_(size)(gradOutput, dimh));
- THArgCheck(odepth == THTensor_(size)(gradOutput, dimd), 3,
- "gradOutput depth unexpected. Expected: %d, Got: %d",
- odepth, THTensor_(size)(gradOutput, dimd));
-
+
+ THNN_(VolumetricReplicationPadding_shapeCheck)(
+ state, input, gradOutput, pleft, pright,
+ ptop, pbottom, pfront, pback);
/* get contiguous gradOutput */
gradOutput = THTensor_(newContiguous)(gradOutput);
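
For VolumetricReplicationPadding the expected gradOutput extents are the input extents grown by the padding on each side. The sketch below is not part of the patch: it restates that arithmetic and the two checks built on it with an illustrative function name, and it tests each padded dimension separately, whereas the patch expresses the "too small" test as a single THArgCheck.

/* Standalone sketch (not part of the diff): padded output sizes and checks. */
#include <stdio.h>

static int check_replication_pad_shape(long idepth, long iheight, long iwidth,
                                       int pleft, int pright,
                                       int ptop, int pbottom,
                                       int pfront, int pback,
                                       long godepth, long goheight, long gowidth)
{
  long odepth  = idepth  + pfront + pback;   /* same formulas as the patch */
  long oheight = iheight + ptop   + pbottom;
  long owidth  = iwidth  + pleft  + pright;

  if (odepth < 1 || oheight < 1 || owidth < 1) {
    fprintf(stderr, "input (D: %ld H: %ld W: %ld) is too small. "
                    "Calculated output D: %ld H: %ld W: %ld\n",
            idepth, iheight, iwidth, odepth, oheight, owidth);
    return -1;
  }
  if (godepth != odepth || goheight != oheight || gowidth != owidth) {
    fprintf(stderr, "gradOutput size unexpected. Expected: %ldx%ldx%ld, "
                    "Got: %ldx%ldx%ld\n",
            odepth, oheight, owidth, godepth, goheight, gowidth);
    return -1;
  }
  return 0;
}

int main(void)
{
  /* 4x16x16 input padded by 1 on every side -> 6x18x18 expected gradOutput. */
  check_replication_pad_shape(4, 16, 16, 1, 1, 1, 1, 1, 1, 6, 18, 18);  /* ok */
  check_replication_pad_shape(4, 16, 16, 1, 1, 1, 1, 1, 1, 6, 18, 17);  /* mismatch */
  return 0;
}
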