diff options
author | Gregory Chanan <gchanan@fb.com> | 2016-12-01 00:40:24 +0300 |
---|---|---|
committer | Gregory Chanan <gchanan@fb.com> | 2016-12-01 00:45:16 +0300 |
commit | 2a64e82da91cddc4b791d6c0763c2a38f1320e25 (patch) | |
tree | 2e7e218c27c8920507417743c2b221ec791a945f /VolumetricFullConvolution.lua | |
parent | 8dff3df9676caae7fd87551ee2ac24626f4a0852 (diff) |
Move make contiguous code from lua to C.
Exceptions are:
1) SparseLinear
requires additional parameters to be passed in (e.g. nbatches),
so it's not clear it's worth moving to C since it won't really simplify the binding
code logic.
2) BatchNormalization
requires "makeBatch", which isn't a trivial translation to C.
3) LookupTable
requires "view" in C, which is already a TODO
4) SpatialUpSamplingBilinear
requires "view" in C, which is already a TODO
Diffstat (limited to 'VolumetricFullConvolution.lua')
-rw-r--r-- | VolumetricFullConvolution.lua | 19 |
1 file changed, 0 insertions, 19 deletions
diff --git a/VolumetricFullConvolution.lua b/VolumetricFullConvolution.lua index 3c86a14..58eaa1d 100644 --- a/VolumetricFullConvolution.lua +++ b/VolumetricFullConvolution.lua @@ -57,22 +57,6 @@ function VolumetricFullConvolution:reset(stdv) self.bias:uniform(-stdv, stdv) end -local function makeContiguous(self, input, gradOutput) - if not input:isContiguous() then - self._input = self._input or input.new() - self._input:resizeAs(input):copy(input) - input = self._input - end - if gradOutput then - if not gradOutput:isContiguous() then - self._gradOutput = self._gradOutput or gradOutput.new() - self._gradOutput:resizeAs(gradOutput):copy(gradOutput) - gradOutput = self._gradOutput - end - end - return input, gradOutput -end - local function calculateAdj(targetSize, ker, pad, stride) return (targetSize + 2 * pad - ker) % stride end @@ -113,7 +97,6 @@ function VolumetricFullConvolution:updateOutput(input) adjH = calculateAdj(tH, self.kH, self.padH, self.dH) end - inputTensor = makeContiguous(self, inputTensor) inputTensor.THNN.VolumetricFullConvolution_updateOutput( inputTensor:cdata(), self.output:cdata(), @@ -153,7 +136,6 @@ function VolumetricFullConvolution:updateGradInput(input, gradOutput) end end - inputTensor, gradOutput = makeContiguous(self, inputTensor, gradOutput) inputTensor.THNN.VolumetricFullConvolution_updateGradInput( inputTensor:cdata(), gradOutput:cdata(), @@ -199,7 +181,6 @@ function VolumetricFullConvolution:accGradParameters(input, gradOutput, scale) adjH = calculateAdj(tH, self.kH, self.padH, self.dH) end - inputTensor, gradOutput = makeContiguous(self, inputTensor, gradOutput) inputTensor.THNN.VolumetricFullConvolution_accGradParameters( inputTensor:cdata(), gradOutput:cdata(), |