Welcome to the mirror list, hosted at ThFree Co, Russian Federation.

github.com/torch/nn.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorGregory Chanan <gchanan@fb.com>2016-12-01 00:40:24 +0300
committerGregory Chanan <gchanan@fb.com>2016-12-01 00:45:16 +0300
commit2a64e82da91cddc4b791d6c0763c2a38f1320e25 (patch)
tree2e7e218c27c8920507417743c2b221ec791a945f /VolumetricDilatedConvolution.lua
parent8dff3df9676caae7fd87551ee2ac24626f4a0852 (diff)
Move make contiguous code from lua to C.
Exceptions are: 1) SparseLinear requires additional parameters to be passed in (e.g. nbatches), so it's not clear it's worth moving to C since it won't really simplify the binding code logic. 2) BatchNormalization requires "makeBatch", which isn't a trivial translation to C. 3) LookupTable requires "view" in C, which is already a TODO. 4) SpatialUpSamplingBilinear requires "view" in C, which is already a TODO.
Diffstat (limited to 'VolumetricDilatedConvolution.lua')
-rw-r--r--VolumetricDilatedConvolution.lua19
1 file changed, 0 insertions, 19 deletions
diff --git a/VolumetricDilatedConvolution.lua b/VolumetricDilatedConvolution.lua
index fc7f037..f1337eb 100644
--- a/VolumetricDilatedConvolution.lua
+++ b/VolumetricDilatedConvolution.lua
@@ -9,26 +9,9 @@ function VolumetricDilatedConvolution:__init(nInputPlane, nOutputPlane, kT, kW,
self.dilationH = dilationH or 1
end
-local function makeContiguous(self, input, gradOutput)
- if not input:isContiguous() then
- self._input = self._input or input.new()
- self._input:resizeAs(input):copy(input)
- input = self._input
- end
- if gradOutput then
- if not gradOutput:isContiguous() then
- self._gradOutput = self._gradOutput or gradOutput.new()
- self._gradOutput:resizeAs(gradOutput):copy(gradOutput)
- gradOutput = self._gradOutput
- end
- end
- return input, gradOutput
-end
-
function VolumetricDilatedConvolution:updateOutput(input)
self.finput = self.finput or self.weight.new()
self.fgradInput = self.fgradInput or self.weight.new()
- input = makeContiguous(self, input)
input.THNN.VolumetricDilatedConvolution_updateOutput(
input:cdata(),
self.output:cdata(),
@@ -46,7 +29,6 @@ end
function VolumetricDilatedConvolution:updateGradInput(input, gradOutput)
if self.gradInput then
- input, gradOutput = makeContiguous(self, input, gradOutput)
self.fgradInput = self.fgradInput or self.weight.new()
input.THNN.VolumetricDilatedConvolution_updateGradInput(
input:cdata(),
@@ -65,7 +47,6 @@ end
function VolumetricDilatedConvolution:accGradParameters(input, gradOutput, scale)
scale = scale or 1
- input, gradOutput = makeContiguous(self, input, gradOutput)
self.fgradInput = self.fgradInput or self.weight.new()
input.THNN.VolumetricDilatedConvolution_accGradParameters(
input:cdata(),