
github.com/torch/nn.git
author     Nicholas Leonard <nick@nikopia.org>  2015-03-21 20:54:19 +0300
committer  Nicholas Leonard <nick@nikopia.org>  2015-03-26 02:54:57 +0300
commit     ba6f198873a888eeb065180b78ee6528b999412b (patch)
tree       7e6db01f4c77f65ef72237f7b9963b716865d9d2
parent     3c82eba291f96d0ce4fe611e8c80c7ebb20d5b47 (diff)
Padding.lua
-rw-r--r--  Padding.lua    | 47
-rw-r--r--  doc/simple.md  | 48
-rw-r--r--  init.lua       |  1
-rw-r--r--  test.lua       | 18
4 files changed, 106 insertions, 8 deletions
diff --git a/Padding.lua b/Padding.lua
new file mode 100644
index 0000000..3b5836f
--- /dev/null
+++ b/Padding.lua
@@ -0,0 +1,47 @@
+local Padding, parent = torch.class('nn.Padding', 'nn.Module')
+
+-- pad can be positive (pad right) or negative (pad left)
+function Padding:__init(dim, pad, nInputDim, value)
+ self.dim = dim
+ self.pad = pad
+ self.nInputDim = nInputDim
+ self.value = value or 0
+ self.outputSize = torch.LongStorage()
+ parent.__init(self)
+end
+
+function Padding:updateOutput(input)
+ self.outputSize:resize(input:dim())
+ self.outputSize:copy(input:size())
+ local dim = self.dim
+ if self.nInputDim and input:dim() ~= self.nInputDim then
+ dim = dim + 1
+ end
+ self.outputSize[dim] = self.outputSize[dim] + math.abs(self.pad)
+ self.output:resize(self.outputSize)
+ self.output:fill(self.value)
+ local outputWindow
+ if self.pad > 0 then
+ outputWindow = self.output:narrow(dim, 1, input:size(dim))
+ else
+ outputWindow = self.output:narrow(dim, 1 - self.pad, input:size(dim))
+ end
+ outputWindow:copy(input)
+ return self.output
+end
+
+function Padding:updateGradInput(input, gradOutput)
+ self.gradInput:resizeAs(input)
+ local dim = self.dim
+ if self.nInputDim and input:dim() ~= self.nInputDim then
+ dim = dim + 1
+ end
+ local gradOutputWindow
+ if self.pad > 0 then
+ gradOutputWindow = gradOutput:narrow(dim, 1, input:size(dim))
+ else
+ gradOutputWindow = gradOutput:narrow(dim, 1 - self.pad, input:size(dim))
+ end
+ self.gradInput:copy(gradOutputWindow)
+ return self.gradInput
+end
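For readers skimming the patch, a minimal usage sketch of the new module (the tensor values here are illustrative and not part of the patch):

```lua
require 'nn'

-- pad two zeros on the right of dimension 1
local module = nn.Padding(1, 2)

local input = torch.Tensor{1, 2, 3}
local output = module:forward(input)
-- output is {1, 2, 3, 0, 0}: the input sits in the narrow(1, 1, 3) window

local gradOutput = torch.Tensor{10, 20, 30, 40, 50}
local gradInput = module:backward(input, gradOutput)
-- gradInput is {10, 20, 30}: padded positions receive no gradient
```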
diff --git a/doc/simple.md b/doc/simple.md
index 35f36d0..97abd82 100644
--- a/doc/simple.md
+++ b/doc/simple.md
@@ -29,12 +29,11 @@ and providing affine transformations :
* [Square](#nn.Square) : an element-wise square operation ;
* [Sqrt](#nn.Sqrt) : an element-wise [sqrt](https://github.com/torch/torch7/blob/master/doc/maths.md#res-torchsqrtres-x) operation ;
* [MM](#nn.MM) : matrix-matrix multiplication (also supports batches of matrices) ;
-* Normalization modules:
- * [BatchNormalization](#nn.BatchNormalization) - mean/std normalization over the mini-batch inputs, with an optional affine transform that follows
* Miscellaneous Modules :
+ * [BatchNormalization](#nn.BatchNormalization) - mean/std normalization over the mini-batch inputs (with an optional affine transform) ;
* [Identity](#nn.Identity) : forward input as-is to output (useful with [ParallelTable](table.md#nn.ParallelTable));
* [Dropout](#nn.Dropout) : masks parts of the `input` using binary samples from a [bernoulli](http://en.wikipedia.org/wiki/Bernoulli_distribution) distribution ;
-
+ * [Padding](#nn.Padding) : adds padding to a dimension ;
<a name="nn.Linear"/>
## Linear ##
@@ -884,12 +883,12 @@ C = model.forward({A, B}) -- C will be of size `b x m x n`
giving N = 0 disables the learnable affine transform.
eps is a small value added to the standard-deviation to avoid divide-by-zero. Defaults to 1e-5
-In training time, this layer keeps a running estimate of it's computed mean and std.
-The running sum is kept with a default momentup of 0.1 (unless over-ridden)
-In test time, this running mean/std is used to normalize.
+During training, this layer keeps a running estimate of its computed mean and std.
+The running sum is kept with a default momentum of 0.1 (unless overridden).
+During evaluation, this running mean/std is used for normalization.
-Implements Batch Normalization as described in the paper:
+Implements Batch Normalization as described in [the paper](http://arxiv.org/pdf/1502.03167v3.pdf):
"Batch Normalization: Accelerating Deep Network Training
by Reducing Internal Covariate Shift"
by Sergey Ioffe, Christian Szegedy
@@ -901,7 +900,7 @@ The operation implemented is:
standard-deviation(x) + eps
```
where the mean and standard-deviation are calculated per-dimension over the mini-batches
-and where gamma and beta are learnable parameter vectors of size N (where N = input dimensionality).
+and where gamma and beta are learnable parameter vectors of size `N` (where `N` is the input size).
The learning of gamma and beta is optional.
The module only accepts 2D inputs.
@@ -917,3 +916,36 @@ model = nn.BatchNormalization(0)
A = torch.randn(b, m)
C = model.forward(A) -- C will be of size `b x m`
```
+
+<a name="nn.Padding"/>
+## Padding ##
+
+`module` = `nn.Padding(dim, pad [, nInputDim, value])`
+
+This module adds `pad` units of padding to dimension `dim` of the input.
+If `pad` is negative, padding is added to the left of the dimension;
+otherwise it is added to the right. When `nInputDim` is provided, inputs with
+more dimensions than that value are treated as batches, and the dimension
+actually padded is `dim + 1`. When `value` is provided, the padding is filled
+with that value; it defaults to zero.
+
+Example 1:
+```lua
+module = nn.Padding(1,2,1,-1) --pad right x2
+module:forward(torch.randn(3)) --non-batch input
+ 0.2008
+ 0.4848
+-1.0783
+-1.0000
+-1.0000
+[torch.DoubleTensor of dimension 5]
+```
+
+Example 2:
+```lua
+module = nn.Padding(1,-2,1,-1) --pad left x2
+module:forward(torch.randn(2,3)) --batch input
+-1.0000 -1.0000 1.0203 0.2704 -1.6164
+-1.0000 -1.0000 -0.2219 -0.6529 -1.9218
+[torch.DoubleTensor of dimension 2x5]
+```
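Since each `nn.Padding` pads only one side of a dimension, padding both sides can be composed with `nn.Sequential`; a sketch of that pattern (a usage idea, not part of the patch):

```lua
require 'nn'

-- one unit of value -1 on each side of dimension 1
local both = nn.Sequential()
both:add(nn.Padding(1, -1, 1, -1)) -- left
both:add(nn.Padding(1,  1, 1, -1)) -- right

local out = both:forward(torch.Tensor{5, 6})
-- out is {-1, 5, 6, -1}
```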
diff --git a/init.lua b/init.lua
index 7704712..f344b54 100644
--- a/init.lua
+++ b/init.lua
@@ -19,6 +19,7 @@ include('Narrow.lua')
include('Replicate.lua')
include('Transpose.lua')
include('BatchNormalization.lua')
+include('Padding.lua')
include('Copy.lua')
include('Min.lua')
diff --git a/test.lua b/test.lua
index 84410c9..a15cd61 100644
--- a/test.lua
+++ b/test.lua
@@ -3301,6 +3301,24 @@ function nntest.SpatialBatchNormalization()
mytester:asserteq(berr, 0, torch.typename(module) .. ' - i/o backward err ')
end
+function nntest.Padding()
+ local fanin = math.random(1,3)
+ local sizex = math.random(4,16)
+ local sizey = math.random(4,16)
+ local pad = math.random(-3,3)
+ local val = torch.randn(1):squeeze()
+ local module = nn.Padding(1, pad, 3, val)
+ local input = torch.rand(fanin,sizey,sizex)
+ local size = input:size():totable()
+ size[1] = size[1] + math.abs(pad)
+
+ local output = module:forward(input)
+ mytester:assertTableEq(size, output:size():totable(), 0.00001, "Padding size error")
+
+ local gradInput = module:backward(input, output)
+ mytester:assertTensorEq(gradInput, input, 0.00001, "Padding backward error")
+end
+
mytester:add(nntest)
if not nn then
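The test above checks output sizes and that the gradient flows back unchanged through the copied window. A finite-difference check in the style used elsewhere in `test.lua` could be sketched with `nn.Jacobian` (a hypothetical addition, not part of the patch):

```lua
require 'nn'

local jac = nn.Jacobian
local module = nn.Padding(1, 2, 3, 0.5)
local input = torch.rand(2, 4, 4)

-- compares the module's analytic Jacobian against a numeric estimate;
-- err should be close to zero (within floating-point precision)
local err = jac.testJacobian(module, input)
print('Padding Jacobian error:', err)
```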