github.com/torch/nn.git
-rw-r--r--  SpatialDivisiveNormalization.lua  | 134
-rw-r--r--  init.lua                          |   1
-rw-r--r--  test/test.lua                     |  32
3 files changed, 167 insertions(+), 0 deletions(-)
diff --git a/SpatialDivisiveNormalization.lua b/SpatialDivisiveNormalization.lua
new file mode 100644
index 0000000..eaf7465
--- /dev/null
+++ b/SpatialDivisiveNormalization.lua
@@ -0,0 +1,134 @@
+local SpatialDivisiveNormalization, parent = torch.class('nn.SpatialDivisiveNormalization','nn.Module')
+
+function SpatialDivisiveNormalization:__init(nInputPlane, kernel, threshold, thresval)
+ parent.__init(self)
+
+ -- get args
+ self.nInputPlane = nInputPlane or 1
+ self.kernel = kernel or torch.Tensor(9,9):fill(1)
+ self.threshold = threshold or 1e-4
+ self.thresval = thresval or 1e-4
+ local kdim = self.kernel:nDimension()
+
+ -- check args
+ if kdim ~= 2 and kdim ~= 1 then
+ error('<SpatialDivisiveNormalization> averaging kernel must be 2D or 1D')
+ end
+ if (self.kernel:size(1) % 2) == 0 or (kdim == 2 and (self.kernel:size(2) % 2) == 0) then
+ error('<SpatialDivisiveNormalization> averaging kernel must have ODD dimensions')
+ end
+
+ -- normalize kernel
+ self.kernel:div(self.kernel:sum() * self.nInputPlane)
+
+ -- padding values
+ local padH = math.floor(self.kernel:size(1)/2)
+ local padW = padH
+ if kdim == 2 then
+ padW = math.floor(self.kernel:size(2)/2)
+ end
+
+ -- create convolutional mean estimator
+ self.meanestimator = nn.Sequential()
+ self.meanestimator:add(nn.SpatialZeroPadding(padW, padW, padH, padH))
+ if kdim == 2 then
+ self.meanestimator:add(nn.SpatialConvolutionMap(nn.tables.oneToOne(self.nInputPlane),
+ self.kernel:size(2), self.kernel:size(1)))
+ else
+ self.meanestimator:add(nn.SpatialConvolutionMap(nn.tables.oneToOne(self.nInputPlane),
+ self.kernel:size(1), 1))
+ self.meanestimator:add(nn.SpatialConvolutionMap(nn.tables.oneToOne(self.nInputPlane),
+ 1, self.kernel:size(1)))
+ end
+ self.meanestimator:add(nn.Sum(1))
+ self.meanestimator:add(nn.Replicate(self.nInputPlane))
+
+ -- create convolutional std estimator
+ self.stdestimator = nn.Sequential()
+ self.stdestimator:add(nn.Square())
+ self.stdestimator:add(nn.SpatialZeroPadding(padW, padW, padH, padH))
+ if kdim == 2 then
+ self.stdestimator:add(nn.SpatialConvolutionMap(nn.tables.oneToOne(self.nInputPlane),
+ self.kernel:size(2), self.kernel:size(1)))
+ else
+ self.stdestimator:add(nn.SpatialConvolutionMap(nn.tables.oneToOne(self.nInputPlane),
+ self.kernel:size(1), 1))
+ self.stdestimator:add(nn.SpatialConvolutionMap(nn.tables.oneToOne(self.nInputPlane),
+ 1, self.kernel:size(1)))
+ end
+ self.stdestimator:add(nn.Sum(1))
+ self.stdestimator:add(nn.Replicate(self.nInputPlane))
+ self.stdestimator:add(nn.Sqrt())
+
+ -- set kernel and bias
+ if kdim == 2 then
+ for i = 1,self.nInputPlane do
+ self.meanestimator.modules[2].weight[i] = self.kernel
+ self.stdestimator.modules[3].weight[i] = self.kernel
+ end
+ self.meanestimator.modules[2].bias:zero()
+ self.stdestimator.modules[3].bias:zero()
+ else
+ for i = 1,self.nInputPlane do
+ self.meanestimator.modules[2].weight[i]:copy(self.kernel)
+ self.meanestimator.modules[3].weight[i]:copy(self.kernel)
+ self.stdestimator.modules[3].weight[i]:copy(self.kernel)
+ self.stdestimator.modules[4].weight[i]:copy(self.kernel)
+ end
+ self.meanestimator.modules[2].bias:zero()
+ self.meanestimator.modules[3].bias:zero()
+ self.stdestimator.modules[3].bias:zero()
+ self.stdestimator.modules[4].bias:zero()
+ end
+
+ -- other operation
+ self.normalizer = nn.CDivTable()
+ self.divider = nn.CDivTable()
+ self.thresholder = nn.Threshold(self.threshold, self.thresval)
+
+ -- coefficient array, to adjust side effects
+ self.coef = torch.Tensor(1,1,1)
+end
+
+function SpatialDivisiveNormalization:updateOutput(input)
+ -- compute side coefficients
+ if (input:size(3) ~= self.coef:size(3)) or (input:size(2) ~= self.coef:size(2)) then
+ local ones = input.new():resizeAs(input):fill(1)
+ self.coef = self.meanestimator:updateOutput(ones)
+ self.coef = self.coef:clone()
+ end
+
+ -- normalize std dev
+ self.localstds = self.stdestimator:updateOutput(input)
+ self.adjustedstds = self.divider:updateOutput{self.localstds, self.coef}
+ self.thresholdedstds = self.thresholder:updateOutput(self.adjustedstds)
+ self.output = self.normalizer:updateOutput{input, self.thresholdedstds}
+
+ -- done
+ return self.output
+end
+
+function SpatialDivisiveNormalization:updateGradInput(input, gradOutput)
+ -- resize grad
+ self.gradInput:resizeAs(input):zero()
+
+ -- backprop through all modules
+ local gradnorm = self.normalizer:updateGradInput({input, self.thresholdedstds}, gradOutput)
+ local gradadj = self.thresholder:updateGradInput(self.adjustedstds, gradnorm[2])
+ local graddiv = self.divider:updateGradInput({self.localstds, self.coef}, gradadj)
+ self.gradInput:add(self.stdestimator:updateGradInput(input, graddiv[1]))
+ self.gradInput:add(gradnorm[1])
+
+ -- done
+ return self.gradInput
+end
+
+function SpatialDivisiveNormalization:type(type)
+ parent.type(self,type)
+ self.meanestimator:type(type)
+ self.stdestimator:type(type)
+ self.divider:type(type)
+ self.normalizer:type(type)
+ self.thresholder:type(type)
+ return self
+end
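
For readers who want to try the new module, here is a minimal usage sketch (the plane count and image size are illustrative, not taken from the patch). As the code above shows, the module divides each input value by a local standard-deviation estimate: the square root of the kernel-weighted local mean of squares, corrected for border effects by the coef array, with estimates at or below threshold replaced by thresval so near-zero denominators are avoided.

require 'nn'

-- a 3-plane input, normalized with the default 9x9 uniform kernel
local nbPlanes = 3
local module = nn.SpatialDivisiveNormalization(nbPlanes)

-- the module expects an (nInputPlane x height x width) tensor
local input = torch.rand(nbPlanes, 32, 32)
local output = module:forward(input)

-- the output has the same size as the input; each value has been divided
-- by the thresholded local std estimate at its location
print(output:size())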
diff --git a/init.lua b/init.lua
index d53a803..4fe448e 100644
--- a/init.lua
+++ b/init.lua
@@ -62,6 +62,7 @@ torch.include('nn', 'SpatialLPPooling.lua')
torch.include('nn', 'TemporalConvolution.lua')
torch.include('nn', 'TemporalSubSampling.lua')
torch.include('nn', 'SpatialSubtractiveNormalization.lua')
+torch.include('nn', 'SpatialDivisiveNormalization.lua')
torch.include('nn', 'SpatialZeroPadding.lua')
torch.include('nn', 'VolumetricConvolution.lua')
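
A quick note on this hunk: the torch.include call is what registers the new class with the package, so the constructor becomes visible as soon as nn is loaded. A trivial sanity check (hypothetical session, not part of the patch):

require 'nn'
-- prints a non-nil value once the include above is in place
print(nn.SpatialDivisiveNormalization)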
diff --git a/test/test.lua b/test/test.lua
index d67165f..31a6049 100644
--- a/test/test.lua
+++ b/test/test.lua
@@ -556,6 +556,38 @@ function nntest.SpatialSubtractiveNormalization_1dkernel()
mytester:asserteq(berr, 0, torch.typename(module) .. ' - i/o backward err ')
end
+function nntest.SpatialDivisiveNormalization_2dkernel()
+ local inputSize = math.random(11,20)
+ local kersize = 9
+ local nbfeatures = math.random(5,10)
+ local kernel = torch.Tensor(kersize,kersize):fill(1)
+ local module = nn.SpatialDivisiveNormalization(nbfeatures,kernel)
+ local input = torch.rand(nbfeatures,inputSize,inputSize)
+
+ local err = jac.testJacobian(module,input)
+ mytester:assertlt(err,precision, 'error on state ')
+
+ local ferr,berr = jac.testIO(module,input)
+ mytester:asserteq(ferr, 0, torch.typename(module) .. ' - i/o forward err ')
+ mytester:asserteq(berr, 0, torch.typename(module) .. ' - i/o backward err ')
+end
+
+function nntest.SpatialDivisiveNormalization_1dkernel()
+ local inputSize = math.random(11,20)
+ local kersize = 9
+ local nbfeatures = math.random(5,10)
+ local kernel = torch.Tensor(kersize):fill(1)
+ local module = nn.SpatialDivisiveNormalization(nbfeatures,kernel)
+ local input = torch.rand(nbfeatures,inputSize,inputSize)
+
+ local err = jac.testJacobian(module,input)
+ mytester:assertlt(err,precision, 'error on state ')
+
+ local ferr,berr = jac.testIO(module,input)
+ mytester:asserteq(ferr, 0, torch.typename(module) .. ' - i/o forward err ')
+ mytester:asserteq(berr, 0, torch.typename(module) .. ' - i/o backward err ')
+end
+
function nntest.SpatialConvolution()
local from = math.random(1,10)
local to = math.random(1,10)
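
To run only the two new tests, assuming the test harness exposes nn.test with an optional list of test names (an assumption based on later revisions of this repository, not shown in the patch), something like the following should work:

require 'nn'
-- run just the two tests added in this patch (names as defined above)
nn.test{'SpatialDivisiveNormalization_2dkernel',
        'SpatialDivisiveNormalization_1dkernel'}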