github.com/torch/nn.git
author    Soumith Chintala <soumith@fb.com>  2015-05-18 23:35:14 +0300
committer Soumith Chintala <soumith@fb.com>  2015-05-18 23:35:14 +0300
commit    d3fe672c549a33527d40f1eb6036f4beb7407f9f (patch)
tree      0bd3be8325f07e630c019150ec8ad757524d5b01
parent    0908c98eadcc947b2836be6d8a42805f1d0fa9b6 (diff)
Revert "Adding Batch L2 Normalization Layer that makes all rows of input Tensor unit L2 norm"
This reverts commit 905ea8c1a4033af2af0f90e92e16597f442f3512.
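
For context, the reverted module normalized each row of an [n x d] input Tensor to unit L2 norm. A minimal sketch of that behaviour, using only stock torch tensor ops rather than the removed nn.L2Normalize API (the helper name l2normalizeRows is ours, purely illustrative):

require 'torch'

-- Divide every row of a 2D tensor by its L2 norm; equivalent in effect
-- to the forward pass of the reverted layer.
local function l2normalizeRows(input)
   assert(input:dim() == 2, 'expected an [n x d] tensor')
   local norms = torch.norm(input, 2, 2)           -- [n x 1] row norms
   return torch.cdiv(input, norms:expandAs(input))
end

local x = torch.randn(4, 3)
print(torch.norm(l2normalizeRows(x), 2, 2))        -- every entry ~ 1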
-rw-r--r--  L2Normalize.lua  40
-rw-r--r--  init.lua          1
-rw-r--r--  test.lua         23
3 files changed, 0 insertions, 64 deletions
diff --git a/L2Normalize.lua b/L2Normalize.lua
deleted file mode 100644
index f1dfd0e..0000000
--- a/L2Normalize.lua
+++ /dev/null
@@ -1,40 +0,0 @@
-
---[[
- This layer expects an [n x d] Tensor and normalizes each
- row to have unit L2 norm.
-]]--
-local L2Normalize, parent = torch.class('nn.L2Normalize', 'nn.Module')
-function L2Normalize:__init()
- parent.__init(self)
-end
-function L2Normalize:updateOutput(input)
- assert(input:dim() == 2, 'only mini-batch supported (2D tensor), got '
- .. input:dim() .. 'D tensor instead')
- self.output:resizeAs(input)
- self.buffer = self.buffer or input.new()
- self.normSquared = self.normSquared or input.new()
- self.normSquared:sum(self.buffer:cmul(input, input), 2)
- self.buffer:sqrt(self.normSquared)
- self.output:copy(input):cdiv(self.buffer:expandAs(input))
- return self.output
-end
-
-function L2Normalize:updateGradInput(input, gradOutput)
- assert(input:dim() == 2, 'only mini-batch supported')
- assert(gradOutput:dim() == 2, 'only mini-batch supported')
- local n = input:size(1) -- batch size
- local d = input:size(2) -- dimensionality of vectors
- -- compute diagonal term
- self.eye = self.eye or torch.eye(d):typeAs(input):repeatTensor(n,1):view(n,d,d)
- self.diag = self.diag or self.eye.new()
- self.diag:cmul(self.eye, self.normSquared:view(n,1,1):expand(n,d,d))
- -- compute cross term
- local b1 = input:view(n,d,1)
- local b2 = input:view(n,1,d)
- self.diag:add(-torch.bmm(b1,b2))
- -- compute the local gradient of the L2 transformation
- self.diag:cdiv(torch.pow(self.buffer,3):view(n,1,1):expand(n,d,d))
- -- chain the gradient
- self.gradInput:resize(n,d,1):bmm(self.diag, gradOutput:view(n,d,1)):resize(n,d)
- return self.gradInput
-end
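
For reference (this derivation is not part of the original commit): updateOutput divides each row by its L2 norm, and updateGradInput implements the closed-form Jacobian of y = x / ||x||, which for a single row x is

\[
\frac{\partial y_i}{\partial x_j}
  = \frac{\delta_{ij}}{\lVert x \rVert} - \frac{x_i x_j}{\lVert x \rVert^{3}}
  = \frac{\lVert x \rVert^{2}\,\delta_{ij} - x_i x_j}{\lVert x \rVert^{3}}.
\]

That is exactly what the deleted code assembles per batch element: self.diag holds ||x||^2 I - x x^T, the cdiv by torch.pow(self.buffer, 3) supplies the 1/||x||^3 factor, and the final bmm chains the (symmetric) Jacobian with gradOutput.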
diff --git a/init.lua b/init.lua
index 520b66e..b1d36db 100644
--- a/init.lua
+++ b/init.lua
@@ -46,7 +46,6 @@ include('WeightedEuclidean.lua')
include('PairwiseDistance.lua')
include('CosineDistance.lua')
include('DotProduct.lua')
-include('L2Normalize.lua')
include('Exp.lua')
include('Log.lua')
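
As an aside, the include list in init.lua is what registers each module class on the nn table at require time, so dropping the single include line above is enough to remove nn.L2Normalize from the public API. A minimal sketch of the mechanism, using a hypothetical file MyLayer.lua:

-- MyLayer.lua (hypothetical): torch.class attaches the class to nn
local MyLayer, parent = torch.class('nn.MyLayer', 'nn.Module')

function MyLayer:updateOutput(input)
   self.output:resizeAs(input):copy(input)   -- identity, for illustration
   return self.output
end

-- init.lua then pulls the file in at require time:
--   include('MyLayer.lua')
-- after which nn.MyLayer() is constructible.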
diff --git a/test.lua b/test.lua
index e69d3c8..94033dd 100644
--- a/test.lua
+++ b/test.lua
@@ -3708,29 +3708,6 @@ function nntest.Padding()
mytester:assertTensorEq(gradInput, input, 0.00001, "Padding backward error")
end
-function nntest.L2Normalize()
- local ini = math.random(6,8)
- local inj = math.random(3,5)
- local input = torch.randn(ini, inj)
-
- local module = nn.L2Normalize()
-
- -- test correctness of output
- local output = module:forward(input)
- local norms = torch.norm(output, 2, 2)
- local desired_norms = torch.ones(ini)
- mytester:assertTensorEq(norms, desired_norms, 0.000001, 'L2Normalize forward err')
-
- -- test the Jacobian
- local err = jac.testJacobian(module,input)
- mytester:assertlt(err, precision, 'error on state ')
-
- -- test IO correctness
- local ferr, berr = jac.testIO(module,input)
- mytester:asserteq(ferr, 0, torch.typename(module) .. ' - i/o forward err ')
- mytester:asserteq(berr, 0, torch.typename(module) .. ' - i/o backward err ')
-end
-
mytester:add(nntest)
if not nn then
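
The removed test verified the analytic gradient against finite differences through the nn.Jacobian harness (jac.testJacobian). A standalone sketch of the same check, rebuilt here from plain torch ops since the module itself is gone after this revert:

require 'torch'

local n, d, eps = 3, 4, 1e-5

local function forward(x)
   local norms = torch.norm(x, 2, 2)
   return torch.cdiv(x, norms:expandAs(x))
end

local x = torch.randn(n, d)
local gradOutput = torch.randn(n, d)

-- analytic gradient, assembled the same way the deleted updateGradInput
-- did: (||x||^2 I - x x^T) / ||x||^3, chained with gradOutput via bmm
local normSq = torch.sum(torch.cmul(x, x), 2)                  -- [n x 1]
local eye = torch.eye(d):repeatTensor(n, 1):view(n, d, d)
local diag = torch.cmul(eye, normSq:view(n, 1, 1):expand(n, d, d))
diag:add(-torch.bmm(x:view(n, d, 1), x:view(n, 1, d)))
diag:cdiv(torch.pow(normSq, 1.5):view(n, 1, 1):expand(n, d, d))
local analytic = torch.bmm(diag, gradOutput:view(n, d, 1)):view(n, d)

-- numeric gradient of sum(forward(x) .* gradOutput) by central differences
local numeric = torch.zeros(n, d)
for i = 1, n do
   for j = 1, d do
      local orig = x[i][j]
      x[i][j] = orig + eps
      local fp = torch.cmul(forward(x), gradOutput):sum()
      x[i][j] = orig - eps
      local fm = torch.cmul(forward(x), gradOutput):sum()
      x[i][j] = orig
      numeric[i][j] = (fp - fm) / (2 * eps)
   end
end

print((analytic - numeric):abs():max())   -- expect roughly 1e-8 or smaller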