
github.com/torch/nn.git
author    Andrew Tulloch <andrew@tullo.ch>  2014-11-21 10:38:45 +0300
committer Andrew Tulloch <andrew@tullo.ch>  2014-11-21 10:39:22 +0300
commit    6916775db4731b5c40656085471448be476a321d (patch)
tree      ecbe7b560e213c0b0fc4f1b7911f3a3057151e0d
parent    b7c39f91f0e47309e16993a9b63a23040786d495 (diff)
Fix various unused variables in nn
-rw-r--r--  Concat.lua                    | 14
-rw-r--r--  CosineEmbeddingCriterion.lua  |  6
-rw-r--r--  CriterionTable.lua            |  1
-rw-r--r--  DepthConcat.lua               |  6
-rw-r--r--  Identity.lua                  |  2
-rw-r--r--  Jacobian.lua                  |  4
-rw-r--r--  Module.lua                    |  2
-rw-r--r--  Parallel.lua                  | 19
-rw-r--r--  Sequential.lua                |  1
-rw-r--r--  SoftMax.lua                   |  2
-rw-r--r--  SoftMin.lua                   |  2
-rw-r--r--  SparseJacobian.lua            |  4
-rw-r--r--  SpatialConvolutionMap.lua     |  2
-rw-r--r--  SplitTable.lua                |  1
-rw-r--r--  Transpose.lua                 |  1
-rw-r--r--  hessian.lua                   |  2
-rw-r--r--  test/test.lua                 | 28
17 files changed, 46 insertions(+), 51 deletions(-)
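
Note on the fix pattern (illustrative sketch, not part of the patch): the commit silences
unused-variable warnings in two ways. Return values that were bound to a local but never
read are discarded by calling the function as a statement, and unwanted slots of a
multiple assignment are bound to the throwaway name "_". The Lua below is a
self-contained sketch; twoValues() is a hypothetical stand-in for calls such as
unpack(storageAndOffset) or module:accGradParameters(...).

-- Hypothetical helper standing in for a call that returns more than one value.
local function twoValues()
   return "storage", 42
end

-- Idiom 1: bind only the slot that is needed; "_" marks the ignored one.
local _, offset = twoValues()
print(offset)   -- prints 42

-- Idiom 2: when no return value is needed at all, call as a statement instead
-- of binding the result to a local that is never read.
twoValues()
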
diff --git a/Concat.lua b/Concat.lua
index 286a351..5743af9 100644
--- a/Concat.lua
+++ b/Concat.lua
@@ -63,9 +63,10 @@ function Concat:accGradParameters(input, gradOutput, scale)
local offset = 1
for i,module in ipairs(self.modules) do
local currentOutput = module.output
- local currentGradInput = module:accGradParameters(input,
- gradOutput:narrow(self.dimension, offset, currentOutput:size(self.dimension)),
- scale)
+ module:accGradParameters(
+ input,
+ gradOutput:narrow(self.dimension, offset, currentOutput:size(self.dimension)),
+ scale)
offset = offset + currentOutput:size(self.dimension)
end
end
@@ -74,9 +75,10 @@ function Concat:accUpdateGradParameters(input, gradOutput, lr)
local offset = 1
for i,module in ipairs(self.modules) do
local currentOutput = module.output
- local currentGradInput = module:accUpdateGradParameters(input,
- gradOutput:narrow(self.dimension, offset, currentOutput:size(self.dimension)),
- lr)
+ module:accUpdateGradParameters(
+ input,
+ gradOutput:narrow(self.dimension, offset, currentOutput:size(self.dimension)),
+ lr)
offset = offset + currentOutput:size(self.dimension)
end
end
diff --git a/CosineEmbeddingCriterion.lua b/CosineEmbeddingCriterion.lua
index 93348fb..293ae23 100644
--- a/CosineEmbeddingCriterion.lua
+++ b/CosineEmbeddingCriterion.lua
@@ -23,12 +23,6 @@ function CosineEmbeddingCriterion:updateOutput(input,y)
return self.output
end
-local function mathsign(t)
- if t>0 then return 1; end
- if t<0 then return -1; end
- return 2*torch.random(2)-3;
-end
-
function CosineEmbeddingCriterion:updateGradInput(input, y)
local v1 = input[1]
local v2 = input[2]
diff --git a/CriterionTable.lua b/CriterionTable.lua
index e5538f7..be00837 100644
--- a/CriterionTable.lua
+++ b/CriterionTable.lua
@@ -1,6 +1,7 @@
local CriterionTable, parent = torch.class('nn.CriterionTable', 'nn.Module')
function CriterionTable:__init(criterion)
+ parent.__init(self)
self.criterion = criterion
self.gradInput = {criterion.gradInput}
end
diff --git a/DepthConcat.lua b/DepthConcat.lua
index 70646f4..7187d61 100644
--- a/DepthConcat.lua
+++ b/DepthConcat.lua
@@ -9,7 +9,7 @@
-- this, we select the largest spatial dimensions and add zero-padding
-- around the smaller dimensions.
------------------------------------------------------------------------
-local DepthConcat, parent = torch.class('nn.DepthConcat', 'nn.Concat')
+local DepthConcat, _ = torch.class('nn.DepthConcat', 'nn.Concat')
function DepthConcat:windowNarrow(output, currentOutput, offset)
local outputWindow = output:narrow(self.dimension, offset, currentOutput:size(self.dimension))
@@ -79,7 +79,7 @@ function DepthConcat:accGradParameters(input, gradOutput, scale)
for i,module in ipairs(self.modules) do
local currentOutput = module.output
local gradOutputWindow = self:windowNarrow(gradOutput, currentOutput, offset)
- local currentGradInput = module:accGradParameters(input, gradOutputWindow, scale)
+ module:accGradParameters(input, gradOutputWindow, scale)
offset = offset + currentOutput:size(self.dimension)
end
end
@@ -89,7 +89,7 @@ function DepthConcat:accUpdateGradParameters(input, gradOutput, lr)
for i,module in ipairs(self.modules) do
local currentOutput = module.output
local gradOutputWindow = self:windowNarrow(gradOutput, currentOutput, offset)
- local currentGradInput = module:accUpdateGradParameters(input, gradOutputWindow, lr)
+ module:accUpdateGradParameters(input, gradOutputWindow, lr)
offset = offset + currentOutput:size(self.dimension)
end
end
diff --git a/Identity.lua b/Identity.lua
index 79b5c08..088cc34 100644
--- a/Identity.lua
+++ b/Identity.lua
@@ -1,4 +1,4 @@
-local Identity, parent = torch.class('nn.Identity', 'nn.Module')
+local Identity, _ = torch.class('nn.Identity', 'nn.Module')
function Identity:updateOutput(input)
self.output = input
diff --git a/Jacobian.lua b/Jacobian.lua
index 24014b5..c3797bd 100644
--- a/Jacobian.lua
+++ b/Jacobian.lua
@@ -52,7 +52,7 @@ function nn.Jacobian.backwardUpdate(module, input, param)
end
dout:zero()
sdout[i] = 1
- local din = module:updateGradInput(input, dout)
+ module:updateGradInput(input, dout)
module:accUpdateGradParameters(input, dout, 1)
jacobian:select(2,i):copy(param)
end
@@ -242,7 +242,7 @@ function nn.Jacobian.testAllUpdate(module, input, weight, gradWeight)
macshu2:updateGradInput(input, gradOutput)
macshu1:accUpdateGradParameters(input, gradOutput, lr)
macshu2:accUpdateGradParameters(input, gradOutput, lr)
- local err = (weightc-maccgp[gradWeight]*(lr*2)-macshu1[weight]):norm()
+ err = (weightc-maccgp[gradWeight]*(lr*2)-macshu1[weight]):norm()
err = err + (weightc-maccgp[gradWeight]*(lr*2)-macshu2[weight]):norm()
errors["accUpdateGradParameters [shared]"] = err
diff --git a/Module.lua b/Module.lua
index d9410c9..c9b73cc 100644
--- a/Module.lua
+++ b/Module.lua
@@ -171,7 +171,7 @@ function Module:getParameters()
if storageAndOffset == nil then
return nil
end
- local storage, offset = unpack(storageAndOffset)
+ local _, offset = unpack(storageAndOffset)
return offset
end
diff --git a/Parallel.lua b/Parallel.lua
index 547f444..3057ba2 100644
--- a/Parallel.lua
+++ b/Parallel.lua
@@ -71,10 +71,12 @@ function Parallel:accGradParameters(input, gradOutput, scale)
for i=1,nModule do
local module = self.modules[i];
local currentOutput = module.output
- local currentGradInput =
- module:accGradParameters(input:select(self.inputDimension,i),
- gradOutput:narrow(self.outputDimension,
- offset, currentOutput:size(self.outputDimension)), scale)
+ module:accGradParameters(
+ input:select(self.inputDimension,i),
+ gradOutput:narrow(
+ self.outputDimension, offset,
+ currentOutput:size(self.outputDimension)),
+ scale)
offset = offset + currentOutput:size(self.outputDimension)
end
@@ -87,10 +89,11 @@ function Parallel:accUpdateGradParameters(input, gradOutput, lr)
for i=1,nModule do
local module = self.modules[i];
local currentOutput = module.output
- local currentGradInput =
- module:accUpdateGradParameters(input:select(self.inputDimension,i),
- gradOutput:narrow(self.outputDimension,
- offset, currentOutput:size(self.outputDimension)), lr)
+ module:accUpdateGradParameters(
+ input:select(self.inputDimension,i),
+ gradOutput:narrow(self.outputDimension, offset,
+ currentOutput:size(self.outputDimension)),
+ lr)
offset = offset + currentOutput:size(self.outputDimension)
end
diff --git a/Sequential.lua b/Sequential.lua
index ec3247b..97554b3 100644
--- a/Sequential.lua
+++ b/Sequential.lua
@@ -1,6 +1,7 @@
local Sequential, parent = torch.class('nn.Sequential', 'nn.Module')
function Sequential:__init()
+ parent.__init(self)
self.modules = {}
end
diff --git a/SoftMax.lua b/SoftMax.lua
index 609b353..22f0eda 100644
--- a/SoftMax.lua
+++ b/SoftMax.lua
@@ -1,4 +1,4 @@
-local SoftMax, parent = torch.class('nn.SoftMax', 'nn.Module')
+local SoftMax, _ = torch.class('nn.SoftMax', 'nn.Module')
function SoftMax:updateOutput(input)
return input.nn.SoftMax_updateOutput(self, input)
diff --git a/SoftMin.lua b/SoftMin.lua
index 90c6c60..7d2358c 100644
--- a/SoftMin.lua
+++ b/SoftMin.lua
@@ -1,4 +1,4 @@
-local SoftMin, parent = torch.class('nn.SoftMin', 'nn.Module')
+local SoftMin, _ = torch.class('nn.SoftMin', 'nn.Module')
function SoftMin:updateOutput(input)
self.mininput = self.mininput or input.new()
diff --git a/SparseJacobian.lua b/SparseJacobian.lua
index b778e67..19334d1 100644
--- a/SparseJacobian.lua
+++ b/SparseJacobian.lua
@@ -61,7 +61,7 @@ function nn.SparseJacobian.backwardUpdate (module, input, param)
dout:zero()
sdout[i] = 1
module:zeroGradParameters()
- local din = module:updateGradInput(input, dout)
+ module:updateGradInput(input, dout)
module:accUpdateGradParameters(input, dout, 1)
jacobian:select(2,i):copy(param)
end
@@ -269,7 +269,7 @@ function nn.SparseJacobian.testAllUpdate(module, input, weight, gradWeight)
macshu2:updateGradInput(input, gradOutput)
macshu1:accUpdateGradParameters(input, gradOutput, lr)
macshu2:accUpdateGradParameters(input, gradOutput, lr)
- local err = (weightc-maccgp[gradWeight]*(lr*2)-macshu1[weight]):norm()
+ err = (weightc-maccgp[gradWeight]*(lr*2)-macshu1[weight]):norm()
err = err + (weightc-maccgp[gradWeight]*(lr*2)-macshu2[weight]):norm()
errors["accUpdateGradParameters [shared]"] = err
diff --git a/SpatialConvolutionMap.lua b/SpatialConvolutionMap.lua
index e05ce6e..390ace0 100644
--- a/SpatialConvolutionMap.lua
+++ b/SpatialConvolutionMap.lua
@@ -29,9 +29,7 @@ function nn.tables.random(nin, nout, nto)
local tbl = torch.Tensor(nker, 2)
local fi = torch.randperm(nin)
local frcntr = 1
- local tocntr = 1
local nfi = math.floor(nin/nto) -- number of distinct nto chunks
- local rfi = math.fmod(nin,nto) -- number of remaining from maps
local totbl = tbl:select(2,2)
local frtbl = tbl:select(2,1)
local fitbl = fi:narrow(1, 1, (nfi * nto)) -- part of fi that covers distinct chunks
diff --git a/SplitTable.lua b/SplitTable.lua
index 70b45f6..bd46b71 100644
--- a/SplitTable.lua
+++ b/SplitTable.lua
@@ -28,7 +28,6 @@ function SplitTable:updateGradInput(input, gradOutput)
local slices = input:size(dimension)
self.gradInput:resizeAs(input)
- local offset = 1
for i=1,slices do
local currentGradInput = gradOutput[i];
self.gradInput:select(dimension,i):copy(currentGradInput)
diff --git a/Transpose.lua b/Transpose.lua
index a43729b..263db60 100644
--- a/Transpose.lua
+++ b/Transpose.lua
@@ -18,7 +18,6 @@ function Transpose:updateOutput(input)
end
function Transpose:updateGradInput(input, gradOutput)
- local ndim = gradOutput:nDimension()
for i = #self.permutations,1,-1 do
local perm = self.permutations[i]
gradOutput = gradOutput:transpose(perm[1],perm[2])
diff --git a/hessian.lua b/hessian.lua
index 3d336fe..21302cb 100644
--- a/hessian.lua
+++ b/hessian.lua
@@ -330,7 +330,7 @@ function nn.hessian.enable()
if storageAndOffset == nil then
return nil
end
- local storage, offset = unpack(storageAndOffset)
+ local _, offset = unpack(storageAndOffset)
return offset
end
diff --git a/test/test.lua b/test/test.lua
index 11fc1dd..ed7fd21 100644
--- a/test/test.lua
+++ b/test/test.lua
@@ -473,7 +473,7 @@ end
local function criterionJacobianTest1D(cri, input, target)
local eps = 1e-6
- local fx = cri:forward(input, target)
+ local _ = cri:forward(input, target)
local dfdx = cri:backward(input, target)
-- for each input perturbation, do central difference
local centraldiff_dfdx = torch.Tensor():resizeAs(dfdx)
@@ -1702,7 +1702,6 @@ end
function nntest.VolumetricMaxPooling()
local from = math.random(2,3)
- local to = from
local kt = math.random(3,4)
local ki = math.random(3,4)
local kj = math.random(3,4)
@@ -1738,10 +1737,10 @@ end
function nntest.Module_getParameters_2()
local n = nn.Sequential()
n:add( nn.Linear(10,10) )
- local p = n:getParameters()
+ local _ = n:getParameters()
n:add( nn.Linear(10,10) )
- p = n:getParameters()
+ local p = n:getParameters()
mytester:asserteq((p[{ {111,210} }] - n.modules[2].weight):norm(), 0, 'error when appending new module')
mytester:asserteq((p[{ {211,220} }] - n.modules[2].bias):norm(), 0, 'error when appending new module')
@@ -1772,10 +1771,10 @@ function nntest.Module_getParameters_4()
local n = nn.Sequential()
n:add( nn.Linear(10,10) )
n:add( n.modules[1]:clone() )
- local p = n:getParameters()
+ local _ = n:getParameters()
n:add(nn.Linear(10,10))
- p = n:getParameters()
+ local p = n:getParameters()
mytester:asserteq((p[{ {1,100} }] - n.modules[1].weight):norm(), 0, 'error when using cloning')
mytester:asserteq((p[{ {101,110} }] - n.modules[1].bias):norm(), 0, 'error when using cloning')
@@ -1813,10 +1812,10 @@ function nntest.Module_getParameters_6()
local n = nn.Sequential()
n:add( nn.Linear(10,10) )
n:add( n.modules[1]:clone('weight','bias') )
- local p = n:getParameters()
+ local _ = n:getParameters()
n:add(nn.Linear(10,10))
- p = n:getParameters()
+ local p = n:getParameters()
mytester:asserteq((p[{ {1,100} }] - n.modules[1].weight):norm(), 0, 'error when using cloning+sharing')
mytester:asserteq((p[{ {101,110} }] - n.modules[1].bias):norm(), 0, 'error when using cloning+sharing')
@@ -1834,10 +1833,10 @@ function nntest.Module_getParameters_7()
local n = nn.Sequential()
n:add( nn.Linear(10,10) )
n:add( n.modules[1]:clone('weight','bias') )
- local p = n:getParameters()
+ local _ = n:getParameters()
n:add(nn.Linear(10,10))
- p = n:getParameters()
+ local _ = n:getParameters()
local n1 = nn.Sequential()
n1:add( nn.Linear(10,10) )
@@ -1849,7 +1848,7 @@ function nntest.Module_getParameters_7()
n:add( n1 )
n:add( n2 )
- local p = n:getParameters()
+ local _ = n:getParameters()
local nf = nn.Sequential()
nf:add( n1 )
@@ -1887,7 +1886,7 @@ function nntest.Module_getParameters_8()
-- clone the second MLP to ensure that the weights before calling getParameters are preserved
mlp2 = mlp2:clone()
- local p, gp = net:getParameters()
+ local p, _ = net:getParameters()
mytester:asserteq((p[{ {1,100} }] - net.modules[1].weight):norm(), 0, 'error when using partial realloc')
mytester:asserteq((p[{ {111,210} }] - net.modules[2].weight):norm(), 0, 'error when using partial realloc')
@@ -2407,7 +2406,7 @@ function nntest.FlattenTable()
-- CASE 1: Nothing changes so the output table shouldn't be redefined
local old_input_map = m.input_map
local old_output = m.output
- output = m:forward(input)
+ local _ = m:forward(input)
mytester:assert(old_input_map == m.input_map and old_output == m.output)
-- CASE 2: An element is added to the input table
@@ -2449,7 +2448,7 @@ function nntest.L1Penalty()
local input = torch.rand(2,10):add(-0.5)
input[1][1] = 0
- local out = m:forward(input)
+ local _ = m:forward(input)
local grad = m:backward(input, torch.ones(input:size()))
local err = input:clone():abs():sum()*weight - m.loss
@@ -2482,7 +2481,6 @@ function nntest.DepthConcat()
local output = torch.Tensor(2, outputSize:sum(), 12, 12):zero() -- zero for padding
local narrows = { {{},{1,5},{},{}}, {{},{6,11},{2,11},{2,11}}, {{},{12,18},{2,10},{2,10}}, {{},{19,26},{3,10},{3,10}} }
local gradInput = input:clone():zero()
- local gradWeights = {}
for i=1,4 do
local conv = concat:get(i)
local gradWeight = conv.gradWeight:clone()