
github.com/torch/nn.git
author    Andrew Tulloch <andrew@tullo.ch>    2014-11-21 10:38:45 +0300
committer Andrew Tulloch <andrew@tullo.ch>    2014-11-21 10:39:22 +0300
commit    6916775db4731b5c40656085471448be476a321d (patch)
tree      ecbe7b560e213c0b0fc4f1b7911f3a3057151e0d /test
parent    b7c39f91f0e47309e16993a9b63a23040786d495 (diff)
Fix various unused variables in nn
Diffstat (limited to 'test')
-rw-r--r--  test/test.lua  28
1 file changed, 13 insertions(+), 15 deletions(-)
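
Note: the commit replaces return values that are never read with Lua's conventional throwaway name "_", or moves the local declaration to the call whose result is actually used. A minimal sketch of the pattern, assuming the Torch nn package is installed; the module and tensors below are illustrative and not taken from this diff:

require 'nn'

local m = nn.Linear(10, 10)
local x = torch.rand(10)

-- Before: "local y = m:forward(x)" bound a value that was never read.
-- After: run forward only for its side effect (populating m.output),
-- binding the unused return value to "_".
local _ = m:forward(x)
local gradInput = m:backward(x, torch.ones(10))
print(gradInput:size())
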
diff --git a/test/test.lua b/test/test.lua
index 11fc1dd..ed7fd21 100644
--- a/test/test.lua
+++ b/test/test.lua
@@ -473,7 +473,7 @@ end
local function criterionJacobianTest1D(cri, input, target)
local eps = 1e-6
- local fx = cri:forward(input, target)
+ local _ = cri:forward(input, target)
local dfdx = cri:backward(input, target)
-- for each input perturbation, do central difference
local centraldiff_dfdx = torch.Tensor():resizeAs(dfdx)
@@ -1702,7 +1702,6 @@ end
function nntest.VolumetricMaxPooling()
local from = math.random(2,3)
- local to = from
local kt = math.random(3,4)
local ki = math.random(3,4)
local kj = math.random(3,4)
@@ -1738,10 +1737,10 @@ end
function nntest.Module_getParameters_2()
local n = nn.Sequential()
n:add( nn.Linear(10,10) )
- local p = n:getParameters()
+ local _ = n:getParameters()
n:add( nn.Linear(10,10) )
- p = n:getParameters()
+ local p = n:getParameters()
mytester:asserteq((p[{ {111,210} }] - n.modules[2].weight):norm(), 0, 'error when appending new module')
mytester:asserteq((p[{ {211,220} }] - n.modules[2].bias):norm(), 0, 'error when appending new module')
@@ -1772,10 +1771,10 @@ function nntest.Module_getParameters_4()
local n = nn.Sequential()
n:add( nn.Linear(10,10) )
n:add( n.modules[1]:clone() )
- local p = n:getParameters()
+ local _ = n:getParameters()
n:add(nn.Linear(10,10))
- p = n:getParameters()
+ local p = n:getParameters()
mytester:asserteq((p[{ {1,100} }] - n.modules[1].weight):norm(), 0, 'error when using cloning')
mytester:asserteq((p[{ {101,110} }] - n.modules[1].bias):norm(), 0, 'error when using cloning')
@@ -1813,10 +1812,10 @@ function nntest.Module_getParameters_6()
local n = nn.Sequential()
n:add( nn.Linear(10,10) )
n:add( n.modules[1]:clone('weight','bias') )
- local p = n:getParameters()
+ local _ = n:getParameters()
n:add(nn.Linear(10,10))
- p = n:getParameters()
+ local p = n:getParameters()
mytester:asserteq((p[{ {1,100} }] - n.modules[1].weight):norm(), 0, 'error when using cloning+sharing')
mytester:asserteq((p[{ {101,110} }] - n.modules[1].bias):norm(), 0, 'error when using cloning+sharing')
@@ -1834,10 +1833,10 @@ function nntest.Module_getParameters_7()
local n = nn.Sequential()
n:add( nn.Linear(10,10) )
n:add( n.modules[1]:clone('weight','bias') )
- local p = n:getParameters()
+ local _ = n:getParameters()
n:add(nn.Linear(10,10))
- p = n:getParameters()
+ local _ = n:getParameters()
local n1 = nn.Sequential()
n1:add( nn.Linear(10,10) )
@@ -1849,7 +1848,7 @@ function nntest.Module_getParameters_7()
n:add( n1 )
n:add( n2 )
- local p = n:getParameters()
+ local _ = n:getParameters()
local nf = nn.Sequential()
nf:add( n1 )
@@ -1887,7 +1886,7 @@ function nntest.Module_getParameters_8()
-- clone the second MLP to ensure that the weights before calling getParameters are preserved
mlp2 = mlp2:clone()
- local p, gp = net:getParameters()
+ local p, _ = net:getParameters()
mytester:asserteq((p[{ {1,100} }] - net.modules[1].weight):norm(), 0, 'error when using partial realloc')
mytester:asserteq((p[{ {111,210} }] - net.modules[2].weight):norm(), 0, 'error when using partial realloc')
@@ -2407,7 +2406,7 @@ function nntest.FlattenTable()
-- CASE 1: Nothing changes so the output table shouldn't be redefined
local old_input_map = m.input_map
local old_output = m.output
- output = m:forward(input)
+ local _ = m:forward(input)
mytester:assert(old_input_map == m.input_map and old_output == m.output)
-- CASE 2: An element is added to the input table
@@ -2449,7 +2448,7 @@ function nntest.L1Penalty()
local input = torch.rand(2,10):add(-0.5)
input[1][1] = 0
- local out = m:forward(input)
+ local _ = m:forward(input)
local grad = m:backward(input, torch.ones(input:size()))
local err = input:clone():abs():sum()*weight - m.loss
@@ -2482,7 +2481,6 @@ function nntest.DepthConcat()
local output = torch.Tensor(2, outputSize:sum(), 12, 12):zero() -- zero for padding
local narrows = { {{},{1,5},{},{}}, {{},{6,11},{2,11},{2,11}}, {{},{12,18},{2,10},{2,10}}, {{},{19,26},{3,10},{3,10}} }
local gradInput = input:clone():zero()
- local gradWeights = {}
for i=1,4 do
local conv = concat:get(i)
local gradWeight = conv.gradWeight:clone()