github.com/torch/nn.git
author    Soumith Chintala <soumith@gmail.com>   2015-05-13 07:52:25 +0300
committer Soumith Chintala <soumith@gmail.com>   2015-05-13 07:52:25 +0300
commit    8b9b5cd7a7e1887fc0be1b60537c79d770fe8e1f (patch)
tree      4dac9719dcfae1e1a1944e62779eb54236d070d6
parent    905ea8c1a4033af2af0f90e92e16597f442f3512 (diff)
parent    d30ab5a0f6871e8efc27f9cd833241ac32363dd7 (diff)
Merge pull request #256 from colesbury/lua52
Rename unpack to table.unpack for Lua 5.2
-rw-r--r--  CriterionTable.lua       4
-rw-r--r--  Euclidean.lua            4
-rw-r--r--  MM.lua                   4
-rw-r--r--  MixtureTable.lua         6
-rw-r--r--  Module.lua               4
-rw-r--r--  View.lua                 2
-rw-r--r--  WeightedEuclidean.lua    4
-rwxr-xr-x  doc/table.md             4
-rw-r--r--  hessian.lua              4
-rw-r--r--  test.lua                16
10 files changed, 26 insertions, 26 deletions
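
Background for the change: Lua 5.1 exposes unpack as a global function, but Lua 5.2 moved it into the table library as table.unpack, so every bare unpack call touched below fails under 5.2 with "attempt to call a nil value". A minimal sketch of the breakage and of a common cross-version shim (the shim is a general Lua pattern, not something this patch adds; the patch simply renames the call sites):

-- In Lua 5.1 the global unpack exists; in Lua 5.2+ it is nil by default,
-- so calling it raises: attempt to call a nil value (global 'unpack')
local t = {1, 2, 3}

print(table.unpack(t))   --> 1  2  3   (Lua 5.2+, the form this patch adopts)

-- Common shim for code that must run on both 5.1 and 5.2
-- (a general compatibility pattern, not part of this patch):
local unpack = table.unpack or unpack
print(unpack(t))         --> 1  2  3   (works on both versions)

Renaming call sites outright, as done here, keeps the codebase on the 5.2 name at the cost of dropping bare-5.1 compatibility; the shim would have preserved both.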
diff --git a/CriterionTable.lua b/CriterionTable.lua
index be00837..14c64ac 100644
--- a/CriterionTable.lua
+++ b/CriterionTable.lua
@@ -7,11 +7,11 @@ function CriterionTable:__init(criterion)
 end
 
 function CriterionTable:updateOutput(input)
-   self.output = self.criterion:updateOutput(unpack(input))
+   self.output = self.criterion:updateOutput(table.unpack(input))
    return self.output
 end
 
 function CriterionTable:updateGradInput(input, gradOutput)
-   self.criterion:updateGradInput(unpack(input))
+   self.criterion:updateGradInput(table.unpack(input))
    return self.gradInput
 end
diff --git a/Euclidean.lua b/Euclidean.lua
index d5700a5..ae3fee9 100644
--- a/Euclidean.lua
+++ b/Euclidean.lua
@@ -35,9 +35,9 @@ end
 
 local function view(res, src, ...)
    local args = {...}
    if src:isContiguous() then
-      res:view(src, unpack(args))
+      res:view(src, table.unpack(args))
    else
-      res:reshape(src, unpack(args))
+      res:reshape(src, table.unpack(args))
    end
 end
diff --git a/MM.lua b/MM.lua
index a7bfd94..35f02cd 100644
--- a/MM.lua
+++ b/MM.lua
@@ -18,7 +18,7 @@ end
 
 function MM:updateOutput(input)
    assert(#input == 2, 'input must be a pair of minibatch matrices')
-   local a, b = unpack(input)
+   local a, b = table.unpack(input)
    assert(a:nDimension() == 2 or a:nDimension() == 3, 'input tensors must be 2D or 3D')
 
    if a:nDimension() == 2 then
@@ -47,7 +47,7 @@ end
 
 function MM:updateGradInput(input, gradOutput)
    assert(#input == 2, 'input must be a pair of tensors')
-   local a, b = unpack(input)
+   local a, b = table.unpack(input)
 
    self.gradInput[1]:resizeAs(a)
    self.gradInput[2]:resizeAs(b)
diff --git a/MixtureTable.lua b/MixtureTable.lua
index 77a7d3e..18a5a07 100644
--- a/MixtureTable.lua
+++ b/MixtureTable.lua
@@ -18,7 +18,7 @@ function MixtureTable:__init(dim)
 end
 
 function MixtureTable:updateOutput(input)
-   local gaterInput, expertInputs = unpack(input)
+   local gaterInput, expertInputs = table.unpack(input)
 
    self.dimG = 2
    local batchSize = gaterInput:size(1)
@@ -79,8 +79,8 @@ function MixtureTable:updateOutput(input)
 end
 
 function MixtureTable:updateGradInput(input, gradOutput)
-   local gaterInput, expertInputs = unpack(input)
-   local gaterGradInput, expertGradInputs = unpack(self.gradInput)
+   local gaterInput, expertInputs = table.unpack(input)
+   local gaterGradInput, expertGradInputs = table.unpack(self.gradInput)
 
    if self.table then
       if not self.backwardSetup then
diff --git a/Module.lua b/Module.lua
index be7896c..c2075b8 100644
--- a/Module.lua
+++ b/Module.lua
@@ -146,7 +146,7 @@ function Module:getParameters()
       if storageAndOffset == nil then
         return nil
       end
-      local _, offset = unpack(storageAndOffset)
+      local _, offset = table.unpack(storageAndOffset)
       return offset
    end
 
@@ -200,7 +200,7 @@ function Module:getParameters()
    end
 
    for _, storageAndOffset in pairs(storages) do
-      local k, v = unpack(storageAndOffset)
+      local k, v = table.unpack(storageAndOffset)
      flatParameters[{{v+1,v+k:size()}}]:copy(Tensor():set(k))
    end
 
diff --git a/View.lua b/View.lua
index 766e149..33ddb69 100644
--- a/View.lua
+++ b/View.lua
@@ -74,7 +74,7 @@ end
 function View:updateOutput(input)
    local bsz = batchsize(input, self.size, self.numInputDims, self.numElements)
    if bsz then
-      self.output = input:view(bsz, unpack(self.size:totable()))
+      self.output = input:view(bsz, table.unpack(self.size:totable()))
    else
       self.output = input:view(self.size)
    end
diff --git a/WeightedEuclidean.lua b/WeightedEuclidean.lua
index 071203e..8acd351 100644
--- a/WeightedEuclidean.lua
+++ b/WeightedEuclidean.lua
@@ -26,9 +26,9 @@ end
 
 local function view(res, src, ...)
    local args = {...}
    if src:isContiguous() then
-      res:view(src, unpack(args))
+      res:view(src, table.unpack(args))
    else
-      res:reshape(src, unpack(args))
+      res:reshape(src, table.unpack(args))
    end
 end
diff --git a/doc/table.md b/doc/table.md
index d4725bb..b8cb3a9 100755
--- a/doc/table.md
+++ b/doc/table.md
@@ -610,7 +610,7 @@ Example 1:
 -0.2955
 [torch.DoubleTensor of dimension 2x1]
 
-> =unpack(nn.SelectTable(1):backward(input, torch.randn(2, 3)))
+> =table.unpack(nn.SelectTable(1):backward(input, torch.randn(2, 3)))
 -0.4891 -0.3495 -0.3182
 -2.0999  0.7381 -0.5312
 [torch.DoubleTensor of dimension 2x3]
@@ -634,7 +634,7 @@ Example 2:
   }
 }
 
-> =unpack(nn.SelectTable(2):backward(input, {torch.randn(2, 1), {torch.randn(2, 2)}}))
+> =table.unpack(nn.SelectTable(2):backward(input, {torch.randn(2, 1), {torch.randn(2, 2)}}))
 0 0 0
 0 0 0
 [torch.DoubleTensor of dimension 2x3]
diff --git a/hessian.lua b/hessian.lua
index 21302cb..d63c6a8 100644
--- a/hessian.lua
+++ b/hessian.lua
@@ -330,7 +330,7 @@ function nn.hessian.enable()
       if storageAndOffset == nil then
         return nil
       end
-      local _, offset = unpack(storageAndOffset)
+      local _, offset = table.unpack(storageAndOffset)
       return offset
    end
 
@@ -373,7 +373,7 @@ function nn.hessian.enable()
      end
 
      for _, storageAndOffset in pairs(storages) do
-        local k, v = unpack(storageAndOffset)
+        local k, v = table.unpack(storageAndOffset)
        flatParameters[{{v+1,v+k:size()}}]:copy(torch.Tensor():set(k))
      end
      for k = 1,flatUsedParameters:nElement() do
diff --git a/test.lua b/test.lua
index 959c369..d82e470 100644
--- a/test.lua
+++ b/test.lua
@@ -2923,7 +2923,7 @@ function nntest.View()
    local target = template:size():totable()
    local module = nn.View(template:size())
    mytester:assertTableEq(module:forward(input):size():totable(), target, "Error in forward (1)")
-   local module = nn.View(unpack(target))
+   local module = nn.View(table.unpack(target))
    mytester:assertTableEq(module:forward(input):size():totable(), target, "Error in forward (2)")
 
    -- Minibatch
@@ -2979,7 +2979,7 @@ function nntest.Reshape()
    local target = template:size():totable()
    local module = nn.Reshape(template:size())
    mytester:assertTableEq(module:forward(input):size():totable(), target, "Error in forward (1)")
-   local module = nn.View(unpack(target))
+   local module = nn.View(table.unpack(target))
    mytester:assertTableEq(module:forward(input):size():totable(), target, "Error in forward (2)")
 
    -- Minibatch
@@ -3005,7 +3005,7 @@ function nntest.SpatialUpSamplingNearest()
    end
 
    -- Check that the gradient is correct by using finite elements
-   local input = torch.Tensor(unpack(shape)):zero()
+   local input = torch.Tensor(table.unpack(shape)):zero()
 
    local err = jac.testJacobian(m, input)
    mytester:assertlt(err, precision, ' error on state ')
@@ -3250,7 +3250,7 @@ function nntest.MM()
    local gradOutput = torch.randn(M, P)
    local gradInput = mm:backward({A, B}, gradOutput)
    mytester:assert(#gradInput == 2, 'gradInput must be table of size 2')
-   local gradA, gradB = unpack(gradInput)
+   local gradA, gradB = table.unpack(gradInput)
    mytester:assertTableEq(gradA:size():totable(), A:size():totable(),
                           'Gradient for input A has wrong size')
    mytester:assertTableEq(gradB:size():totable(), B:size():totable(),
@@ -3281,7 +3281,7 @@ function nntest.BatchMMNoTranspose()
    local gradOutput = torch.randn(bSize, M, P)
    local gradInput = mm:backward({A, B}, gradOutput)
    mytester:assert(#gradInput == 2, 'gradInput must be table of size 2')
-   local gradA, gradB = unpack(gradInput)
+   local gradA, gradB = table.unpack(gradInput)
    mytester:assertTableEq(gradA:size():totable(), A:size():totable(),
                           'Gradient for input A has wrong size')
    mytester:assertTableEq(gradB:size():totable(), B:size():totable(),
@@ -3315,7 +3315,7 @@ function nntest.BatchMMTransposeA()
    local gradOutput = torch.randn(bSize, M, P)
    local gradInput = mm:backward({A, B}, gradOutput)
    mytester:assert(#gradInput == 2, 'gradInput must be table of size 2')
-   local gradA, gradB = unpack(gradInput)
+   local gradA, gradB = table.unpack(gradInput)
    mytester:assertTableEq(gradA:size():totable(), A:size():totable(),
                           'Gradient for input A has wrong size')
    mytester:assertTableEq(gradB:size():totable(), B:size():totable(),
@@ -3349,7 +3349,7 @@ function nntest.BatchMMTransposeB()
    local gradOutput = torch.randn(bSize, M, P)
    local gradInput = mm:backward({A, B}, gradOutput)
    mytester:assert(#gradInput == 2, 'gradInput must be table of size 2')
-   local gradA, gradB = unpack(gradInput)
+   local gradA, gradB = table.unpack(gradInput)
    mytester:assertTableEq(gradA:size():totable(), A:size():totable(),
                           'Gradient for input A has wrong size')
    mytester:assertTableEq(gradB:size():totable(), B:size():totable(),
@@ -3383,7 +3383,7 @@ function nntest.BatchMMTransposeBoth()
    local gradOutput = torch.randn(bSize, M, P)
    local gradInput = mm:backward({A, B}, gradOutput)
    mytester:assert(#gradInput == 2, 'gradInput must be table of size 2')
-   local gradA, gradB = unpack(gradInput)
+   local gradA, gradB = table.unpack(gradInput)
    mytester:assertTableEq(gradA:size():totable(), A:size():totable(),
                           'Gradient for input A has wrong size')
    mytester:assertTableEq(gradB:size():totable(), B:size():totable(),