github.com/torch/nn.git
author    Nicholas Leonard <nleonard@twitter.com>  2017-07-11 00:20:30 +0300
committer Nicholas Leonard <nleonard@twitter.com>  2017-07-11 00:53:40 +0300
commit    0aeb67b544e7b385647b17342fd2eccf3cb9a0e2
tree      a437032e313e31ab69b630cb7dc828b3b2ce3b65
parent    4bd94cb76086290d2391e68be0d3c04409a0b598
parameters() uses torch.type instead of type
-rw-r--r--  Bilinear.lua                    2
-rw-r--r--  Container.lua                   4
-rw-r--r--  DontCast.lua                    2
-rw-r--r--  FlattenTable.lua               16
-rw-r--r--  Identity.lua                    2
-rw-r--r--  IndexLinear.lua                 4
-rw-r--r--  SparseLinear.lua               12
-rw-r--r--  SpatialFullConvolution.lua     10
-rw-r--r--  VolumetricFullConvolution.lua  10
-rw-r--r--  hessian.lua                     4
-rw-r--r--  utils.lua                       6
11 files changed, 36 insertions, 36 deletions
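
For context (not part of the commit): Lua's built-in type() reports only primitive Lua types, whereas torch.type() reports the Torch class name and falls back to type() for plain values. A minimal illustration, assuming the nn package and the default DoubleTensor type:

    require 'nn'

    local t = torch.Tensor(3)
    print(type(t))        --> userdata
    print(torch.type(t))  --> torch.DoubleTensor

    -- nn modules are Lua tables carrying a torch class metatable, so
    -- plain type() cannot tell them apart from ordinary tables:
    local m = nn.Linear(2, 2)
    print(type(m))        --> table
    print(torch.type(m))  --> nn.Linear

    -- Hence torch.type(x) == 'table' holds only for plain Lua tables,
    -- which is the check the rewritten functions below rely on.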
diff --git a/Bilinear.lua b/Bilinear.lua
index 9350b03..3c0f6db 100644
--- a/Bilinear.lua
+++ b/Bilinear.lua
@@ -2,7 +2,7 @@ local Bilinear, parent = torch.class('nn.Bilinear', 'nn.Module')
local function isint(x) return type(x) == 'number' and x == math.floor(x) end
function Bilinear:__assertInput(input)
- assert(input and type(input) == 'table' and #input == 2,
+ assert(input and torch.type(input) == 'table' and #input == 2,
'input should be a table containing two data Tensors')
assert(input[1]:nDimension() == 2 and input[2]:nDimension() == 2,
'input Tensors should be two-dimensional')
diff --git a/Container.lua b/Container.lua
index 7e264ba..67fac9f 100644
--- a/Container.lua
+++ b/Container.lua
@@ -105,7 +105,7 @@ end
function Container:parameters()
local function tinsert(to, from)
- if type(from) == 'table' then
+ if torch.type(from) == 'table' then
for i=1,#from do
tinsert(to,from[i])
end
@@ -131,7 +131,7 @@ function Container:clearState()
if self[f] then
if torch.isTensor(self[f]) then
self[f] = self[f].new()
- elseif type(self[f]) == 'table' then
+ elseif torch.type(self[f]) == 'table' then
self[f] = {}
else
self[f] = nil
diff --git a/DontCast.lua b/DontCast.lua
index b89f543..eaa39b6 100644
--- a/DontCast.lua
+++ b/DontCast.lua
@@ -19,7 +19,7 @@ local function recursiveTypeCopy(dst, src, type_str)
end
local function tableTensorType(src)
- if type(src) == 'table' then
+ if type(src) == 'table' then -- Note: don't use torch.type here
local type_str, found
for k,v in pairs(src) do
type_str, found = tableTensorType(v)
diff --git a/FlattenTable.lua b/FlattenTable.lua
index 1c18255..3fe2fd5 100644
--- a/FlattenTable.lua
+++ b/FlattenTable.lua
@@ -12,7 +12,7 @@ end
local function flatten(output, input)
local input_map -- has the same structure as input, but stores the
-- indices to the corresponding output
- if type(input) == 'table' then
+ if torch.type(input) == 'table' then
input_map = {}
-- forward DFS order
for i = 1, #input do
@@ -30,8 +30,8 @@ local function checkMapping(output, input, input_map)
if input_map == nil or output == nil or input == nil then
return false
end
- if type(input) == 'table' then
- if type(input_map) ~= 'table' then
+ if torch.type(input) == 'table' then
+ if torch.type(input_map) ~= 'table' then
return false
end
if #input ~= #input_map then
@@ -46,7 +46,7 @@ local function checkMapping(output, input, input_map)
end
return true
else
- if type(input_map) ~= 'number' then
+ if torch.type(input_map) ~= 'number' then
return false
end
return output[input_map] == input
@@ -56,7 +56,7 @@ end
-- During BPROP we have to build a gradInput with the same shape as the
-- input. This is a recursive function to build up a gradInput
local function inverseFlatten(gradOutput, input_map)
- if type(input_map) == 'table' then
+ if torch.type(input_map) == 'table' then
local gradInput = {}
for i = 1, #input_map do
gradInput[#gradInput + 1] = inverseFlatten(gradOutput, input_map[i])
@@ -68,7 +68,7 @@ local function inverseFlatten(gradOutput, input_map)
end
function FlattenTable:updateOutput(input)
- assert(type(input) == 'table', 'input must be a table')
+ assert(torch.type(input) == 'table', 'input must be a table')
-- to avoid rebuilding the flattened table every updateOutput call
-- we will do a DFS pass over the existing output table and the inputs to
-- see if it needs to be rebuilt.
@@ -80,8 +80,8 @@ function FlattenTable:updateOutput(input)
end
function FlattenTable:updateGradInput(input, gradOutput)
- assert(type(input) == 'table', 'input must be a table')
- assert(type(input) == 'table', 'gradOutput must be a table')
+ assert(torch.type(input) == 'table', 'input must be a table')
+ assert(torch.type(gradOutput) == 'table', 'gradOutput must be a table')
-- If the input changes between the updateOutput and updateGradInput call,
-- then we may have to rebuild the input_map! However, let's assume that
-- the input_map is valid and that forward has already been called.
diff --git a/Identity.lua b/Identity.lua
index 5e6ccb6..647aee3 100644
--- a/Identity.lua
+++ b/Identity.lua
@@ -17,7 +17,7 @@ function Identity:clearState()
if self[f] then
if torch.isTensor(self[f]) then
self[f] = self[f].new()
- elseif type(self[f]) == 'table' then
+ elseif torch.type(self[f]) == 'table' then
self[f] = {}
else
self[f] = nil
diff --git a/IndexLinear.lua b/IndexLinear.lua
index 2ddbcbd..6b6b200 100644
--- a/IndexLinear.lua
+++ b/IndexLinear.lua
@@ -73,7 +73,7 @@ function IndexLinear:reset(stdv)
end
function IndexLinear:reshapeInput(input)
- assert(type(input) == 'table')
+ assert(torch.type(input) == 'table')
local ninputs = 0
for _, v in ipairs(input) do
@@ -108,7 +108,7 @@ function IndexLinear:reshapeInput(input)
-- { torch.LongTensor(size1), torch.LongTensor(size2), ..., torch.LongTensor(sizeN) }, -- batch of keys
-- { torch.Tensor(size1), torch.Tensor(size2), ..., torch.Tensor(sizeN) }, -- batch of values,
-- }
- if type(keys) == 'table' and type(values) == 'table' then
+ if torch.type(keys) == 'table' and torch.type(values) == 'table' then
lkeys, lvalues = keys, values
self.isFlat = false
self.noBatch = false
diff --git a/SparseLinear.lua b/SparseLinear.lua
index 7c3edad..4888fc1 100644
--- a/SparseLinear.lua
+++ b/SparseLinear.lua
@@ -15,7 +15,7 @@ function SparseLinear:__init(inputSize, outputSize, doGradInput)
self.gradWeight = torch.Tensor(outputSize, inputSize):zero()
self.gradBias = torch.Tensor(outputSize):zero()
- assert(type(self.doGradInput) == type(true))
+ assert(type(self.doGradInput) == 'boolean')
self.lastInput = nil
self.sparseUpdate = NO_LAST_INPUT
@@ -39,7 +39,7 @@ function SparseLinear:reset(stdv)
end
function SparseLinear:reshapeInput(input)
- if type(input) == 'table' then
+ if torch.type(input) == 'table' then
return input, true, false
else
if input:dim() == 2 then
@@ -57,7 +57,7 @@ function SparseLinear:updateOutput(input)
local input, batchMode, legacyMode = self:reshapeInput(input)
self.legacyMode = legacyMode
- if legacyMode then
+ if legacyMode then
input.THNN.SparseLinear_legacyUpdateOutput(
input:cdata(),
self.output:cdata(),
@@ -149,8 +149,8 @@ function SparseLinear:accGradParameters(input, gradOutput, scale)
end
function SparseLinear:updateGradInput(input, gradOutput)
- if self.legacyMode then
- if type(self.gradInput) ~= type(gradOutput) then self.gradInput = gradOutput.new() end
+ if self.legacyMode then
+ if torch.type(self.gradInput) ~= torch.type(gradOutput) then self.gradInput = gradOutput.new() end
self.gradInput:resizeAs(input)
else
self.gradInput = {}
@@ -185,7 +185,7 @@ function SparseLinear:updateGradInput(input, gradOutput)
return self.gradInput
end
--- These functions do sparse updates / zeros. However, if we accumulated
+-- These functions do sparse updates / zeros. However, if we accumulated
-- gradients multiple times, we can't depend on the last input to do sparse
-- updates.
function SparseLinear:updateParameters(learningRate)
diff --git a/SpatialFullConvolution.lua b/SpatialFullConvolution.lua
index e6019bc..d28579b 100644
--- a/SpatialFullConvolution.lua
+++ b/SpatialFullConvolution.lua
@@ -72,7 +72,7 @@ function SpatialFullConvolution:updateOutput(input)
-- The input can be a table where the second element indicates the target
-- output size, in which case the adj factors are computed automatically
- if type(inputTensor) == 'table' then
+ if torch.type(inputTensor) == 'table' then
inputTensor = input[1]
local targetTensor = input[2]
local tDims = targetTensor:dim()
@@ -113,7 +113,7 @@ function SpatialFullConvolution:updateGradInput(input, gradOutput)
-- The input can be a table where the second element indicates the target
-- output size, in which case the adj factors are computed automatically
- if type(inputTensor) == 'table' then
+ if torch.type(inputTensor) == 'table' then
inputTensor = input[1]
local targetTensor = input[2]
local tDims = targetTensor:dim()
@@ -122,7 +122,7 @@ function SpatialFullConvolution:updateGradInput(input, gradOutput)
adjW = calculateAdj(tW, self.kW, self.padW, self.dW)
adjH = calculateAdj(tH, self.kH, self.padH, self.dH)
-- Momentarily extract the gradInput tensor
- if type(self.gradInput) == 'table' then
+ if torch.type(self.gradInput) == 'table' then
self.gradInput = self.gradInput[1] or inputTensor.new()
end
end
@@ -139,7 +139,7 @@ function SpatialFullConvolution:updateGradInput(input, gradOutput)
adjW, adjH
)
- if type(input) == 'table' then
+ if torch.type(input) == 'table' then
-- Create a zero tensor to be expanded and used as gradInput[2].
self.zeroScalar = self.zeroScalar or input[2].new(1):zero()
self.ones:resize(input[2]:dim()):fill(1)
@@ -162,7 +162,7 @@ function SpatialFullConvolution:accGradParameters(input, gradOutput, scale)
-- The input can be a table where the second element indicates the target
-- output size, in which case the adj factors are computed automatically
- if type(inputTensor) == 'table' then
+ if torch.type(inputTensor) == 'table' then
inputTensor = input[1]
local targetTensor = input[2]
local tDims = targetTensor:dim()
diff --git a/VolumetricFullConvolution.lua b/VolumetricFullConvolution.lua
index 0ce2340..60843e7 100644
--- a/VolumetricFullConvolution.lua
+++ b/VolumetricFullConvolution.lua
@@ -93,7 +93,7 @@ function VolumetricFullConvolution:updateOutput(input)
-- The input can be a table where the second element indicates the target
-- output size, in which case the adj factors are computed automatically
- if type(inputTensor) == 'table' then
+ if torch.type(inputTensor) == 'table' then
inputTensor = input[1]
local targetTensor = input[2]
local tDims = targetTensor:dim()
@@ -128,7 +128,7 @@ function VolumetricFullConvolution:updateGradInput(input, gradOutput)
-- The input can be a table where the second element indicates the target
-- output size, in which case the adj factors are computed automatically
- if type(inputTensor) == 'table' then
+ if torch.type(inputTensor) == 'table' then
inputTensor = input[1]
local targetTensor = input[2]
local tDims = targetTensor:dim()
@@ -139,7 +139,7 @@ function VolumetricFullConvolution:updateGradInput(input, gradOutput)
adjW = calculateAdj(tW, self.kW, self.padW, self.dW)
adjH = calculateAdj(tH, self.kH, self.padH, self.dH)
-- Momentarily extract the gradInput tensor
- if type(self.gradInput) == 'table' then
+ if torch.type(self.gradInput) == 'table' then
self.gradInput = self.gradInput[1]
end
end
@@ -156,7 +156,7 @@ function VolumetricFullConvolution:updateGradInput(input, gradOutput)
adjT, adjW, adjH
)
- if type(input) == 'table' then
+ if torch.type(input) == 'table' then
-- Create a zero tensor to be expanded and used as gradInput[2].
self.zeroScalar = self.zeroScalar or input[2].new(1):zero()
self.ones:resize(input[2]:dim()):fill(1)
@@ -177,7 +177,7 @@ function VolumetricFullConvolution:accGradParameters(input, gradOutput, scale)
-- The input can be a table where the second element indicates the target
-- output size, in which case the adj factors are computed automatically
- if type(inputTensor) == 'table' then
+ if torch.type(inputTensor) == 'table' then
inputTensor = input[1]
local targetTensor = input[2]
local tDims = targetTensor:dim()
diff --git a/hessian.lua b/hessian.lua
index 33ef2b0..7518e1a 100644
--- a/hessian.lua
+++ b/hessian.lua
@@ -216,7 +216,7 @@ function nn.hessian.enable()
function nn.SpatialConvolution.initDiagHessianParameters(self)
initDiagHessianParameters(self,{'gradWeight','gradBias'},{'diagHessianWeight','diagHessianBias'})
end
-
+
----------------------------------------------------------------------
-- SpatialConvolutionLocal
----------------------------------------------------------------------
@@ -361,7 +361,7 @@ function nn.hessian.enable()
function nn.Sequential.parameters(self)
local function tinsert(to, from)
- if type(from) == 'table' then
+ if torch.type(from) == 'table' then
for i=1,#from do
tinsert(to,from[i])
end
diff --git a/utils.lua b/utils.lua
index 17b52af..09ce1b9 100644
--- a/utils.lua
+++ b/utils.lua
@@ -158,7 +158,7 @@ function nn.utils.addSingletonDimension(...)
else
view, t, dim = select(1,...)
assert(torch.isTensor(view),
- "output tensor expected, got " .. type(view))
+ "output tensor expected, got " .. torch.type(view))
end
assert(torch.isTensor(t), "input tensor expected")
@@ -202,14 +202,14 @@ end
-- nn.utils.clearState(self, '_buffer', '_buffer2')
function nn.utils.clear(self, ...)
local arg = {...}
- if #arg > 0 and type(arg[1]) == 'table' then
+ if #arg > 0 and torch.type(arg[1]) == 'table' then
arg = arg[1]
end
local function clear(f)
if self[f] then
if torch.isTensor(self[f]) then
self[f]:set()
- elseif type(self[f]) == 'table' then
+ elseif torch.type(self[f]) == 'table' then
self[f] = {}
else
self[f] = nil
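
For reference, torch.type is roughly the following wrapper over torch.typename; this is a sketch of torch7's helper, not part of this diff:

    -- Sketch: prefer the Torch class name; fall back to Lua's type()
    -- for values that are not torch objects (numbers, strings, plain
    -- tables, booleans).
    function torch.type(obj)
       return torch.typename(obj) or type(obj)
    end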