github.com/clementfarabet/lua---nnx.git
author     Clement Farabet <clement.farabet@gmail.com>    2011-07-06 05:41:09 +0400
committer  Clement Farabet <clement.farabet@gmail.com>    2011-07-06 05:41:09 +0400
commit     951ed4fd0b11aeb95c6d3958704cfb10d25e0bef (patch)
tree       5f2a73f26770bec68c09100bfc2e4b0d7946c1b0
parent     a74efe158d6c66929a8b8849c257a384f6de6aaf (diff)
Added new modules.
-rw-r--r--  Abs.lua                 30
-rw-r--r--  ConfusionMatrix.lua     72
-rw-r--r--  HardShrink.lua          11
-rw-r--r--  Narrow.lua              35
-rw-r--r--  Power.lua               37
-rw-r--r--  Sqrt.lua                34
-rw-r--r--  Square.lua              33
-rw-r--r--  generic/Abs.c           43
-rw-r--r--  generic/HardShrink.c    47
-rw-r--r--  generic/Template.c      28
-rw-r--r--  init.c                  11
-rw-r--r--  init.lua                15
-rw-r--r--  nnx-1.0-1.rockspec       7
13 files changed, 402 insertions(+), 1 deletion(-)
diff --git a/Abs.lua b/Abs.lua
new file mode 100644
index 0000000..ca17acf
--- /dev/null
+++ b/Abs.lua
@@ -0,0 +1,30 @@
+local Abs, parent = torch.class('nn.Abs', 'nn.Module')
+
+function Abs:__init(args)
+ parent.__init(self)
+ if args then
+ error(xlua.usage('nn.Abs',
+ 'a simple component-wise mapping: abs()',
+ 'abs = nn.Abs()\n'..
+ 'rectified = abs:forward(sometensor)',
+ {type='nil', help='no arg required'}))
+ end
+end
+
+function Abs:forward(input)
+ input.nn.Abs_forward(self, input)
+ return self.output
+end
+
+function Abs:backward(input, gradOutput)
+ input.nn.Abs_backward(self, input, gradOutput)
+ return self.gradInput
+end
+
+function Abs:write(file)
+ parent.write(self, file)
+end
+
+function Abs:read(file)
+ parent.read(self, file)
+end
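
For reference, a minimal usage sketch of the new nn.Abs module, assuming the torch/nn stack this commit targets and loading it through the nnx package; tensor sizes and values below are invented for illustration.

require 'nnx'
local abs = nn.Abs()
local x = torch.Tensor(2,3):fill(-1.5)          -- arbitrary input values
local y = abs:forward(x)                        -- every entry becomes 1.5
local gradIn = abs:backward(x, torch.Tensor(2,3):fill(1))
-- gradInput is +1 where x >= 0 and -1 where x < 0, scaled by gradOutput
print(y)
print(gradIn)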
diff --git a/ConfusionMatrix.lua b/ConfusionMatrix.lua
new file mode 100644
index 0000000..78809ca
--- /dev/null
+++ b/ConfusionMatrix.lua
@@ -0,0 +1,72 @@
+
+local ConfusionMatrix = torch.class('nn.ConfusionMatrix')
+
+function ConfusionMatrix:__init(nclasses)
+ self.mat = lab.zeros(nclasses,nclasses)
+ self.valids = lab.zeros(nclasses)
+ self.totalValid = 0
+ self.averageValid = 0
+end
+
+function ConfusionMatrix:add(prediction, target)
+ if type(prediction) == 'number' then
+ -- comparing numbers
+ self.mat[target][prediction] = self.mat[target][prediction] + 1
+ else
+ -- comparing vectors
+ local _,prediction = lab.max(prediction)
+ local _,target = lab.max(target)
+ self.mat[target[1]][prediction[1]] = self.mat[target[1]][prediction[1]] + 1
+ end
+end
+
+function ConfusionMatrix:zero()
+ self.mat:zero()
+ self.valids:zero()
+ self.totalValid = 0
+ self.averageValid = 0
+end
+
+function ConfusionMatrix:updateValids()
+ local total = 0
+ for t = 1,self.mat:size(1) do
+ self.valids[t] = self.mat[t][t] / self.mat:select(1,t):sum()
+ total = total + self.mat[t][t]
+ end
+ self.totalValid = total / self.mat:sum()
+ self.averageValid = 0
+ local nvalids = 0
+ for t = 1,self.mat:size(1) do
+ if not xlua.isNaN(self.valids[t]) then
+ self.averageValid = self.averageValid + self.valids[t]
+ nvalids = nvalids + 1
+ end
+ end
+ self.averageValid = self.averageValid / nvalids
+end
+
+function ConfusionMatrix:__tostring__()
+ self:updateValids()
+ local str = 'ConfusionMatrix:\n'
+ local nclasses = self.mat:size(1)
+ str = str .. '['
+ for t = 1,nclasses do
+ local pclass = self.valids[t] * 100
+ if t == 1 then
+ str = str .. '['
+ else
+ str = str .. ' ['
+ end
+ for p = 1,nclasses do
+ str = str .. '' .. string.format('%8d\t', self.mat[t][p])
+ end
+ if t == nclasses then
+ str = str .. ']] ' .. pclass .. '% \n'
+ else
+ str = str .. '] ' .. pclass .. '% \n'
+ end
+ end
+ str = str .. ' + average row correct: ' .. (self.averageValid*100) .. '% \n'
+ str = str .. ' + global correct: ' .. (self.totalValid*100) .. '%'
+ return str
+end
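
A short sketch of how the confusion matrix is meant to be used; the class count and sample predictions are invented, and lab is required explicitly since the class relies on lab.zeros and lab.max.

require 'nnx'
require 'lab'
local cm = nn.ConfusionMatrix(3)   -- 3 classes
cm:add(2, 2)                       -- predicted class 2, true class 2 (a hit)
cm:add(1, 3)                       -- predicted class 1, true class 3 (a miss)
-- add() also accepts score / one-hot vectors; lab.max picks the winning index
print(cm:__tostring__())           -- updateValids() runs here, then the table is formatted
cm:zero()                          -- reset the counters, e.g. between epochs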
diff --git a/HardShrink.lua b/HardShrink.lua
new file mode 100644
index 0000000..b33fd6c
--- /dev/null
+++ b/HardShrink.lua
@@ -0,0 +1,11 @@
+local HardShrink = torch.class('nn.HardShrink', 'nn.Module')
+
+function HardShrink:forward(input)
+ input.nn.HardShrink_forward(self, input)
+ return self.output
+end
+
+function HardShrink:backward(input, gradOutput)
+ input.nn.HardShrink_backward(self, input, gradOutput)
+ return self.gradInput
+end
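
nn.HardShrink defines no Lua-side __init; it inherits nn.Module's constructor, and both passes dispatch to the C kernels in generic/HardShrink.c added further down, which hard-code a 0.5 threshold. A minimal sketch with an arbitrary input tensor:

require 'nnx'
local shrink = nn.HardShrink()
local x = torch.Tensor(5):fill(0.2)
print(shrink:forward(x))    -- entries with magnitude below the 0.5 threshold map to 0
print(shrink:backward(x, torch.Tensor(5):fill(1)))   -- gradient is 0 in the shrunk region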
diff --git a/Narrow.lua b/Narrow.lua
index c9b7b89..5c6d07e 100644
--- a/Narrow.lua
+++ b/Narrow.lua
@@ -1,10 +1,45 @@
local Narrow, parent = torch.class('nn.Narrow', 'nn.Module')
+local help_desc =
+[[Selects a subset of a dimension of an nxpxqx.. Tensor.]]
+
+local help_example =
+[[mlp=nn.Sequential();
+mlp:add(nn.Narrow(1,3,2))
+
+require "lab"
+x=lab.randn(10,5)
+print(x)
+print(mlp:forward(x))
+
+-- gives the output:
+ 0.9720 -0.0836 0.0831 -0.2059 -0.0871
+ 0.8750 -2.0432 -0.1295 -2.3932 0.8168
+ 0.0369 1.1633 0.6483 1.2862 0.6596
+ 0.1667 -0.5704 -0.7303 0.3697 -2.2941
+ 0.4794 2.0636 0.3502 0.3560 -0.5500
+-0.1898 -1.1547 0.1145 -1.1399 0.1711
+-1.5130 1.4445 0.2356 -0.5393 -0.6222
+-0.6587 0.4314 1.1916 -1.4509 1.9400
+ 0.2733 1.0911 0.7667 0.4002 0.1646
+ 0.5804 -0.5333 1.1621 1.5683 -0.1978
+[torch.Tensor of dimension 10x5]
+
+ 0.0369 1.1633 0.6483 1.2862 0.6596
+ 0.1667 -0.5704 -0.7303 0.3697 -2.2941
+[torch.Tensor of dimension 2x5] ]]
+
function Narrow:__init(dimension,offset,length)
parent.__init(self)
self.dimension=dimension
self.index=offset
self.length=length or 1
+ if not dimension or not offset then
+ error(xlua.usage('nn.Narrow', help_desc, help_example,
+ {type='number', help='dimension', req=true},
+ {type='number', help='offset', req=true},
+ {type='number', help='length', default=1}))
+ end
end
function Narrow:forward(input)
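
With the new argument check, constructing nn.Narrow without a dimension and offset now raises the xlua.usage message instead of failing later in forward. A quick sketch, with invented tensor sizes:

require 'nnx'
require 'lab'
local narrow = nn.Narrow(1, 3, 2)    -- dimension 1, offset 3, length 2
local x = lab.randn(10, 5)
print(narrow:forward(x))             -- rows 3 and 4 of x, as in the help example above
print(pcall(function() return nn.Narrow(1) end))   -- false plus the usage text: offset is required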
diff --git a/Power.lua b/Power.lua
new file mode 100644
index 0000000..c0b60a1
--- /dev/null
+++ b/Power.lua
@@ -0,0 +1,37 @@
+
+local Power, parent = torch.class('nn.Power','nn.Module')
+
+function Power:__init(p)
+ parent.__init(self)
+ self.pow = p
+ if not p then
+ error(xlua.usage('nn.Power',
+ 'a simple component-wise mapping: power(p)',
+ 'pow = nn.Power(p)\n'..
+ 'powered = pow:forward(sometensor)',
+ {type='number', help='exponent p', req=true}))
+ end
+end
+
+function Power:forward(input)
+ self.output:resizeAs(input):copy(input)
+ self.output:pow(self.pow)
+ return self.output
+end
+
+function Power:backward(input, gradOutput)
+ self.gradInput:resizeAs(input):copy(gradOutput)
+ self.gradInput:cmul(self.output):cdiv(input):mul(self.pow)
+ return self.gradInput
+end
+
+
+function Power:write(file)
+ parent.write(self,file)
+ file:writeDouble(self.pow)
+end
+
+function Power:read(file)
+ parent.read(self,file)
+ self.pow = file:readDouble()
+end
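
A short sketch of nn.Power; the exponent and tensor values are illustrative. Note that backward reuses the cached self.output, so it is only valid after a forward on the same input.

require 'nnx'
local pow = nn.Power(2)
local x = torch.Tensor(3):fill(4)
print(pow:forward(x))                              -- each entry becomes 16
-- gradInput = gradOutput * p * output / input, i.e. gradOutput * p * x^(p-1)
print(pow:backward(x, torch.Tensor(3):fill(1)))    -- each entry becomes 8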
diff --git a/Sqrt.lua b/Sqrt.lua
new file mode 100644
index 0000000..4c0427b
--- /dev/null
+++ b/Sqrt.lua
@@ -0,0 +1,34 @@
+
+local Sqrt, parent = torch.class('nn.Sqrt','nn.Module')
+
+function Sqrt:__init(args)
+ parent.__init(self)
+ if args then
+ error(xlua.usage('nn.Sqrt',
+ 'a simple component-wise mapping: sqrt()',
+ 'sq = nn.Sqrt()\n'..
+ 'sqrt = sq:forward(sometensor)',
+ {type='nil', help='no arg required'}))
+ end
+end
+
+function Sqrt:forward(input)
+ self.output:resizeAs(input):copy(input)
+ self.output:sqrt()
+ return self.output
+end
+
+function Sqrt:backward(input, gradOutput)
+ self.gradInput:resizeAs(input):copy(gradOutput)
+ self.gradInput:cdiv(self.output):mul(0.5)
+ return self.gradInput
+end
+
+
+function Sqrt:write(file)
+ parent.write(self,file)
+end
+
+function Sqrt:read(file)
+ parent.read(self,file)
+end
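
Same pattern for nn.Sqrt: the derivative 1/(2*sqrt(x)) is formed from the cached output, so backward again assumes a preceding forward on the same input. Illustrative values:

require 'nnx'
local sq = nn.Sqrt()
local x = torch.Tensor(3):fill(9)
print(sq:forward(x))                               -- each entry becomes 3
print(sq:backward(x, torch.Tensor(3):fill(1)))     -- 1/(2*3) = 0.1667 everywhere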
diff --git a/Square.lua b/Square.lua
new file mode 100644
index 0000000..30e8cb9
--- /dev/null
+++ b/Square.lua
@@ -0,0 +1,33 @@
+local Square, parent = torch.class('nn.Square','nn.Module')
+
+function Square:__init(args)
+ parent.__init(self)
+ if args then
+ error(xlua.usage('nn.Square',
+ 'a simple component-wise mapping: square()',
+ 'sq = nn.Square()\n'..
+ 'squared = sq:forward(sometensor)',
+ {type='nil', help='no arg required'}))
+ end
+end
+
+function Square:forward(input)
+ self.output:resizeAs(input):copy(input)
+ self.output:cmul(input)
+ return self.output
+end
+
+function Square:backward(input, gradOutput)
+ self.gradInput:resizeAs(input):copy(gradOutput)
+ self.gradInput:cmul(input):mul(2)
+ return self.gradInput
+end
+
+
+function Square:write(file)
+ parent.write(self,file)
+end
+
+function Square:read(file)
+ parent.read(self,file)
+end
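
nn.Square is the p = 2 special case of nn.Power, with the gradient 2*x computed directly from the input rather than via the cached output. A quick numeric cross-check with arbitrary values:

require 'nnx'
local x = torch.Tensor(4):fill(-3)
local g = torch.Tensor(4):fill(1)
local square, pow2 = nn.Square(), nn.Power(2)
print(square:forward(x), pow2:forward(x))          -- both give 9 everywhere
print(square:backward(x, g), pow2:backward(x, g))  -- both give -6 everywhere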
diff --git a/generic/Abs.c b/generic/Abs.c
new file mode 100644
index 0000000..c45c0ab
--- /dev/null
+++ b/generic/Abs.c
@@ -0,0 +1,43 @@
+#ifndef TH_GENERIC_FILE
+#define TH_GENERIC_FILE "generic/Abs.c"
+#else
+
+static int nn_(Abs_forward)(lua_State *L)
+{
+ THTensor *input = luaT_checkudata(L, 2, torch_(Tensor_id));
+ THTensor *output = luaT_getfieldcheckudata(L, 1, "output", torch_(Tensor_id));
+
+ THTensor_(resizeAs)(output, input);
+
+ TH_TENSOR_APPLY2(real, output, real, input, \
+ *output_data = fabs(*input_data);)
+ return 1;
+}
+
+static int nn_(Abs_backward)(lua_State *L)
+{
+ THTensor *input = luaT_checkudata(L, 2, torch_(Tensor_id));
+ THTensor *gradOutput = luaT_checkudata(L, 3, torch_(Tensor_id));
+ THTensor *gradInput = luaT_getfieldcheckudata(L, 1, "gradInput", torch_(Tensor_id));
+
+ THTensor_(resizeAs)(gradInput, input);
+ TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, input, \
+ real z = *input_data; \
+ *gradInput_data = *gradOutput_data * (z >= 0 ? 1 : -1);)
+ return 1;
+}
+
+static const struct luaL_Reg nn_(Abs__) [] = {
+ {"Abs_forward", nn_(Abs_forward)},
+ {"Abs_backward", nn_(Abs_backward)},
+ {NULL, NULL}
+};
+
+static void nn_(Abs_init)(lua_State *L)
+{
+ luaT_pushmetaclass(L, torch_(Tensor_id));
+ luaT_registeratname(L, nn_(Abs__), "nn");
+ lua_pop(L,1);
+}
+
+#endif
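
How the Lua and C halves meet: nn_(Abs_init) registers the two functions in a table named 'nn' on each Tensor metatable (the luaT calls above), and that table is what Abs.lua's input.nn.Abs_forward(self, input) looks up at run time. A sketch of the dispatch with an arbitrary tensor:

require 'nnx'
local x = torch.Tensor(3):fill(-2)
-- x.nn is the per-type method table filled by luaT_registeratname
print(x.nn.Abs_forward ~= nil)    -- true once libnnx is loaded
print(nn.Abs():forward(x))        -- 2 2 2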
diff --git a/generic/HardShrink.c b/generic/HardShrink.c
new file mode 100644
index 0000000..d3bcfcd
--- /dev/null
+++ b/generic/HardShrink.c
@@ -0,0 +1,47 @@
+#ifndef TH_GENERIC_FILE
+#define TH_GENERIC_FILE "generic/HardShrink.c"
+#else
+
+static int nn_(HardShrink_forward)(lua_State *L)
+{
+ THTensor *input = luaT_checkudata(L, 2, torch_(Tensor_id));
+ THTensor *output = luaT_getfieldcheckudata(L, 1, "output", torch_(Tensor_id));
+
+ THTensor_(resizeAs)(output, input);
+
+ TH_TENSOR_APPLY2(real, output, real, input, \
+ if ((*input_data) > 0.5) *output_data = *input_data - 0.5; \
+ else if ((*input_data) < -0.5) *output_data = *input_data + 0.5; \
+ else *output_data = 0;);
+ return 1;
+}
+
+static int nn_(HardShrink_backward)(lua_State *L)
+{
+ THTensor *input = luaT_checkudata(L, 2, torch_(Tensor_id));
+ THTensor *gradOutput = luaT_checkudata(L, 3, torch_(Tensor_id));
+ THTensor *gradInput = luaT_getfieldcheckudata(L, 1, "gradInput", torch_(Tensor_id));
+
+ THTensor_(resizeAs)(gradInput, input);
+ TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, input, \
+ if ((*input_data) > 0.5) *gradInput_data = 1; \
+ else if ((*input_data) < -0.5) *gradInput_data = 1; \
+ else *gradInput_data = 0; \
+ *gradInput_data = (*gradOutput_data) * (*gradInput_data););
+ return 1;
+}
+
+static const struct luaL_Reg nn_(HardShrink__) [] = {
+ {"HardShrink_forward", nn_(HardShrink_forward)},
+ {"HardShrink_backward", nn_(HardShrink_backward)},
+ {NULL, NULL}
+};
+
+static void nn_(HardShrink_init)(lua_State *L)
+{
+ luaT_pushmetaclass(L, torch_(Tensor_id));
+ luaT_registeratname(L, nn_(HardShrink__), "nn");
+ lua_pop(L,1);
+}
+
+#endif
diff --git a/generic/Template.c b/generic/Template.c
new file mode 100644
index 0000000..9d1f444
--- /dev/null
+++ b/generic/Template.c
@@ -0,0 +1,28 @@
+#ifndef TH_GENERIC_FILE
+#define TH_GENERIC_FILE "generic/Template.c"
+#else
+
+static int nn_(Template_forward)(lua_State *L)
+{
+
+}
+
+static int nn_(Template_backward)(lua_State *L)
+{
+
+}
+
+static const struct luaL_Reg nn_(Template__) [] = {
+ {"Template_forward", nn_(Template_forward)},
+ {"Template_backward", nn_(Template_backward)},
+ {NULL, NULL}
+};
+
+static void nn_(Template_init)(lua_State *L)
+{
+ luaT_pushmetaclass(L, torch_(Tensor_id));
+ luaT_registeratname(L, nn_(Template__), "nn");
+ lua_pop(L,1);
+}
+
+#endif
diff --git a/init.c b/init.c
index 8721306..c02baab 100644
--- a/init.c
+++ b/init.c
@@ -8,6 +8,12 @@
static const void* torch_FloatTensor_id = NULL;
static const void* torch_DoubleTensor_id = NULL;
+#include "generic/Abs.c"
+#include "THGenerateFloatTypes.h"
+
+#include "generic/HardShrink.c"
+#include "THGenerateFloatTypes.h"
+
#include "generic/SpatialLinear.c"
#include "THGenerateFloatTypes.h"
@@ -17,7 +23,12 @@ DLL_EXPORT int luaopen_libnnx(lua_State *L)
torch_DoubleTensor_id = luaT_checktypename2id(L, "torch.DoubleTensor");
nn_FloatSpatialLinear_init(L);
+ nn_FloatHardShrink_init(L);
+ nn_FloatAbs_init(L);
+
nn_DoubleSpatialLinear_init(L);
+ nn_DoubleHardShrink_init(L);
+ nn_DoubleAbs_init(L);
return 1;
}
diff --git a/init.lua b/init.lua
index 7ff3074..5d95537 100644
--- a/init.lua
+++ b/init.lua
@@ -31,6 +31,7 @@
----------------------------------------------------------------------
require 'torch'
+require 'xlua'
require 'nn'
-- create global nnx table:
@@ -43,6 +44,18 @@ require 'libnnx'
torch.include('nnx', 'jacobian.lua')
torch.include('nnx', 'test-all.lua')
--- modules:
+-- tools:
+torch.include('nnx', 'ConfusionMatrix.lua')
+
+-- pointwise modules:
+torch.include('nnx', 'Abs.lua')
+torch.include('nnx', 'Power.lua')
+torch.include('nnx', 'Square.lua')
+torch.include('nnx', 'Sqrt.lua')
+torch.include('nnx', 'HardShrink.lua')
+
+-- reshapers:
torch.include('nnx', 'Narrow.lua')
+
+-- spatial (images) operators:
torch.include('nnx', 'SpatialLinear.lua')
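
After this change a single require pulls in everything listed above, and xlua is now a hard dependency for the usage/error helpers. A minimal smoke test, with the module list taken from the includes above:

require 'nnx'
-- the new classes are all registered under the nn table:
print(nn.Abs, nn.Power, nn.Square, nn.Sqrt, nn.HardShrink, nn.ConfusionMatrix, nn.Narrow)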
diff --git a/nnx-1.0-1.rockspec b/nnx-1.0-1.rockspec
index e34aa0b..0301702 100644
--- a/nnx-1.0-1.rockspec
+++ b/nnx-1.0-1.rockspec
@@ -22,6 +22,7 @@ description = {
dependencies = {
"lua >= 5.1",
"torch",
+ "xlua",
"lunit"
}
@@ -47,7 +48,13 @@ build = {
target_link_libraries (nnx ${TORCH_LIBRARIES})
install_files(/lua/nnx init.lua)
+ install_files(/lua/nnx Abs.lua)
+ install_files(/lua/nnx ConfusionMatrix.lua)
+ install_files(/lua/nnx HardShrink.lua)
install_files(/lua/nnx Narrow.lua)
+ install_files(/lua/nnx Power.lua)
+ install_files(/lua/nnx Square.lua)
+ install_files(/lua/nnx Sqrt.lua)
install_files(/lua/nnx SpatialLinear.lua)
add_subdirectory (test)
install_targets(/lib nnx)