github.com/clementfarabet/lua---nnx.git

 FindTorch.cmake         |  71 +
 SpatialLinear.lua       |  87 +
 generic/SpatialLinear.c | 131 +
 init.c                  |  23 +
 init.lua                |   4 +-
 nnx-1.0-1.rockspec      |  17 +-
 test/jacobian.lua       | 109 +
 test/test-all.lua       |  32 +
 8 files changed, 466 insertions(+), 8 deletions(-)
diff --git a/FindTorch.cmake b/FindTorch.cmake
new file mode 100644
index 0000000..8ada8cc
--- /dev/null
+++ b/FindTorch.cmake
@@ -0,0 +1,71 @@
+
+# Find Torch (luaT+TH)
+
+if (TORCH_PREFIX)
+ find_program (TORCH_EXECUTABLE lua ${TORCH_PREFIX}/bin NO_DEFAULT_PATH)
+endif (TORCH_PREFIX)
+
+if (NOT TORCH_EXECUTABLE)
+ find_program (TORCH_EXECUTABLE lua)
+endif (NOT TORCH_EXECUTABLE)
+
+if (TORCH_EXECUTABLE)
+ get_filename_component (TORCH_BIN_DIR ${TORCH_EXECUTABLE} PATH)
+endif (TORCH_EXECUTABLE)
+
+find_library (TORCH_TH TH ${TORCH_BIN_DIR}/../lib)
+find_library (TORCH_luaT luaT ${TORCH_BIN_DIR}/../lib)
+find_library (TORCH_lua lua ${TORCH_BIN_DIR}/../lib)
+
+set (TORCH_LIBRARIES ${TORCH_TH} ${TORCH_luaT} ${TORCH_lua})
+
+find_path (TORCH_INCLUDE_DIR lua.h
+ ${TORCH_BIN_DIR}/../include/
+ NO_DEFAULT_PATH)
+
+set (TORCH_INCLUDE_DIR ${TORCH_INCLUDE_DIR} ${TORCH_INCLUDE_DIR}/TH)
+
+set (TORCH_PACKAGE_PATH "${TORCH_BIN_DIR}/../share/lua/5.1" CACHE PATH "where Lua searches for Lua packages")
+set (TORCH_PACKAGE_CPATH "${TORCH_BIN_DIR}/../lib/lua/5.1" CACHE PATH "where Lua searches for library packages")
+
+set (TORCH_PREFIX ${TORCH_BIN_DIR}/..)
+
+mark_as_advanced (
+ TORCH_PREFIX
+ TORCH_EXECUTABLE
+ TORCH_LIBRARIES
+ TORCH_INCLUDE_DIR
+ TORCH_PACKAGE_PATH
+ TORCH_PACKAGE_CPATH
+)
+
+set (TORCH_FOUND 1)
+if (NOT TORCH_TH)
+ set (TORCH_FOUND 0)
+endif (NOT TORCH_TH)
+if (NOT TORCH_lua)
+ set (TORCH_FOUND 0)
+endif (NOT TORCH_lua)
+if (NOT TORCH_luaT)
+ set (TORCH_FOUND 0)
+endif (NOT TORCH_luaT)
+if (NOT TORCH_EXECUTABLE)
+ set (TORCH_FOUND 0)
+endif (NOT TORCH_EXECUTABLE)
+if (NOT TORCH_INCLUDE_DIR)
+ set (TORCH_FOUND 0)
+endif (NOT TORCH_INCLUDE_DIR)
+
+if (NOT TORCH_FOUND AND Lua_FIND_REQUIRED)
+ message (FATAL_ERROR "Could not find Torch/Lua -- please install it!")
+elseif (NOT TORCH_FOUND AND NOT Lua_FIND_REQUIRED)
+ message (STATUS "Torch/Lua not found")
+endif (NOT TORCH_FOUND AND Lua_FIND_REQUIRED)
+
+if (NOT Lua_FIND_QUIETLY)
+ if (TORCH_FOUND)
+ message (STATUS "Lua bin found in " ${TORCH_BIN_DIR})
+ else (TORCH_FOUND)
+ message (STATUS "Lua bin not found. Please specify location")
+ endif (TORCH_FOUND)
+endif (NOT Lua_FIND_QUIETLY)
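
A downstream CMakeLists.txt consumes this module with find_package (Torch REQUIRED), then compiles against ${TORCH_INCLUDE_DIR} and links ${TORCH_LIBRARIES}; the embedded CMake fragment in the rockspec diff below does exactly that.
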
diff --git a/SpatialLinear.lua b/SpatialLinear.lua
new file mode 100644
index 0000000..6e16359
--- /dev/null
+++ b/SpatialLinear.lua
@@ -0,0 +1,87 @@
+local SpatialLinear, parent = torch.class('nn.SpatialLinear', 'nn.Module')
+
+function SpatialLinear:__init(fanin, fanout)
+ parent.__init(self)
+
+ self.fanin = fanin or 1
+ self.fanout = fanout or 1
+
+ self.weightDecay = 0
+ self.weight = torch.Tensor(self.fanout, self.fanin)
+ self.bias = torch.Tensor(self.fanout)
+ self.gradWeight = torch.Tensor(self.fanout, self.fanin)
+ self.gradBias = torch.Tensor(self.fanout)
+
+ self.output = torch.Tensor(fanout,1,1)
+ self.gradInput = torch.Tensor(fanin,1,1)
+
+ self:reset()
+end
+
+function SpatialLinear:reset(stdv)
+ if stdv then
+ stdv = stdv * math.sqrt(3)
+ else
+ -- default: 1/sqrt(fanin); weight is (fanout x fanin)
+ stdv = 1./math.sqrt(self.weight:size(2))
+ end
+ for i=1,self.weight:size(1) do
+ self.weight:select(1, i):apply(function()
+ return random.uniform(-stdv, stdv)
+ end)
+ self.bias[i] = random.uniform(-stdv, stdv)
+ end
+end
+
+function SpatialLinear:zeroGradParameters(momentum)
+ if momentum then
+ self.gradWeight:mul(momentum)
+ self.gradBias:mul(momentum)
+ else
+ self.gradWeight:zero()
+ self.gradBias:zero()
+ end
+end
+
+function SpatialLinear:updateParameters(learningRate)
+ self.weight:add(-learningRate, self.gradWeight)
+ self.bias:add(-learningRate, self.gradBias)
+end
+
+function SpatialLinear:decayParameters(decay)
+ self.weight:add(-decay, self.weight)
+ self.bias:add(-decay, self.bias)
+end
+
+function SpatialLinear:forward(input)
+ self.output:resize(self.fanout, input:size(2), input:size(3))
+ input.nn.SpatialLinear_forward(self, input)
+ return self.output
+end
+
+function SpatialLinear:backward(input, gradOutput)
+ self.gradInput:resize(self.fanin, input:size(2), input:size(3))
+ input.nn.SpatialLinear_backward(self, input, gradOutput)
+ return self.gradInput
+end
+
+function SpatialLinear:write(file)
+ parent.write(self, file)
+ file:writeInt(self.fanin)
+ file:writeInt(self.fanout)
+ file:writeDouble(self.weightDecay)
+ file:writeObject(self.weight)
+ file:writeObject(self.bias)
+ file:writeObject(self.gradWeight)
+ file:writeObject(self.gradBias)
+end
+
+function SpatialLinear:read(file)
+ parent.read(self, file)
+ self.fanin = file:readInt()
+ self.fanout = file:readInt()
+ self.weightDecay = file:readDouble()
+ self.weight = file:readObject()
+ self.bias = file:readObject()
+ self.gradWeight = file:readObject()
+ self.gradBias = file:readObject()
+end
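
For reference, a minimal usage sketch of the new module (not part of this commit; it assumes the lab package for random tensors): nn.SpatialLinear applies one fanin-to-fanout linear map, shared across all locations, to a (fanin x height x width) input.

    require 'nnx'
    require 'lab'

    local fanin, fanout = 3, 5
    local module = nn.SpatialLinear(fanin, fanout)
    local input  = lab.rand(fanin, 4, 6)    -- 3 input planes over a 4x6 grid
    local output = module:forward(input)    -- 5 output planes over the same grid
    print(output:size())                    -- 5 x 4 x 6
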
diff --git a/generic/SpatialLinear.c b/generic/SpatialLinear.c
new file mode 100644
index 0000000..487d2f8
--- /dev/null
+++ b/generic/SpatialLinear.c
@@ -0,0 +1,131 @@
+#ifndef TH_GENERIC_FILE
+#define TH_GENERIC_FILE "generic/SpatialLinear.c"
+#else
+
+static int nn_(SpatialLinear_forward)(lua_State *L)
+{
+ // get all params
+ THTensor *input = luaT_checkudata(L, 2, torch_(Tensor_id));
+ THTensor *bias = luaT_getfieldcheckudata(L, 1, "bias", torch_(Tensor_id));
+ THTensor *weight = luaT_getfieldcheckudata(L, 1, "weight", torch_(Tensor_id));
+ THTensor *output = luaT_getfieldcheckudata(L, 1, "output", torch_(Tensor_id));
+
+ // dims
+ int iwidth = input->size[2];
+ int iheight = input->size[1];
+ int ichannels = input->size[0];
+ int owidth = iwidth;
+ int oheight = iheight;
+ int ochannels = output->size[0];
+
+ // select planes
+ THTensor *outputPlane = THTensor_(new)();
+ THTensor *inputPlane = THTensor_(new)();
+
+ // process each plane
+ int ok,ik;
+ for (ok=0; ok<ochannels; ok++) {
+ // get output plane
+ THTensor_(select)(outputPlane, output, 0, ok);
+ THTensor_(fill)(outputPlane, THTensor_(get1d)(bias,ok));
+ for (ik=0; ik<ichannels; ik++) {
+ // get input plane
+ THTensor_(select)(inputPlane, input, 0, ik);
+ THTensor_(cadd)(outputPlane, THTensor_(get2d)(weight,ok,ik), inputPlane);
+ }
+ }
+
+ // cleanup
+ THTensor_(free)(inputPlane);
+ THTensor_(free)(outputPlane);
+ return 1;
+}
+
+static int nn_(SpatialLinear_backward)(lua_State *L)
+{
+ // get all params
+ THTensor *input = luaT_checkudata(L, 2, torch_(Tensor_id));
+ THTensor *gradOutput = luaT_checkudata(L, 3, torch_(Tensor_id));
+ THTensor *gradInput = luaT_getfieldcheckudata(L, 1, "gradInput", torch_(Tensor_id));
+ THTensor *weight = luaT_getfieldcheckudata(L, 1, "weight", torch_(Tensor_id));
+ THTensor *bias = luaT_getfieldcheckudata(L, 1, "bias", torch_(Tensor_id));
+ THTensor *gradWeight = luaT_getfieldcheckudata(L, 1, "gradWeight", torch_(Tensor_id));
+ THTensor *gradBias = luaT_getfieldcheckudata(L, 1, "gradBias", torch_(Tensor_id));
+ double weightDecay = luaT_getfieldchecknumber(L, 1, "weightDecay");
+
+ // dims
+ int iwidth = input->size[2];
+ int iheight = input->size[1];
+ int ichannels = input->size[0];
+ int owidth = gradOutput->size[2];
+ int oheight = gradOutput->size[1];
+ int ochannels = gradOutput->size[0];
+
+ // gradInput is resized by the Lua wrapper; just zero it here
+ THTensor_(zero)(gradInput);
+
+ // select planes
+ THTensor *gradOutput_xy = THTensor_(new)();
+ THTensor *gradOutput_y = THTensor_(new)();
+ THTensor *gradInput_xy = THTensor_(new)();
+ THTensor *gradInput_y = THTensor_(new)();
+ THTensor *input_xy = THTensor_(new)();
+ THTensor *input_y = THTensor_(new)();
+
+ // transpose weight
+ THTensor *weight_t = THTensor_(newTranspose)(weight,0,1);
+
+ // compute gradient
+ int x,y;
+ for (y=0; y<oheight; y++) {
+
+ // select rows
+ THTensor_(select)(gradOutput_y, gradOutput, 1, y);
+ THTensor_(select)(gradInput_y, gradInput, 1, y);
+ THTensor_(select)(input_y, input, 1, y);
+
+ for (x=0; x<owidth; x++) {
+
+ // select cols
+ THTensor_(select)(gradOutput_xy, gradOutput_y, 1, x);
+ THTensor_(select)(gradInput_xy, gradInput_y, 1, x);
+ THTensor_(select)(input_xy, input_y, 1, x);
+
+ // compute dE/dW and dE/dB
+ THTensor_(addr)(gradWeight, 1, gradOutput_xy, input_xy);
+ THTensor_(cadd)(gradBias, 1, gradOutput_xy);
+
+ // weight decay: gradWeight += weightDecay * weight
+ if (weightDecay != 0) {
+ THTensor_(cadd)(gradWeight, weightDecay, weight);
+ }
+
+ // compute dE/dI
+ THTensor_(addmv)(gradInput_xy, 1, weight_t, gradOutput_xy);
+ }
+ }
+
+ // cleanup (weight_t was allocated by newTranspose above)
+ THTensor_(free)(weight_t);
+ THTensor_(free)(gradInput_xy);
+ THTensor_(free)(gradInput_y);
+ THTensor_(free)(gradOutput_xy);
+ THTensor_(free)(gradOutput_y);
+ THTensor_(free)(input_xy);
+ THTensor_(free)(input_y);
+ return 1;
+}
+
+static const struct luaL_Reg nn_(SpatialLinear__) [] = {
+ {"SpatialLinear_forward", nn_(SpatialLinear_forward)},
+ {"SpatialLinear_backward", nn_(SpatialLinear_backward)},
+ {NULL, NULL}
+};
+
+static void nn_(SpatialLinear_init)(lua_State *L)
+{
+ luaT_pushmetaclass(L, torch_(Tensor_id));
+ luaT_registeratname(L, nn_(SpatialLinear__), "nn");
+ lua_pop(L,1);
+}
+
+#endif
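
The forward kernel above computes, at every location (y,x), output[o] = bias[o] + sum_i weight[o][i] * input[i], accumulating whole planes at a time. A plain-Lua sketch of the same semantics, for reading along (it assumes torch7-style tensor:add(scalar, tensor) accumulation and is not the shipped implementation):

    -- reference semantics of nn_(SpatialLinear_forward)
    function spatialLinearForwardRef(weight, bias, input)
       local fanout, fanin = weight:size(1), weight:size(2)
       local h, w = input:size(2), input:size(3)
       local output = torch.Tensor(fanout, h, w)
       for o = 1, fanout do
          output[o]:fill(bias[o])              -- start each plane at its bias
          for i = 1, fanin do
             -- output[o] += weight[o][i] * input[i], over the whole plane
             output[o]:add(weight[o][i], input[i])
          end
       end
       return output
    end
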
diff --git a/init.c b/init.c
new file mode 100644
index 0000000..8721306
--- /dev/null
+++ b/init.c
@@ -0,0 +1,23 @@
+#include "TH.h"
+#include "luaT.h"
+
+#define torch_(NAME) TH_CONCAT_3(torch_, Real, NAME)
+#define torch_string_(NAME) TH_CONCAT_STRING_3(torch., Real, NAME)
+#define nn_(NAME) TH_CONCAT_3(nn_, Real, NAME)
+
+static const void* torch_FloatTensor_id = NULL;
+static const void* torch_DoubleTensor_id = NULL;
+
+#include "generic/SpatialLinear.c"
+#include "THGenerateFloatTypes.h"
+
+DLL_EXPORT int luaopen_libnnx(lua_State *L)
+{
+ torch_FloatTensor_id = luaT_checktypename2id(L, "torch.FloatTensor");
+ torch_DoubleTensor_id = luaT_checktypename2id(L, "torch.DoubleTensor");
+
+ nn_FloatSpatialLinear_init(L);
+ nn_DoubleSpatialLinear_init(L);
+
+ return 1;
+}
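
luaopen_libnnx registers the Float and Double kernels under the nn field of the corresponding tensor metatables (via nn_(SpatialLinear_init) above), so the wrapper call input.nn.SpatialLinear_forward(self, input) dispatches on the tensor's type. A quick sketch of that dispatch (assuming the library is built and on package.cpath):

    require 'torch'
    require 'libnnx'

    local f = torch.FloatTensor(3, 4, 4)
    local d = torch.DoubleTensor(3, 4, 4)
    -- same field name, different C kernel behind each tensor type:
    print(f.nn.SpatialLinear_forward)
    print(d.nn.SpatialLinear_forward)
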
diff --git a/init.lua b/init.lua
index ff92c6b..dcb7533 100644
--- a/init.lua
+++ b/init.lua
@@ -1,6 +1,8 @@
require 'torch'
require 'nn'
-require 'nnx'
+
+require 'libnnx'
torch.include('nnx', 'Narrow.lua')
+torch.include('nnx', 'SpatialLinear.lua')
diff --git a/nnx-1.0-1.rockspec b/nnx-1.0-1.rockspec
index 2d4073a..48946a2 100644
--- a/nnx-1.0-1.rockspec
+++ b/nnx-1.0-1.rockspec
@@ -21,7 +21,8 @@ description = {
dependencies = {
"lua >= 5.1",
- "xlua"
+ "torch",
+ "lunit"
}
build = {
@@ -38,15 +39,17 @@ build = {
find_package (Torch REQUIRED)
- SET(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)
+ set (CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)
- include_directories (${TORCH_INCLUDE_DIR})
- #add_library (nnx SHARED init.c)
- #link_directories (${TORCH_LIBRARY_DIR})
- #target_link_libraries (nnx ${TORCH_LIBRARIES})
+ include_directories (${TORCH_INCLUDE_DIR} ${PROJECT_SOURCE_DIR})
+ add_library (nnx SHARED init.c)
+ link_directories (${TORCH_LIBRARY_DIR})
+ target_link_libraries (nnx ${TORCH_LIBRARIES})
install_files(/lua/nnx init.lua)
- #install_targets(/lib nnx)
+ install_files(/lua/nnx Narrow.lua)
+ install_files(/lua/nnx SpatialLinear.lua)
+ install_targets(/lib nnx)
]],
variables = {
diff --git a/test/jacobian.lua b/test/jacobian.lua
new file mode 100644
index 0000000..2ceb08f
--- /dev/null
+++ b/test/jacobian.lua
@@ -0,0 +1,109 @@
+
+require 'torch'
+require 'lab' -- lab.rand is used by the checkers below
+require 'nn'
+require 'random'
+
+function get_jac_bprop (module, input, param, dparam)
+ local doparam = 0
+ if param then
+ doparam = 1
+ end
+ param = param or input
+ -- output deriv
+ local dout = torch.Tensor():resizeAs(module:forward(input))
+ -- 1D view
+ local sdout = torch.Tensor(dout:storage(),1,dout:nElement())
+ -- jacobian matrix to calculate
+ local jacobian = torch.Tensor(param:nElement(),dout:nElement()):zero()
+
+ for i=1,sdout:nElement() do
+ dout:zero()
+ sdout[i] = 1
+ module:zeroGradParameters()
+ local din = module:backward(input, dout)
+ if doparam == 1 then
+ jacobian:select(2,i):copy(dparam)
+ else
+ jacobian:select(2,i):copy(din)
+ end
+ end
+ return jacobian
+end
+
+function get_jac_fprop(module, input, param)
+ param = param or input
+ -- perturbation amount
+ local small = 1e-6
+ -- 1D view of input
+ local tst = param:storage()
+ local sin = torch.Tensor(tst,1,tst:size())
+ -- jacobian matrix to calculate
+ local jacobian = torch.Tensor():resize(param:nElement(),module:forward(input):nElement())
+
+ local outa = torch.Tensor(jacobian:size(2))
+ local outb = torch.Tensor(jacobian:size(2))
+
+ for i=1,sin:nElement() do
+ sin[i] = sin[i] - small
+ outa:copy(module:forward(input))
+ sin[i] = sin[i] + 2*small
+ outb:copy(module:forward(input))
+ sin[i] = sin[i] - small
+
+ outb:add(-1,outa):div(2*small)
+ jacobian:select(1,i):copy(outb)
+ end
+ return jacobian
+end
+
+function test_jac (module, input, minval, maxval)
+ minval = minval or -2
+ maxval = maxval or 2
+ local inrange = maxval - minval
+ input:copy(lab.rand(input:nElement()):mul(inrange):add(minval))
+ local jac_fprop = get_jac_fprop(module,input)
+ local jac_bprop = get_jac_bprop(module,input)
+ local error = jac_fprop:dist(jac_bprop,2)
+ return error
+end
+
+function test_jac_param (module, input, param, dparam, minval, maxval)
+ minval = minval or -2
+ maxval = maxval or 2
+ local inrange = maxval - minval
+ input:copy(lab.rand(input:nElement()):mul(inrange):add(minval))
+ param:copy(lab.rand(param:nElement()):mul(inrange):add(minval))
+ local jac_bprop = get_jac_bprop(module, input, param, dparam)
+ local jac_fprop = get_jac_fprop(module, input, param)
+ local error = jac_fprop:dist(jac_bprop,2)
+ return error
+end
+
+function testwriting(module, input, minval, maxval)
+ minval = minval or -2
+ maxval = maxval or 2
+ local inrange = maxval - minval
+
+ -- run module
+ module:forward(input)
+ local go = torch.Tensor():resizeAs(module.output):copy(lab.rand(module.output:nElement()):mul(inrange):add(minval))
+ module:backward(input,go)
+
+ local fo = torch.Tensor():resizeAs(module.output):copy(module.output)
+ local bo = torch.Tensor():resizeAs(module.gradInput):copy(module.gradInput)
+
+ -- write module
+ local f = torch.DiskFile('tmp.bin','w'):binary()
+ f:writeObject(module)
+ f:close()
+ -- read module
+ local m = torch.DiskFile('tmp.bin'):binary():readObject()
+ m:forward(input)
+ m:backward(input,go)
+ -- cleanup
+ os.remove('tmp.bin')
+
+ local fo2 = torch.Tensor():resizeAs(m.output):copy(m.output)
+ local bo2 = torch.Tensor():resizeAs(m.gradInput):copy(m.gradInput)
+ return fo:dist(fo2),bo:dist(bo2)
+end
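
The checker compares the finite-difference (fprop) Jacobian against the backprop Jacobian and returns their L2 distance. A hypothetical standalone use against the stock nn.Linear, mirroring what test-all.lua below does for nn.SpatialLinear:

    require 'nn'
    dofile 'jacobian.lua'

    local module = nn.Linear(10, 5)
    local input  = torch.Tensor(10)       -- test_jac fills it with lab.rand
    local err = test_jac(module, input)
    print('jacobian error on state: ' .. err)   -- well below 1e-6 in double precision
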
diff --git a/test/test-all.lua b/test/test-all.lua
new file mode 100644
index 0000000..ce2e003
--- /dev/null
+++ b/test/test-all.lua
@@ -0,0 +1,32 @@
+
+require 'nnx'
+require 'lunit'
+require 'jacobian'
+
+module("test_ops", lunit.testcase, package.seeall)
+
+precision = 1e-6
+
+function test_SpatialLinear()
+ local fanin = math.random(1,10)
+ local fanout = math.random(1,10)
+ local sizex = math.random(4,16)
+ local sizey = math.random(4,16)
+ local module = nn.SpatialLinear(fanin, fanout)
+ local input = lab.rand(fanin,sizey,sizex)
+
+ local error = test_jac(module, input)
+ assert_equal((error < precision), true, 'error on state: ' .. error)
+
+ local error = test_jac_param(module, input, module.weight, module.gradWeight)
+ assert_equal((error < precision), true, 'error on weight: ' .. error)
+
+ local error = test_jac_param(module, input, module.bias, module.gradBias)
+ assert_equal((error < precision), true, 'error on bias: ' .. error)
+
+ local ferr, berr = testwriting(module, input)
+ assert_equal(0, ferr, 'error in forward after i/o')
+ assert_equal(0, berr, 'error in backward after i/o')
+end
+
+lunit.main()
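
Note that the 1e-6 tolerance assumes double-precision tensors; with a float default, the finite-difference comparison will generally exceed it. A sketch for running the suite from the repository root (the setdefaulttensortype guard is hypothetical and only needed if doubles are not already the default):

    torch.setdefaulttensortype('torch.DoubleTensor')
    dofile('test/test-all.lua')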