diff options
author | Andreas Köpf <andreas.koepf@xamla.com> | 2015-12-12 01:47:10 +0300 |
---|---|---|
committer | soumith <soumith@gmail.com> | 2015-12-30 00:38:06 +0300 |
commit | ad1efeed343a6f82593a15e78ea7e7bd6ceb041c (patch) | |
tree | da68f51f86f3d570c9c233dab642941aba16e439 | |
parent | a4b99ea770b38362dd376544aa4b8dffc5bd89cb (diff) |
Add THNN/ffi conversion of Abs
-rw-r--r-- | Abs.lua | 19 | ||||
-rw-r--r-- | CMakeLists.txt | 2 | ||||
-rw-r--r-- | THNN.lua | 117 | ||||
-rw-r--r-- | generic/Abs.c | 41 | ||||
-rw-r--r-- | init.c | 5 | ||||
-rw-r--r-- | init.lua | 2 | ||||
-rw-r--r-- | lib/CMakeLists.txt | 2 | ||||
-rw-r--r-- | lib/THNN/CMakeLists.txt | 10 | ||||
-rw-r--r-- | lib/THNN/README.md | 72 | ||||
-rw-r--r-- | lib/THNN/THNN.h | 14 | ||||
-rw-r--r-- | lib/THNN/generic/Abs.c | 20 | ||||
-rw-r--r-- | lib/THNN/generic/THNN.h | 19 | ||||
-rw-r--r-- | lib/THNN/init.c | 8 |
13 files changed, 283 insertions, 48 deletions
@@ -1,3 +1,5 @@ +local THNN = require('nn.THNN') + local Abs, parent = torch.class('nn.Abs', 'nn.Module') function Abs:__init() @@ -5,11 +7,24 @@ function Abs:__init() end function Abs:updateOutput(input) - input.nn.Abs_updateOutput(self, input) + THNN.runKernel( + 'Abs_updateOutput', + input:type(), + THNN.getState(), + input:cdata(), + self.output:cdata() + ) return self.output end function Abs:updateGradInput(input, gradOutput) - input.nn.Abs_updateGradInput(self, input, gradOutput) + THNN.runKernel( + 'Abs_updateGradInput', + input:type(), + THNN.getState(), + input:cdata(), + gradOutput:cdata(), + self.gradInput:cdata() + ) return self.gradInput end diff --git a/CMakeLists.txt b/CMakeLists.txt index 1bc2bad..535cdc0 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -41,6 +41,8 @@ ENDIF (WITH_OPENMP) LINK_DIRECTORIES("${Torch_INSTALL_LIB}") +ADD_SUBDIRECTORY(lib) + SET(src init.c) FILE(GLOB luasrc *.lua) diff --git a/THNN.lua b/THNN.lua new file mode 100644 index 0000000..adb07de --- /dev/null +++ b/THNN.lua @@ -0,0 +1,117 @@ +local ffi = require 'ffi' + +local THNN = {} + +local generic_THNN_h = [[ +TH_API void THNN_(Abs_updateOutput)( + THNNState *state, + THTensor *input, + THTensor *output); +TH_API void THNN_(Abs_updateGradInput)( + THNNState *state, + THTensor *input, + THTensor *gradOutput, + THTensor *gradInput); +]] + +-- THGenerator struct declaration copied from torch7/lib/TH/THRandom.h +local base_declarations = [[ +typedef void THNNState; + +typedef struct { + unsigned long the_initial_seed; + int left; + int seeded; + unsigned long next; + unsigned long state[624]; /* the array for the state vector 624 = _MERSENNE_STATE_N */ + double normal_x; + double normal_y; + double normal_rho; + int normal_is_valid; +} THGenerator; +]] + +ffi.cdef(base_declarations) + +-- expand macros, allow to use original lines from lib/THNN/generic/THNN.h +local preprocessed = string.gsub(generic_THNN_h, 'TH_API void THNN_%(([%a%d_]+)%)', 'void THNN_TYPE%1') + +local 
replacements = +{ + { ['TYPE'] = 'Double', ['real'] = 'double', ['THTensor'] = 'THDoubleTensor', ['THIndexTensor'] = 'THLongTensor' }, + { ['TYPE'] = 'Float', ['real'] = 'float', ['THTensor'] = 'THFloatTensor', ['THIndexTensor'] = 'THLongTensor' } +} + +for i=1,#replacements do + local r = replacements[i] + local s = preprocessed + for k,v in pairs(r) do + s = string.gsub(s, k, v) + end + ffi.cdef(s) +end + +THNN.NULL = ffi.NULL or nil + +function THNN.getState() + return ffi.NULL or nil +end + +function THNN.optionalTensor(t) + return t and t:cdata() or THNN.NULL +end + +local ok,result +if ffi.os == "OSX" then + ok,result = pcall(ffi.load, 'libTHNN.dylib') +else + ok,result = pcall(ffi.load, 'libTHNN.so') +end +if not ok then + print(result) + error("Ops, could not load 'libTHNN' CPU backend library.") +else + THNN.C = result +end + +local function extract_function_names(s) + local t = {} + for n in string.gmatch(s, 'TH_API void THNN_%(([%a%d_]+)%)') do + t[#t+1] = n + end + return t +end + +function THNN.bind(lib, base_names, type_name) + local ftable = {} + local prefix = 'THNN_' .. type_name + for i,n in ipairs(base_names) do + -- use pcall since some libs might not support all functions (e.g. cunn) + local ok,v = pcall(function() return lib[prefix .. n] end) + if ok then + ftable[n] = v + end + end + return ftable +end + +-- build function table +local function_names = extract_function_names(generic_THNN_h) + +THNN.kernels = {} +THNN.kernels['torch.FloatTensor'] = THNN.bind(THNN.C, function_names, 'Float') +THNN.kernels['torch.DoubleTensor'] = THNN.bind(THNN.C, function_names, 'Double') + +function THNN.runKernel(f, type, ...) + local ftable = THNN.kernels[type] + if not ftable then + error('Unsupported tensor type: '..type) + end + local f = ftable[f] + if not f then + error(string.format("Function '%s' not found for tensor type '%s'.", f, type)) + end + f(...) 
+end + +return THNN diff --git a/generic/Abs.c b/generic/Abs.c deleted file mode 100644 index 0d258f8..0000000 --- a/generic/Abs.c +++ /dev/null @@ -1,41 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/Abs.c" -#else - -static int nn_(Abs_updateOutput)(lua_State *L) -{ - THTensor *input = luaT_checkudata(L, 2, torch_Tensor); - THTensor *output = luaT_getfieldcheckudata(L, 1, "output", torch_Tensor); - - THTensor_(resizeAs)(output, input); - THTensor_(abs)(output, input); - return 1; -} - -static int nn_(Abs_updateGradInput)(lua_State *L) -{ - THTensor *input = luaT_checkudata(L, 2, torch_Tensor); - THTensor *gradOutput = luaT_checkudata(L, 3, torch_Tensor); - THTensor *gradInput = luaT_getfieldcheckudata(L, 1, "gradInput", torch_Tensor); - - THTensor_(resizeAs)(gradInput, input); - TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, input, \ - real z = *input_data; \ - *gradInput_data = *gradOutput_data * (z >= 0 ? 1 : -1);) - return 1; -} - -static const struct luaL_Reg nn_(Abs__) [] = { - {"Abs_updateOutput", nn_(Abs_updateOutput)}, - {"Abs_updateGradInput", nn_(Abs_updateGradInput)}, - {NULL, NULL} -}; - -static void nn_(Abs_init)(lua_State *L) -{ - luaT_pushmetatable(L, torch_Tensor); - luaT_registeratname(L, nn_(Abs__), "nn"); - lua_pop(L,1); -} - -#endif @@ -29,9 +29,6 @@ #include "generic/Tanh.c" #include "THGenerateFloatTypes.h" -#include "generic/Abs.c" -#include "THGenerateFloatTypes.h" - #include "generic/HardShrink.c" #include "THGenerateFloatTypes.h" @@ -172,7 +169,6 @@ int luaopen_libnn(lua_State *L) nn_FloatSoftMax_init(L); nn_FloatSoftPlus_init(L); nn_FloatTanh_init(L); - nn_FloatAbs_init(L); nn_FloatHardShrink_init(L); nn_FloatSoftShrink_init(L); nn_FloatThreshold_init(L); @@ -221,7 +217,6 @@ int luaopen_libnn(lua_State *L) nn_DoubleSoftMax_init(L); nn_DoubleSoftPlus_init(L); nn_DoubleTanh_init(L); - nn_DoubleAbs_init(L); nn_DoubleHardShrink_init(L); nn_DoubleSoftShrink_init(L); nn_DoubleThreshold_init(L); @@ -1,8 +1,10 @@ 
require('torch') require('libnn') +require('nn.THNN') include('utils.lua') + include('ErrorMessages.lua') include('Module.lua') diff --git a/lib/CMakeLists.txt b/lib/CMakeLists.txt new file mode 100644 index 0000000..1678b8f --- /dev/null +++ b/lib/CMakeLists.txt @@ -0,0 +1,2 @@ +CMAKE_MINIMUM_REQUIRED(VERSION 2.6 FATAL_ERROR) +ADD_SUBDIRECTORY(THNN)
\ No newline at end of file diff --git a/lib/THNN/CMakeLists.txt b/lib/THNN/CMakeLists.txt new file mode 100644 index 0000000..e94fca0 --- /dev/null +++ b/lib/THNN/CMakeLists.txt @@ -0,0 +1,10 @@ +CMAKE_MINIMUM_REQUIRED(VERSION 2.6 FATAL_ERROR) + +SET(src init.c) +ADD_LIBRARY(THNN SHARED init.c) +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}) +TARGET_LINK_LIBRARIES(THNN TH) + +INSTALL(TARGETS THNN + RUNTIME DESTINATION ${Torch_INSTALL_LIB_SUBDIR} + LIBRARY DESTINATION ${Torch_INSTALL_LIB_SUBDIR}) diff --git a/lib/THNN/README.md b/lib/THNN/README.md new file mode 100644 index 0000000..dec5ffc --- /dev/null +++ b/lib/THNN/README.md @@ -0,0 +1,72 @@ +## API design guidelines + +All functions should accept arguments in the following order. Dots represent any module-specific parameters or buffers, disregarding whether they are used for writing or reading. They should follow the order +``` +[weight], [bias], [any buffers], [additional arguments], [optional arguments] +``` + +### Modules +``` +updateOutput: state, input, output, ... +updateGradInput: state, input, gradOutput, gradInput, ... +accGradParameters: state, input, gradOutput, [gradWeight], [gradBias], ... +``` + +e.g. +```C +void THNN_(HardShrink_updateGradInput)( + THNNState* state, + THTensor *input, + THTensor *gradOutput, + THTensor *gradInput, + real lambda) +``` + +### Criterions +``` +updateOutput: state, input, target, output, ... +updateGradInput: state, input, target, gradInput, ... +``` + +e.g. + +```C +void THNN_(ClassNLLCriterion_updateOutput)( + THNNState* state, + THTensor *input, + THLongTensor *target, + THTensor *output, + THTensor *weights, + THTensor *total_weight, + bool sizeAverage) +``` + +## Code style guide + +```C +void THNN_Linear_updateOutput( + THTensor *input, + THTensor *output, + THTensor *weight, + THTensor *bias); +//<- 10 -> +``` + +All arguments should start on a new line after function name, and they should be indented using 10 spaces. + +Use 2 spaces for block indentation. 
+ + +### Conversion Steps + +1. copy old .c file to lib/THNN/generic + - replace static int nn_ -> void THNN_ + - replace lua_State \*L with 'actual' parameters (+ add THNNState\* state) + - remove any numeric values from return statements, remove the return at the end of the function body + - remove old luaL_Reg & _init function +2. add forward declarations to generic/THNN.h +3. include the generic/xyz.c file in init.c +4. add functions to ffi.lua +5. copy & adapt lua file: specify module THNN for torch.class(), use THNN.errcheck +6. include module lua file in init.lua +7. add & run unit test to lua/tests/test.lua diff --git a/lib/THNN/THNN.h b/lib/THNN/THNN.h new file mode 100644 index 0000000..3968d2b --- /dev/null +++ b/lib/THNN/THNN.h @@ -0,0 +1,14 @@ +#ifndef THNN_H +#define THNN_H + +#include <stdbool.h> +#include <TH.h> + +#define THNN_(NAME) TH_CONCAT_3(THNN_, Real, NAME) + +typedef void THNNState; + +#include "generic/THNN.h" +#include <THGenerateFloatTypes.h> + +#endif
\ No newline at end of file diff --git a/lib/THNN/generic/Abs.c b/lib/THNN/generic/Abs.c new file mode 100644 index 0000000..cc96d5d --- /dev/null +++ b/lib/THNN/generic/Abs.c @@ -0,0 +1,20 @@ +#ifndef TH_GENERIC_FILE +#define TH_GENERIC_FILE "generic/Abs.c" +#else + +void THNN_(Abs_updateOutput)(THNNState *state, THTensor *input, THTensor *output) +{ + THTensor_(resizeAs)(output, input); + THTensor_(abs)(output, input); +} + +void THNN_(Abs_updateGradInput)(THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput) +{ + THTensor_(resizeAs)(gradInput, input); + TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, input, + real z = *input_data; + *gradInput_data = *gradOutput_data * (z >= 0 ? 1 : -1); + ); +} + +#endif diff --git a/lib/THNN/generic/THNN.h b/lib/THNN/generic/THNN.h new file mode 100644 index 0000000..8d74ae1 --- /dev/null +++ b/lib/THNN/generic/THNN.h @@ -0,0 +1,19 @@ +#ifndef TH_GENERIC_FILE +#define TH_GENERIC_FILE "generic/THNN.h" +#else + +#ifndef THIndexTensor +#define THIndexTensor THLongTensor +#endif + +TH_API void THNN_(Abs_updateOutput)( + THNNState *state, + THTensor *input, + THTensor *output); +TH_API void THNN_(Abs_updateGradInput)( + THNNState *state, + THTensor *input, + THTensor *gradOutput, + THTensor *gradInput); + +#endif diff --git a/lib/THNN/init.c b/lib/THNN/init.c new file mode 100644 index 0000000..4488afc --- /dev/null +++ b/lib/THNN/init.c @@ -0,0 +1,8 @@ +#include "TH.h" +#include "THNN.h" + +#define torch_(NAME) TH_CONCAT_3(torch_, Real, NAME) +#define nn_(NAME) TH_CONCAT_3(nn_, Real, NAME) + +#include "generic/Abs.c" +#include "THGenerateFloatTypes.h" |