Welcome to the mirror list, hosted at ThFree Co, Russian Federation.

github.com/torch/nn.git - Unnamed repository; edit this file 'description' to name the repository.
summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
authorPáidí Creed <paidi@swiftkey.net>2014-01-28 14:42:17 +0400
committerPáidí Creed <paidi@swiftkey.net>2014-01-28 14:42:17 +0400
commit2152758d904b4cab0ace02817203a65d92acbb10 (patch)
tree6a58c70942ce189262e1b270132e77a87195a016
parentaa60b6e2be23beb899b3eca28c762793afea52a6 (diff)
parent60947473ba346a04c794dd63335633640351ae46 (diff)
Merge remote-tracking branch 'upstream/master'
-rw-r--r--CMakeLists.txt8
-rw-r--r--PairwiseDistance.lua53
-rw-r--r--generic/SoftPlus.c2
-rw-r--r--test/test.lua54
4 files changed, 107 insertions, 10 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 2bfd582..b432b65 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -2,9 +2,11 @@ CMAKE_MINIMUM_REQUIRED(VERSION 2.6 FATAL_ERROR)
CMAKE_POLICY(VERSION 2.6)
FIND_PACKAGE(Torch REQUIRED)
-IF("${CMAKE_C_COMPILER_ID}" STREQUAL "Clang" OR "${CMAKE_C_COMPILER_ID}" STREQUAL "GNU")
- SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=c89 -pedantic")
-ENDIF()
+INCLUDE(CheckCCompilerFlag)
+CHECK_C_COMPILER_FLAG(-pedantic C_HAS_PEDANTIC)
+IF (C_HAS_PEDANTIC)
+ SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -pedantic")
+ENDIF ()
SET(src init.c)
diff --git a/PairwiseDistance.lua b/PairwiseDistance.lua
index d9e6f81..affc2e5 100644
--- a/PairwiseDistance.lua
+++ b/PairwiseDistance.lua
@@ -6,6 +6,7 @@ function PairwiseDistance:__init(p)
-- state
self.gradInput = {torch.Tensor(), torch.Tensor()}
self.output = torch.Tensor(1)
+ self.diff = torch.Tensor()
self.norm=p
end
@@ -17,8 +18,8 @@ function PairwiseDistance:updateOutput(input)
self.diff:resizeAs(input[1])
local diff = self.diff:zero()
- --local diff = torch.add(input[1], -1, input[2])
diff:add(input[1], -1, input[2])
+ diff:abs()
self.output:resize(input[1]:size(1))
self.output:zero()
@@ -27,7 +28,10 @@ function PairwiseDistance:updateOutput(input)
else
error('input must be vector or matrix')
end
-
+ if input[1]:dim() > 2 then
+ error('input must be vector or matrix')
+ end
+
return self.output
end
@@ -37,16 +41,39 @@ local function mathsign(x)
end
function PairwiseDistance:updateGradInput(input, gradOutput)
+ if input[1]:dim() > 2 then
+ error('input must be vector or matrix')
+ end
+
self.gradInput[1]:resize(input[1]:size())
self.gradInput[2]:resize(input[2]:size())
self.gradInput[1]:copy(input[1])
- self.gradInput[1]:add(-1, input[2])
+ self.gradInput[1]:add(-1, input[2])
+
if self.norm==1 then
self.gradInput[1]:apply(mathsign)
+ else
+ -- Note: derivative of p-norm:
+ -- d/dx_k(||x||_p) = (x_k * abs(x_k)^(p-2)) / (||x||_p)^(p-1)
+ if (self.norm > 2) then
+ self.gradInput[1]:cmul(self.gradInput[1]:clone():abs():pow(self.norm-2))
+ end
+
+ if (input[1]:dim() > 1) then
+ self.outExpand = self.outExpand or self.output.new()
+ self.outExpand:resize(self.output:size(1), 1)
+ self.outExpand:copy(self.output)
+ self.outExpand:add(1.0e-6) -- Prevent divide by zero errors
+ self.outExpand:pow(-(self.norm-1))
+ self.gradInput[1]:cmul(self.outExpand:expand(self.gradInput[1]:size(1),
+ self.gradInput[1]:size(2)))
+ else
+ self.gradInput[1]:mul(math.pow(self.output[1] + 1e-6, -(self.norm-1)))
+ end
end
if input[1]:dim() == 1 then
self.gradInput[1]:mul(gradOutput[1])
- elseif input[1]:dim() == 2 then
+ else
self.grad = self.grad or gradOutput.new()
self.ones = self.ones or gradOutput.new()
@@ -55,9 +82,23 @@ function PairwiseDistance:updateGradInput(input, gradOutput)
self.grad:addr(gradOutput, self.ones)
self.gradInput[1]:cmul(self.grad)
- else
- error('input must be vector or matrix')
end
self.gradInput[2]:zero():add(-1, self.gradInput[1])
return self.gradInput
end
+
+-- save away Module:type(type) for later use.
+PairwiseDistance._parent_type = parent.type
+
+-- Fix the bug where tmp = nn.PairwiseDistance:cuda() fails to convert table
+-- contents. We could, and probably should, change Module.lua to loop over
+-- and convert all the table elements in a module, but that might have
+-- repercussions, so this is a safer solution.
+function PairwiseDistance:type(type)
+ self:_parent_type(type) -- Call the parent (Module) type function
+ -- Now convert the left over table elements
+ self.gradInput[1] = self.gradInput[1]:type(type)
+ self.gradInput[2] = self.gradInput[2]:type(type)
+ return self
+end
+
diff --git a/generic/SoftPlus.c b/generic/SoftPlus.c
index b4f62f7..49f50a7 100644
--- a/generic/SoftPlus.c
+++ b/generic/SoftPlus.c
@@ -10,7 +10,7 @@ static int nn_(SoftPlus_updateOutput)(lua_State *L)
THTensor_(resizeAs)(output, input);
TH_TENSOR_APPLY2(real, output, real, input, \
- *output_data = log1p(exp(*input_data));)
+ *output_data = THLog1p(exp(*input_data));)
return 1;
}
diff --git a/test/test.lua b/test/test.lua
index 89db059..27bb114 100644
--- a/test/test.lua
+++ b/test/test.lua
@@ -1576,6 +1576,60 @@ function nntest.Module_getParameters_7()
mytester:asserteq(p:nElement(), 121, 'error: incorrect number of elements in flat vector')
end
+function nntest.PairwiseDistance()
+ -- Note: testJacobian doesn't support table inputs, and rather than re-write
+ -- it so that it does, I'll just use a split table module on the input.
+ -- I assume both SplitTable and Sequential do not have bugs, otherwise this
+ -- test will break.
+ for p = 1,4 do -- test a few Lp norms
+ -- TEST CASE 1: non-batch input, same code path but includes a resize
+ local ini = math.random(10,20)
+ local input = torch.Tensor(2, ini):zero()
+ local module = nn.Sequential()
+ module:add(nn.SplitTable(1))
+ module:add(nn.PairwiseDistance(p))
+
+ local err = jac.testJacobian(module,input)
+ mytester:assertlt(err,precision, ' error on state ')
+
+ local ferr,berr = jac.testIO(module,input)
+ mytester:asserteq(ferr, 0, torch.typename(module)..' - i/o forward err ')
+ mytester:asserteq(berr, 0, torch.typename(module)..' - i/o backward err ')
+
+ -- Also check that the forward prop result is correct.
+ input = torch.rand(2, ini)
+ err = torch.dist(input:select(1,1), input:select(1,2), p) -
+ module:forward(input)[1]
+ mytester:assertlt(err,precision, ' error on non-batch fprop ')
+
+ -- TEST CASE 2: batch input
+ local inj = math.random(10,20)
+ input = torch.Tensor(2, inj, ini):zero()
+
+ -- (Rebuild the module to avoid correlated tests)
+ module = nn.Sequential()
+ module:add(nn.SplitTable(1))
+ module:add(nn.PairwiseDistance(p))
+
+ err = jac.testJacobian(module,input)
+ mytester:assertlt(err,precision, ' error on state ')
+
+ -- Also check that the forward prop result is correct.
+ -- manually calculate each distance separately
+ local inputa = torch.rand(inj,ini)
+ local inputb = torch.rand(inj,ini)
+ local dist_manual = torch.Tensor(inj)
+ for i=1, inputa:size(1) do
+ dist_manual[i] = torch.dist(inputa:select(1,i), inputb:select(1,i),p)
+ end
+ -- compare the distances to the module's fprop
+ local dist = module:forward(torch.cat(inputa,inputb,1):resize(2,inj,ini))
+ err = dist - dist_manual
+ mytester:assertlt(err:norm(), precision, torch.typename(module) ..
+ ' error on batch fprop ')
+ end
+end
+
mytester:add(nntest)
if not nn then