
github.com/soumith/cudnn.torch.git
author     soumith <soumith@gmail.com>  2016-01-26 18:46:42 +0300
committer  soumith <soumith@gmail.com>  2016-01-26 18:46:42 +0300
commit     6f429d670c8290b3aa5d3977c4f73a063fb26cc2 (patch)
tree       993c6955631b84e281e56d41c6cc97ebae8dbcc5
parent     0f2d96a6bb565a3e0cfd890d15d828808ebad39b (diff)
cudnn.convert conflicts manually applied
-rw-r--r--  README.md      |  22
-rw-r--r--  init.lua       |   1
-rw-r--r--  test/test.lua  | 701
3 files changed, 444 insertions, 280 deletions
diff --git a/README.md b/README.md
index 94278b5..708d3a8 100644
--- a/README.md
+++ b/README.md
@@ -4,6 +4,7 @@ cudnn.torch
Torch7 FFI bindings for NVidia CuDNN (R4) kernels!
Modules are API compatible with their [`nn`](https://github.com/torch/nn) equivalents. Fully unit-tested against `nn` implementations.
+Conversion between `nn` and `cudnn` is available through the `cudnn.convert` function.
#### Installation
@@ -59,6 +60,27 @@ cudnn.verbose = true -- this prints out some more verbose information useful for
```
by default, `cudnn.verbose` is set to `false`.
+### Conversion between `cudnn` and `nn`
+
+Conversion is done by the `cudnn.convert` function, which takes a network and a backend as arguments and recursively goes over the
+network's modules, substituting their equivalents. No memory copy is made; only the metatables are swapped.
+
+```lua
+net = nn.Sequential()
+net:add(nn.SpatialConvolution(3,96,11,11,3,3))
+net:add(nn.ReLU())
+cudnn.convert(net, cudnn)
+print(net)
+```
+
+will result in:
+```
+nn.Sequential {
+ [input -> (1) -> (2) -> output]
+ (1): cudnn.SpatialConvolution(3 -> 96, 11x11, 3,3)
+ (2): cudnn.ReLU
+}
+```
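+
+The same function can be used to convert a network back to `nn` modules by passing `nn` as the backend (a minimal sketch; the reverse direction is an assumption here rather than something shown in this commit). Because only metatables are swapped, the converted network keeps the original weight tensors.
+
+```lua
+-- assumed usage: convert the cudnn-backed network back to nn modules, in place
+cudnn.convert(net, nn)
+print(net)
+```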
### Older versions
For version CuDNN R1, checkout the branch **R1**
diff --git a/init.lua b/init.lua
index d541e03..c228459 100644
--- a/init.lua
+++ b/init.lua
@@ -114,6 +114,7 @@ include 'SpatialBatchNormalization.lua'
include 'SpatialCrossEntropyCriterion.lua'
include 'TemporalConvolution.lua'
include 'functional.lua'
+include 'convert.lua'
return cudnn
diff --git a/test/test.lua b/test/test.lua
index e4afe5a..2a25f98 100644
--- a/test/test.lua
+++ b/test/test.lua
@@ -23,22 +23,37 @@ function cudnntest.SpatialConvolution_forward_batch()
local outj = math.random(1,64)
local ini = (outi-1)*si+ki
local inj = (outj-1)*sj+kj
+
local input = torch.randn(bs,from,inj,ini):cuda()
local sconv = nn.SpatialConvolutionMM(from,to,ki,kj,si,sj):cuda()
- local groundtruth = sconv:forward(input)
- cutorch.synchronize()
local gconv = cudnn.SpatialConvolution(from,to,ki,kj,si,sj):cuda():fastest()
gconv.weight:copy(sconv.weight)
gconv.bias:copy(sconv.bias)
- local rescuda = gconv:forward(input)
- cutorch.synchronize()
- local error = rescuda:float() - groundtruth:float()
- mytester:assertlt(error:abs():max(), precision_forward, 'error on state (forward) ')
+ local function test(sconv, gconv)
+ local groundtruth = sconv:forward(input)
+ cutorch.synchronize()
+ local rescuda = gconv:forward(input)
+ cutorch.synchronize()
+ local error = rescuda:float() - groundtruth:float()
+ mytester:assertlt(error:abs():max(), precision_forward, 'error on state (forward) ')
+
+ local gconv = cudnn.convert(sconv, cudnn)
+ mytester:asserteq(torch.typename(gconv), 'cudnn.SpatialConvolution', 'conversion type check')
+ local rescuda = gconv:forward(input)
+ cutorch.synchronize()
+ local error = rescuda:float() - groundtruth:float()
+ mytester:assertlt(error:abs():max(), precision_forward, 'error on state (forward conversion) ')
+
+ -- IO
+ local ferr,berr = jac.testIO(gconv, input)
+ mytester:assertlt(ferr, precision_io, torch.typename(gconv) .. ' - i/o forward err ')
+ mytester:assertlt(berr, precision_io, torch.typename(gconv) .. ' - i/o backward err ')
+ end
- -- IO
- local ferr,berr = jac.testIO(gconv, input)
- mytester:assertlt(ferr, precision_io, torch.typename(gconv) .. ' - i/o forward err ')
- mytester:assertlt(berr, precision_io, torch.typename(gconv) .. ' - i/o backward err ')
+ test(sconv, gconv)
+ local gconv = cudnn.convert(sconv, cudnn)
+ mytester:asserteq(torch.typename(gconv), 'cudnn.SpatialConvolution', 'conversion type check')
+ test(sconv, gconv)
end
@@ -75,20 +90,27 @@ function cudnntest.SpatialConvolution_backward_batch()
torch.save('modelTemp.t7', gconv)
gconv = torch.load('modelTemp.t7')
- gconv:forward(input)
- gconv:zeroGradParameters()
- local rescuda = gconv:backward(input, gradOutput, scale)
- cutorch.synchronize()
- local weightcuda = gconv.gradWeight
- local biascuda = gconv.gradBias
-
- local error = rescuda:float() - groundgrad:float()
- local werror = weightcuda:float() - groundweight:float()
- local berror = biascuda:float() - groundbias:float()
+ local function test(sconv, gconv)
+ gconv:forward(input)
+ gconv:zeroGradParameters()
+ local rescuda = gconv:backward(input, gradOutput, scale)
+ cutorch.synchronize()
+ local weightcuda = gconv.gradWeight
+ local biascuda = gconv.gradBias
+
+ local error = rescuda:float() - groundgrad:float()
+ local werror = weightcuda:float() - groundweight:float()
+ local berror = biascuda:float() - groundbias:float()
+
+ mytester:assertlt(error:abs():max(), precision_backward, 'error on state (backward) ')
+ mytester:assertlt(werror:abs():max(), precision_backward, 'error on weight (backward) ')
+ mytester:assertlt(berror:abs():max(), precision_backward, 'error on bias (backward) ')
+ end
- mytester:assertlt(error:abs():max(), precision_backward, 'error on state (backward) ')
- mytester:assertlt(werror:abs():max(), precision_backward, 'error on weight (backward) ')
- mytester:assertlt(berror:abs():max(), precision_backward, 'error on bias (backward) ')
+ test(sconv, gconv)
+ local gconv = cudnn.convert(sconv, cudnn)
+ mytester:asserteq(torch.typename(gconv), 'cudnn.SpatialConvolution', 'conversion type check')
+ test(sconv, gconv)
end
function cudnntest.SpatialConvolution_forward_single()
@@ -105,17 +127,37 @@ function cudnntest.SpatialConvolution_forward_single()
local input = torch.randn(from,inj,ini):cuda()
local sconv = nn.SpatialConvolutionMM(from,to,ki,kj,si,sj):cuda()
- local groundtruth = sconv:forward(input)
- cutorch.synchronize()
local gconv = cudnn.SpatialConvolution(from,to,ki,kj,si,sj):cuda()
gconv.weight:copy(sconv.weight)
gconv.bias:copy(sconv.bias)
- local rescuda = gconv:forward(input)
- cutorch.synchronize()
- mytester:asserteq(rescuda:dim(), 3, 'error in dimension')
- local error = rescuda:float() - groundtruth:float()
- mytester:assertlt(error:abs():max(), precision_forward,
- 'error on state (forward) ')
+
+ local function test(sconv, gconv)
+ local groundtruth = sconv:forward(input)
+ cutorch.synchronize()
+ local rescuda = gconv:forward(input)
+ cutorch.synchronize()
+ mytester:asserteq(rescuda:dim(), 3, 'error in dimension')
+ local error = rescuda:float() - groundtruth:float()
+ mytester:assertlt(error:abs():max(), precision_forward,
+ 'error on state (forward) ')
+
+ local gconv = cudnn.convert(sconv, cudnn)
+ mytester:asserteq(torch.typename(gconv), 'cudnn.SpatialConvolution', 'conversion type check')
+ local rescuda = gconv:forward(input)
+ cutorch.synchronize()
+ local error = rescuda:float() - groundtruth:float()
+ mytester:assertlt(error:abs():max(), precision_forward, 'error on state (forward conversion) ')
+
+ -- IO
+ local ferr,berr = jac.testIO(gconv, input)
+ mytester:assertlt(ferr, precision_io, torch.typename(gconv) .. ' - i/o forward err ')
+ mytester:assertlt(berr, precision_io, torch.typename(gconv) .. ' - i/o backward err ')
+ end
+
+ test(sconv, gconv)
+ local gconv = cudnn.convert(sconv, cudnn)
+ mytester:asserteq(torch.typename(gconv), 'cudnn.SpatialConvolution', 'conversion type check')
+ test(sconv, gconv)
end
@@ -144,30 +186,37 @@ function cudnntest.SpatialConvolution_backward_single()
local gconv = cudnn.SpatialConvolution(from,to,ki,kj,si,sj):cuda()
gconv.weight:copy(sconv.weight)
gconv.bias:copy(sconv.bias)
- gconv:forward(input)
-
- -- serialize and deserialize
- torch.save('modelTemp.t7', gconv)
- gconv = torch.load('modelTemp.t7')
-
- gconv:forward(input)
- gconv:zeroGradParameters()
- local rescuda = gconv:backward(input, gradOutput)
- cutorch.synchronize()
- mytester:asserteq(rescuda:dim(), 3, 'error in dimension')
- local weightcuda = gconv.gradWeight
- local biascuda = gconv.gradBias
-
- local error = rescuda:float() - groundgrad:float()
- local werror = weightcuda:float() - groundweight:float()
- local berror = biascuda:float() - groundbias:float()
-
- mytester:assertlt(error:abs():max(), precision_backward,
- 'error on state (backward) ')
- mytester:assertlt(werror:abs():max(), precision_backward,
- 'error on weight (backward) ')
- mytester:assertlt(berror:abs():max(), precision_backward,
- 'error on bias (backward) ')
+ local function test(sconv, gconv)
+ gconv:forward(input)
+
+ -- serialize and deserialize
+ torch.save('modelTemp.t7', gconv)
+ gconv = torch.load('modelTemp.t7')
+
+ gconv:forward(input)
+ gconv:zeroGradParameters()
+ local rescuda = gconv:backward(input, gradOutput)
+ cutorch.synchronize()
+ mytester:asserteq(rescuda:dim(), 3, 'error in dimension')
+ local weightcuda = gconv.gradWeight
+ local biascuda = gconv.gradBias
+
+ local error = rescuda:float() - groundgrad:float()
+ local werror = weightcuda:float() - groundweight:float()
+ local berror = biascuda:float() - groundbias:float()
+
+ mytester:assertlt(error:abs():max(), precision_backward,
+ 'error on state (backward) ')
+ mytester:assertlt(werror:abs():max(), precision_backward,
+ 'error on weight (backward) ')
+ mytester:assertlt(berror:abs():max(), precision_backward,
+ 'error on bias (backward) ')
+ end
+
+ test(sconv, gconv)
+ local gconv = cudnn.convert(sconv, cudnn)
+ mytester:asserteq(torch.typename(gconv), 'cudnn.SpatialConvolution', 'conversion type check')
+ test(sconv, gconv)
end
function cudnntest.TemporalConvolution_batch()
@@ -335,21 +384,29 @@ function cudnntest.VolumetricConvolution_forward_single()
local ink = (outk-1)*sk+kk
local input = torch.randn(from,ink,inj,ini):cuda()
local sconv = nn.VolumetricConvolution(from,to,kk,ki,kj,sk,si,sj):float()
- local groundtruth = sconv:forward(input:float())
- cutorch.synchronize()
local gconv = cudnn.VolumetricConvolution(from,to,kk,ki,kj,sk,si,sj):cuda()
gconv.weight:copy(sconv.weight)
gconv.bias:copy(sconv.bias)
- local rescuda = gconv:forward(input)
- cutorch.synchronize()
- local error = rescuda:float() - groundtruth:float()
- mytester:assertlt(error:abs():max(), precision_forward,
- 'error on state (forward) ')
-
- -- IO
- local ferr,berr = jac.testIO(gconv, input)
- mytester:assertlt(ferr, precision_io, torch.typename(gconv) .. ' - i/o forward err ')
- mytester:assertlt(berr, precision_io, torch.typename(gconv) .. ' - i/o backward err ')
+ local function test(sconv, gconv)
+ local groundtruth = sconv:forward(input:float())
+ cutorch.synchronize()
+ local rescuda = gconv:forward(input)
+ cutorch.synchronize()
+ local error = rescuda:float() - groundtruth:float()
+ mytester:assertlt(error:abs():max(), precision_forward,
+ 'error on state (forward) ')
+
+ local gconv = cudnn.convert(sconv, cudnn):cuda()
+ local rescuda = gconv:forward(input)
+ cutorch.synchronize()
+ local error = rescuda:float() - groundtruth:float()
+ mytester:assertlt(error:abs():max(), precision_forward, 'error on state (forward conversion) ')
+
+ -- IO
+ local ferr,berr = jac.testIO(gconv, input)
+ mytester:assertlt(ferr, precision_io, torch.typename(gconv) .. ' - i/o forward err ')
+ mytester:assertlt(berr, precision_io, torch.typename(gconv) .. ' - i/o backward err ')
+ end
+
+ test(sconv, gconv)
+ local gconv = cudnn.convert(sconv, cudnn):cuda()
+ mytester:asserteq(torch.typename(gconv), 'cudnn.VolumetricConvolution', 'conversion type check')
+ test(sconv, gconv)
end
function cudnntest.VolumetricConvolution_backward_single()
@@ -380,33 +437,40 @@ function cudnntest.VolumetricConvolution_backward_single()
local gconv = cudnn.VolumetricConvolution(from,to,kk,ki,kj,sk,si,sj):cuda()
gconv.weight:copy(sconv.weight)
gconv.bias:copy(sconv.bias)
- gconv:forward(input)
- cutorch.synchronize()
-
- -- serialize and deserialize
- torch.save('modelTemp.t7', gconv)
- gconv = torch.load('modelTemp.t7')
-
- gconv:forward(input)
- gconv:zeroGradParameters()
- local rescuda = gconv:backward(input, gradOutput)
- cutorch.synchronize()
-
- mytester:asserteq(rescuda:dim(), 4, 'error in dimension')
- local weightcuda = gconv.gradWeight
- local biascuda = gconv.gradBias
-
- local error = rescuda:float() - groundgrad:float()
- local werror = weightcuda:float() - groundweight:float()
- local berror = biascuda:float() - groundbias:float()
-
- mytester:assertlt(error:abs():max(), precision_backward,
- 'error on state (backward) ')
- mytester:assertlt(werror:abs():max(), precision_backward,
- 'error on weight (backward) ')
- mytester:assertlt(berror:abs():max(), precision_backward,
- 'error on bias (backward) ')
+ local function test(sconv, gconv)
+ gconv:forward(input)
+ cutorch.synchronize()
+
+ -- serialize and deserialize
+ torch.save('modelTemp.t7', gconv)
+ gconv = torch.load('modelTemp.t7')
+
+ gconv:forward(input)
+ gconv:zeroGradParameters()
+ local rescuda = gconv:backward(input, gradOutput)
+ cutorch.synchronize()
+
+ mytester:asserteq(rescuda:dim(), 4, 'error in dimension')
+ local weightcuda = gconv.gradWeight
+ local biascuda = gconv.gradBias
+
+ local error = rescuda:float() - groundgrad:float()
+ local werror = weightcuda:float() - groundweight:float()
+ local berror = biascuda:float() - groundbias:float()
+
+ mytester:assertlt(error:abs():max(), precision_backward,
+ 'error on state (backward) ')
+ mytester:assertlt(werror:abs():max(), precision_backward,
+ 'error on weight (backward) ')
+ mytester:assertlt(berror:abs():max(), precision_backward,
+ 'error on bias (backward) ')
+ end
+
+ test(sconv, gconv)
+ local gconv = cudnn.convert(sconv, cudnn):cuda()
+ mytester:asserteq(torch.typename(gconv), 'cudnn.VolumetricConvolution', 'conversion type check')
+ test(sconv, gconv)
end
function cudnntest.VolumetricMaxPooling_batch()
@@ -428,28 +492,33 @@ function cudnntest.VolumetricMaxPooling_batch()
local gradOutput = torch.randn(bs,from,outk,outj,outi):cuda()
local sconv = nn.VolumetricMaxPooling(kk,ki,kj,sk,si,sj):float()
- local groundtruth = sconv:forward(input:float())
- local groundgrad = sconv:backward(input:float(), gradOutput:float())
- cutorch.synchronize()
local gconv = cudnn.VolumetricMaxPooling(kk,ki,kj,sk,si,sj):cuda()
- local rescuda = gconv:forward(input)
- -- serialize and deserialize
- torch.save('modelTemp.t7', gconv)
- gconv = torch.load('modelTemp.t7')
- local rescuda = gconv:forward(input)
- local resgrad = gconv:backward(input, gradOutput)
- cutorch.synchronize()
- mytester:asserteq(rescuda:dim(), 5, 'error in dimension')
- mytester:asserteq(resgrad:dim(), 5, 'error in dimension')
- local error = rescuda:float() - groundtruth:float()
- mytester:assertlt(error:abs():max(), precision_forward, 'error on state (forward) ')
- error = resgrad:float() - groundgrad:float()
- mytester:assertlt(error:abs():max(), precision_backward, 'error on state (backward) ')
+ local function test(sconv, gconv)
+ local groundtruth = sconv:forward(input:float())
+ local groundgrad = sconv:backward(input:float(), gradOutput:float())
+ cutorch.synchronize()
+
+ local rescuda = gconv:forward(input)
+ local resgrad = gconv:backward(input, gradOutput)
+ cutorch.synchronize()
+
+ mytester:asserteq(rescuda:dim(), 5, 'error in dimension')
+ mytester:asserteq(resgrad:dim(), 5, 'error in dimension')
+ local error = rescuda:float() - groundtruth:float()
+ mytester:assertlt(error:abs():max(), precision_forward, 'error on state (forward) ')
+ error = resgrad:float() - groundgrad:float()
+ mytester:assertlt(error:abs():max(), precision_backward, 'error on state (backward) ')
+
+ -- IO
+ local ferr,berr = jac.testIO(gconv, input)
+ mytester:assertlt(ferr, precision_io, torch.typename(gconv) .. ' - i/o forward err ')
+ mytester:assertlt(berr, precision_io, torch.typename(gconv) .. ' - i/o backward err ')
+ end
- -- IO
- local ferr,berr = jac.testIO(gconv, input)
- mytester:assertlt(ferr, precision_io, torch.typename(gconv) .. ' - i/o forward err ')
- mytester:assertlt(berr, precision_io, torch.typename(gconv) .. ' - i/o backward err ')
+ test(sconv, gconv)
+ local gconv = cudnn.convert(sconv, cudnn):cuda()
+ mytester:asserteq(torch.typename(gconv), 'cudnn.VolumetricMaxPooling', 'conversion type check')
+ test(sconv, gconv)
end
function cudnntest.VolumetricMaxPooling_single()
@@ -470,25 +539,33 @@ function cudnntest.VolumetricMaxPooling_single()
local gradOutput = torch.randn(from,outk,outj,outi):cuda()
local sconv = nn.VolumetricMaxPooling(kk,ki,kj,sk,si,sj):float()
- local groundtruth = sconv:forward(input:float())
- local groundgrad = sconv:backward(input:float(), gradOutput:float())
- cutorch.synchronize()
local gconv = cudnn.VolumetricMaxPooling(kk,ki,kj,sk,si,sj):cuda()
- local _ = gconv:forward(input)
- -- serialize and deserialize
- torch.save('modelTemp.t7', gconv)
- gconv = torch.load('modelTemp.t7')
- local rescuda = gconv:forward(input)
- local resgrad = gconv:backward(input, gradOutput)
- cutorch.synchronize()
- mytester:asserteq(rescuda:dim(), 4, 'error in dimension')
- mytester:asserteq(resgrad:dim(), 4, 'error in dimension')
- local error = rescuda:float() - groundtruth:float()
- mytester:assertlt(error:abs():max(), precision_forward,
- 'error on state (forward) ')
- error = resgrad:float() - groundgrad:float()
- mytester:assertlt(error:abs():max(), precision_backward,
- 'error on state (backward) ')
+
+ local function test(sconv, gconv)
+ local groundtruth = sconv:forward(input:float())
+ local groundgrad = sconv:backward(input:float(), gradOutput:float())
+ cutorch.synchronize()
+ local _ = gconv:forward(input)
+ -- serialize and deserialize
+ torch.save('modelTemp.t7', gconv)
+ gconv = torch.load('modelTemp.t7')
+ local rescuda = gconv:forward(input)
+ local resgrad = gconv:backward(input, gradOutput)
+ cutorch.synchronize()
+ mytester:asserteq(rescuda:dim(), 4, 'error in dimension')
+ mytester:asserteq(resgrad:dim(), 4, 'error in dimension')
+ local error = rescuda:float() - groundtruth:float()
+ mytester:assertlt(error:abs():max(), precision_forward,
+ 'error on state (forward) ')
+ error = resgrad:float() - groundgrad:float()
+ mytester:assertlt(error:abs():max(), precision_backward,
+ 'error on state (backward) ')
+ end
+
+ test(sconv, gconv)
+ local gconv = cudnn.convert(sconv, cudnn):cuda()
+ mytester:asserteq(torch.typename(gconv), 'cudnn.VolumetricMaxPooling', 'conversion type check')
+ test(sconv, gconv)
end
function cudnntest.SpatialMaxPooling_batch()
@@ -553,26 +630,34 @@ function cudnntest.SpatialMaxPooling_single()
local sconv = nn.SpatialMaxPooling(ki,kj,si,sj,padi,padj):cuda()
if ceil_mode then sconv:ceil() end
- local groundtruth = sconv:forward(input)
- local groundgrad = sconv:backward(input, gradOutput)
- cutorch.synchronize()
local gconv = cudnn.SpatialMaxPooling(ki,kj,si,sj,padi,padj):cuda()
if ceil_mode then gconv:ceil() end
- local _ = gconv:forward(input)
- -- serialize and deserialize
- torch.save('modelTemp.t7', gconv)
- gconv = torch.load('modelTemp.t7')
- local rescuda = gconv:forward(input)
- local resgrad = gconv:backward(input, gradOutput)
- cutorch.synchronize()
- mytester:asserteq(rescuda:dim(), 3, 'error in dimension')
- mytester:asserteq(resgrad:dim(), 3, 'error in dimension')
- local error = rescuda:float() - groundtruth:float()
- mytester:assertlt(error:abs():max(), precision_forward,
- 'error on state (forward) ')
- error = resgrad:float() - groundgrad:float()
- mytester:assertlt(error:abs():max(), precision_backward,
- 'error on state (backward) ')
+
+ local function test(sconv, gconv)
+ local groundtruth = sconv:forward(input)
+ local groundgrad = sconv:backward(input, gradOutput)
+ cutorch.synchronize()
+ local _ = gconv:forward(input)
+ -- serialize and deserialize
+ torch.save('modelTemp.t7', gconv)
+ gconv = torch.load('modelTemp.t7')
+ local rescuda = gconv:forward(input)
+ local resgrad = gconv:backward(input, gradOutput)
+ cutorch.synchronize()
+ mytester:asserteq(rescuda:dim(), 3, 'error in dimension')
+ mytester:asserteq(resgrad:dim(), 3, 'error in dimension')
+ local error = rescuda:float() - groundtruth:float()
+ mytester:assertlt(error:abs():max(), precision_forward,
+ 'error on state (forward) ')
+ error = resgrad:float() - groundgrad:float()
+ mytester:assertlt(error:abs():max(), precision_backward,
+ 'error on state (backward) ')
+ end
+
+ test(sconv, gconv)
+ local gconv = cudnn.convert(sconv, cudnn):cuda()
+ mytester:asserteq(torch.typename(gconv), 'cudnn.SpatialMaxPooling', 'conversion type check')
+ test(sconv, gconv)
end
function cudnntest.SpatialAveragePooling_batch()
@@ -628,25 +713,33 @@ function cudnntest.SpatialAveragePooling_single()
local gradOutput = torch.randn(from,outj,outi):cuda()
local sconv = nn.SpatialAveragePooling(ki,kj,si,sj):cuda()
- local groundtruth = sconv:forward(input):clone()
- local groundgrad = sconv:backward(input, gradOutput)
- cutorch.synchronize()
local gconv = cudnn.SpatialAveragePooling(ki,kj,si,sj):cuda()
- local _ = gconv:forward(input)
- -- serialize and deserialize
- torch.save('modelTemp.t7', gconv)
- gconv = torch.load('modelTemp.t7')
- local rescuda = gconv:forward(input)
- local resgrad = gconv:backward(input, gradOutput)
- cutorch.synchronize()
- mytester:asserteq(rescuda:dim(), 3, 'error in dimension')
- mytester:asserteq(resgrad:dim(), 3, 'error in dimension')
- local error = rescuda:float() - groundtruth:float()
- mytester:assertlt(error:abs():max(), precision_forward,
- 'error on state (forward) ')
- error = resgrad:float() - groundgrad:float()
- mytester:assertlt(error:abs():max(), precision_backward,
- 'error on state (backward) ')
+
+ local function test(sconv, gconv)
+ local groundtruth = sconv:forward(input):clone()
+ local groundgrad = sconv:backward(input, gradOutput)
+ cutorch.synchronize()
+ local _ = gconv:forward(input)
+ -- serialize and deserialize
+ torch.save('modelTemp.t7', gconv)
+ gconv = torch.load('modelTemp.t7')
+ local rescuda = gconv:forward(input)
+ local resgrad = gconv:backward(input, gradOutput)
+ cutorch.synchronize()
+ mytester:asserteq(rescuda:dim(), 3, 'error in dimension')
+ mytester:asserteq(resgrad:dim(), 3, 'error in dimension')
+ local error = rescuda:float() - groundtruth:float()
+ mytester:assertlt(error:abs():max(), precision_forward,
+ 'error on state (forward) ')
+ error = resgrad:float() - groundgrad:float()
+ mytester:assertlt(error:abs():max(), precision_backward,
+ 'error on state (backward) ')
+ end
+
+ test(sconv, gconv)
+ local gconv = cudnn.convert(sconv, cudnn):cuda()
+ mytester:asserteq(torch.typename(gconv), 'cudnn.SpatialAveragePooling', 'conversion type check')
+ test(sconv, gconv)
end
local function nonlinSingle(nonlin)
@@ -659,35 +752,46 @@ local function nonlinSingle(nonlin)
local gradOutput = torch.randn(from,outj,outi):cuda()
local sconv = nn[nonlin]():cuda()
- local groundtruth = sconv:forward(input)
- local groundgrad = sconv:backward(input, gradOutput)
- cutorch.synchronize()
- -- 50% prob to choose inplace or out-of-place
- local inplace = false
- if math.random(0,1) == 1 then
- inplace = true
- end
local gconv = cudnn[nonlin](inplace):cuda()
- local input__ = input:clone()
- local _ = gconv:forward(input__)
-
- -- serialize and deserialize
- torch.save('modelTemp.t7', gconv)
- gconv = torch.load('modelTemp.t7')
+ local function test(sconv, gconv)
+ local groundtruth = sconv:forward(input)
+ local groundgrad = sconv:backward(input, gradOutput)
+ cutorch.synchronize()
+ -- 50% prob to choose inplace or out-of-place
+ local inplace = false
+ if math.random(0,1) == 1 then
+ inplace = true
+ end
+ local input__ = input:clone()
+ local _ = gconv:forward(input__)
+
+ -- serialize and deserialize
+ torch.save('modelTemp.t7', gconv)
+ gconv = torch.load('modelTemp.t7')
+
+ local input__ = input:clone()
+ local gradOutput__ = gradOutput:clone()
+ local rescuda = gconv:forward(input__)
+ local resgrad = gconv:backward(input__, gradOutput__)
+ cutorch.synchronize()
+ mytester:asserteq(rescuda:dim(), 3, 'error in dimension')
+ mytester:asserteq(resgrad:dim(), 3, 'error in dimension')
+ local error = rescuda:float() - groundtruth:float()
+ mytester:assertlt(error:abs():max(), precision_forward,
+ 'error on state (forward) ')
+ error = resgrad:float() - groundgrad:float()
+ mytester:assertlt(error:abs():max(), precision_backward,
+ 'error on state (backward) ')
+ -- IO
+ local ferr,berr = jac.testIO(gconv, input)
+ mytester:assertlt(ferr, precision_io, torch.typename(gconv) .. ' - i/o forward err ')
+ mytester:assertlt(berr, precision_io, torch.typename(gconv) .. ' - i/o backward err ')
+ end
- local input__ = input:clone()
- local gradOutput__ = gradOutput:clone()
- local rescuda = gconv:forward(input__)
- local resgrad = gconv:backward(input__, gradOutput__)
- cutorch.synchronize()
- mytester:asserteq(rescuda:dim(), 3, 'error in dimension')
- mytester:asserteq(resgrad:dim(), 3, 'error in dimension')
- local error = rescuda:float() - groundtruth:float()
- mytester:assertlt(error:abs():max(), precision_forward,
- 'error on state (forward) ')
- error = resgrad:float() - groundgrad:float()
- mytester:assertlt(error:abs():max(), precision_backward,
- 'error on state (backward) ')
+ test(sconv, gconv)
+ local gconv = cudnn.convert(sconv, cudnn)
+ mytester:asserteq(torch.typename(gconv), 'cudnn.'..nonlin, 'conversion type check')
+ test(sconv, gconv)
end
local function nonlinBatch(nonlin)
@@ -701,35 +805,46 @@ local function nonlinBatch(nonlin)
local gradOutput = torch.randn(bs,from,outj,outi):cuda()
local sconv = nn[nonlin]():cuda()
- local groundtruth = sconv:forward(input)
- local groundgrad = sconv:backward(input, gradOutput)
- cutorch.synchronize()
- -- 50% prob to choose inplace or out-of-place
- local inplace = false
- if math.random(0,1) == 1 then
- inplace = true
- end
local gconv = cudnn[nonlin](inplace):cuda()
- local input__ = input:clone()
- local rescuda = gconv:forward(input__)
-
- -- serialize and deserialize
- torch.save('modelTemp.t7', gconv)
- gconv = torch.load('modelTemp.t7')
+ local function test(sconv, gconv)
+ local groundtruth = sconv:forward(input)
+ local groundgrad = sconv:backward(input, gradOutput)
+ cutorch.synchronize()
+ -- 50% prob to choose inplace or out-of-place
+ local inplace = false
+ if math.random(0,1) == 1 then
+ inplace = true
+ end
+ local input__ = input:clone()
+ local rescuda = gconv:forward(input__)
+
+ -- serialize and deserialize
+ torch.save('modelTemp.t7', gconv)
+ gconv = torch.load('modelTemp.t7')
+
+ local input__ = input:clone()
+ local gradOutput__ = gradOutput:clone()
+ local rescuda = gconv:forward(input__)
+ local resgrad = gconv:backward(input__, gradOutput__)
+ cutorch.synchronize()
+ mytester:asserteq(rescuda:dim(), 4, 'error in dimension')
+ mytester:asserteq(resgrad:dim(), 4, 'error in dimension')
+ local error = rescuda:float() - groundtruth:float()
+ mytester:assertlt(error:abs():max(), precision_forward,
+ 'error on state (forward) ')
+ error = resgrad:float() - groundgrad:float()
+ mytester:assertlt(error:abs():max(), precision_backward,
+ 'error on state (backward) ')
+ -- IO
+ local ferr,berr = jac.testIO(gconv, input)
+ mytester:assertlt(ferr, precision_io, torch.typename(gconv) .. ' - i/o forward err ')
+ mytester:assertlt(berr, precision_io, torch.typename(gconv) .. ' - i/o backward err ')
+ end
- local input__ = input:clone()
- local gradOutput__ = gradOutput:clone()
- local rescuda = gconv:forward(input__)
- local resgrad = gconv:backward(input__, gradOutput__)
- cutorch.synchronize()
- mytester:asserteq(rescuda:dim(), 4, 'error in dimension')
- mytester:asserteq(resgrad:dim(), 4, 'error in dimension')
- local error = rescuda:float() - groundtruth:float()
- mytester:assertlt(error:abs():max(), precision_forward,
- 'error on state (forward) ')
- error = resgrad:float() - groundgrad:float()
- mytester:assertlt(error:abs():max(), precision_backward,
- 'error on state (backward) ')
+ test(sconv, gconv)
+ local gconv = cudnn.convert(sconv, cudnn)
+ mytester:asserteq(torch.typename(gconv), 'cudnn.'..nonlin, 'conversion type check')
+ test(sconv, gconv)
end
function cudnntest.ReLU_single()
@@ -774,24 +889,31 @@ function cudnntest.SpatialCrossMapLRN_batch()
local sconv = nn.SpatialCrossMapLRN(size, alpha, beta, k):cuda()
local gconv = cudnn.SpatialCrossMapLRN(size, alpha, beta, k):cuda()
- local groundtruth = sconv:forward(input):clone()
- local groundgrad = sconv:backward(input, gradOutput)
- cutorch.synchronize()
- gconv:forward(input)
- -- serialize and deserialize
- torch.save('modelTemp.t7', gconv)
- gconv = torch.load('modelTemp.t7')
- local rescuda = gconv:forward(input)
- local resgrad = gconv:backward(input, gradOutput)
- cutorch.synchronize()
- mytester:asserteq(rescuda:dim(), 4, 'error in dimension')
- mytester:asserteq(resgrad:dim(), 4, 'error in dimension')
- local error = rescuda:float() - groundtruth:float()
- mytester:assertlt(error:abs():max(), precision_forward,
- 'error on state (forward) ')
- error = resgrad:float() - groundgrad:float()
- mytester:assertlt(error:abs():max(), precision_backward,
- 'error on state (backward) ')
+ local function test(sconv, gconv)
+ local groundtruth = sconv:forward(input):clone()
+ local groundgrad = sconv:backward(input, gradOutput)
+ cutorch.synchronize()
+ gconv:forward(input)
+ -- serialize and deserialize
+ torch.save('modelTemp.t7', gconv)
+ gconv = torch.load('modelTemp.t7')
+ local rescuda = gconv:forward(input)
+ local resgrad = gconv:backward(input, gradOutput)
+ cutorch.synchronize()
+ mytester:asserteq(rescuda:dim(), 4, 'error in dimension')
+ mytester:asserteq(resgrad:dim(), 4, 'error in dimension')
+ local error = rescuda:float() - groundtruth:float()
+ mytester:assertlt(error:abs():max(), precision_forward,
+ 'error on state (forward) ')
+ error = resgrad:float() - groundgrad:float()
+ mytester:assertlt(error:abs():max(), precision_backward,
+ 'error on state (backward) ')
+ end
+
+ test(sconv, gconv)
+ local gconv = cudnn.convert(sconv, cudnn)
+ mytester:asserteq(torch.typename(gconv), 'cudnn.SpatialCrossMapLRN', 'conversion type check')
+ test(sconv, gconv)
end
@@ -895,27 +1017,39 @@ function cudnntest.LogSoftMax_single()
local gradOutput = torch.randn(sz):cuda()
local sconv = nn.LogSoftMax():cuda()
- local groundtruth = sconv:forward(input)
- local groundgrad = sconv:backward(input, gradOutput)
- cutorch.synchronize()
local gconv = cudnn.LogSoftMax():cuda()
- local _ = gconv:forward(input)
- -- serialize and deserialize
- torch.save('modelTemp.t7', gconv)
- gconv = torch.load('modelTemp.t7')
+ local function test(sconv, gconv)
+ local groundtruth = sconv:forward(input)
+ local groundgrad = sconv:backward(input, gradOutput)
+ cutorch.synchronize()
+ local _ = gconv:forward(input)
- local rescuda = gconv:forward(input)
- local resgrad = gconv:backward(input, gradOutput)
- cutorch.synchronize()
- local error = rescuda:float() - groundtruth:float()
- local errmax = error:abs():max()
- mytester:assertlt(errmax, precision_forward,
- 'error on state (forward) ')
- error = resgrad:float() - groundgrad:float()
- errmax = error:abs():max()
- mytester:assertlt(errmax, precision_backward,
- 'error on state (backward) ')
+ -- serialize and deserialize
+ torch.save('modelTemp.t7', gconv)
+ gconv = torch.load('modelTemp.t7')
+
+ local rescuda = gconv:forward(input)
+ local resgrad = gconv:backward(input, gradOutput)
+ cutorch.synchronize()
+ local error = rescuda:float() - groundtruth:float()
+ local errmax = error:abs():max()
+ mytester:assertlt(errmax, precision_forward,
+ 'error on state (forward) ')
+ error = resgrad:float() - groundgrad:float()
+ errmax = error:abs():max()
+ mytester:assertlt(errmax, precision_backward,
+ 'error on state (backward) ')
+ -- IO
+ local ferr,berr = jac.testIO(gconv, input)
+ mytester:assertlt(ferr, precision_io, torch.typename(gconv) .. ' - i/o forward err ')
+ mytester:assertlt(berr, precision_io, torch.typename(gconv) .. ' - i/o backward err ')
+ end
+
+ test(sconv, gconv)
+ local gconv = cudnn.convert(sconv, cudnn)
+ mytester:asserteq(torch.typename(gconv), 'cudnn.LogSoftMax', 'conversion type check')
+ test(sconv, gconv)
end
function cudnntest.LogSoftMax_batch()
@@ -925,33 +1059,40 @@ function cudnntest.LogSoftMax_batch()
local gradOutput = torch.randn(bs,from):cuda()
local sconv = nn.LogSoftMax():cuda()
- local groundtruth = sconv:forward(input)
- local groundgrad = sconv:backward(input, gradOutput)
- cutorch.synchronize()
local gconv = cudnn.LogSoftMax():cuda()
- local rescuda = gconv:forward(input)
-
- -- serialize and deserialize
- torch.save('modelTemp.t7', gconv)
- gconv = torch.load('modelTemp.t7')
+ local function test(sconv, gconv)
+ local groundtruth = sconv:forward(input)
+ local groundgrad = sconv:backward(input, gradOutput)
+ cutorch.synchronize()
+ local rescuda = gconv:forward(input)
+
+ -- serialize and deserialize
+ torch.save('modelTemp.t7', gconv)
+ gconv = torch.load('modelTemp.t7')
+
+ local rescuda = gconv:forward(input)
+ local resgrad = gconv:backward(input, gradOutput)
+ cutorch.synchronize()
+ mytester:asserteq(rescuda:dim(), 2, 'error in dimension')
+ mytester:asserteq(resgrad:dim(), 2, 'error in dimension')
- local rescuda = gconv:forward(input)
- local resgrad = gconv:backward(input, gradOutput)
- cutorch.synchronize()
- mytester:asserteq(rescuda:dim(), 2, 'error in dimension')
- mytester:asserteq(resgrad:dim(), 2, 'error in dimension')
+ local error = rescuda:float() - groundtruth:float()
+ mytester:assertlt(error:abs():max(),
+ precision_forward, 'error on state (forward) ')
+ error = resgrad:float() - groundgrad:float()
+ mytester:assertlt(error:abs():max(),
+ precision_backward, 'error on state (backward) ')
- local error = rescuda:float() - groundtruth:float()
- mytester:assertlt(error:abs():max(),
- precision_forward, 'error on state (forward) ')
- error = resgrad:float() - groundgrad:float()
- mytester:assertlt(error:abs():max(),
- precision_backward, 'error on state (backward) ')
+ -- IO
+ local ferr,berr = jac.testIO(gconv, input)
+ mytester:assertlt(ferr, precision_io, torch.typename(gconv) .. ' - i/o forward err ')
+ mytester:assertlt(berr, precision_io, torch.typename(gconv) .. ' - i/o backward err ')
+ end
- -- IO
- local ferr,berr = jac.testIO(gconv, input)
- mytester:assertlt(ferr, precision_io, torch.typename(gconv) .. ' - i/o forward err ')
- mytester:assertlt(berr, precision_io, torch.typename(gconv) .. ' - i/o backward err ')
+ test(sconv, gconv)
+ local gconv = cudnn.convert(sconv, cudnn)
+ mytester:asserteq(torch.typename(gconv), 'cudnn.LogSoftMax', 'conversion type check')
+ test(sconv, gconv)
end
function cudnntest.SpatialLogSoftMax()