
github.com/soumith/cudnn.torch.git
author     Christopher D. Twigg <cdtwigg@fb.com>   2016-03-16 21:05:21 +0300
committer  Christopher D. Twigg <cdtwigg@fb.com>   2016-03-16 21:21:56 +0300
commit     0574da115a132e3dfb1bc905e01967d203eeb266 (patch)
tree       8282bb4891f7792b94d70d2836e30c6b55360aa1 /test
parent     b80bdbab6faf66b711bf7c8a159703f62508c5e2 (diff)
Adding support for SpatialFullConvolution.
Since SpatialFullConvolution is just the transpose of the regular convolution operator, we can use cuDNN by swapping the forward and backward passes. This can be a substantial speed improvement over cunn.SpatialFullConvolution, which works by explicitly building the full matrix for a GEMM operation.
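For reference, the equivalence the commit relies on can be checked on the CPU with plain nn modules: the forward pass of a transposed (full) convolution is exactly the backward-data pass of an ordinary convolution with the roles of the input and output planes swapped. The sketch below is illustrative only; it is not part of the commit, does not touch cuDNN, and uses arbitrary sizes.

    require 'nn'
    torch.manualSeed(0)

    local nIn, nOut, kW, kH, dW, dH, padW, padH = 2, 3, 3, 3, 2, 2, 1, 1

    -- Transposed convolution mapping nIn -> nOut planes.
    local full = nn.SpatialFullConvolution(nIn, nOut, kW, kH, dW, dH, padW, padH)
    full.bias:zero()

    -- Ordinary convolution going the other way (nOut -> nIn planes).
    -- Both weight tensors have shape nIn x nOut x kH x kW, so they can be shared directly.
    local conv = nn.SpatialConvolution(nOut, nIn, kW, kH, dW, dH, padW, padH)
    conv.weight:copy(full.weight)

    local x = torch.randn(1, nIn, 5, 5)
    local y = full:forward(x)               -- 1 x nOut x 9 x 9

    -- Forward of the transposed conv == gradient-w.r.t.-input of the ordinary conv.
    local yDummy = torch.zeros(y:size())
    conv:forward(yDummy)                    -- sizes the internal buffers
    local viaBackward = conv:updateGradInput(yDummy, x)

    print((y - viaBackward):abs():max())    -- ~0 up to floating-point error
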
Diffstat (limited to 'test')
-rw-r--r--   test/test_spatialFullConv.lua   95
1 file changed, 95 insertions(+), 0 deletions(-)
diff --git a/test/test_spatialFullConv.lua b/test/test_spatialFullConv.lua
new file mode 100644
index 0000000..3659864
--- /dev/null
+++ b/test/test_spatialFullConv.lua
@@ -0,0 +1,95 @@
+require 'cudnn'
+require 'cunn'
+
+local cudnntest = {}
+local precision_forward = 1e-4
+local precision_backward = 1e-2
+local precision_jac = 1e-3
+local nloop = 1
+local times = {}
+local mytester
+
+
+local function testSpatialFullConv (imageWidth, imageHeight, nPlanesIn, nPlanesOut, kW, kH, dW, dH, padW, padH, adjW, adjH)
+
+ print ("Running testSpatialFullConv (" ..
+ "imageWidth = " .. imageWidth .. ", " ..
+ "imageHeight = " .. imageHeight .. ", " ..
+ "nPlanesIn = " .. nPlanesIn .. ", " ..
+ "nPlanesOut = " .. nPlanesOut .. ", " ..
+ "kW = " .. kW .. ", " ..
+ "kH = " .. kH .. ", " ..
+ "dW = " .. dW .. ", " ..
+ "dH = " .. dH .. ", " ..
+ "padW = " .. padW .. ", " ..
+ "padH = " .. padH .. ", " ..
+ "adjW = " .. adjW .. ", " ..
+ "adjH = " .. adjH)
+
+ local layerInput = torch.randn(1, nPlanesIn, imageHeight, imageWidth):cuda()
+
+ local modelGT = nn.SpatialFullConvolution (nPlanesIn, nPlanesOut, kW, kH, dW, dH, padW, padH, adjW, adjH)
+ local modelCUDNN = cudnn.SpatialFullConvolution (nPlanesIn, nPlanesOut, kW, kH, dW, dH, padW, padH, adjW, adjH)
+ modelCUDNN.weight:copy (modelGT.weight)
+ modelCUDNN.bias:copy (modelGT.bias)
+
+ modelGT:cuda()
+ modelCUDNN:cuda()
+
+ local outputGT = modelGT:forward (layerInput)
+ local outputCUDNN = modelCUDNN:forward (layerInput)
+
+ local errorOutput = outputCUDNN:float() - outputGT:float()
+ mytester:assertlt(errorOutput:abs():max(), precision_forward, 'error on state (forward) ')
+
+ -- Now check the backwards diffs
+ local crit = nn.MSECriterion()
+ crit:cuda()
+ local target = outputGT:clone()
+ target:zero()
+ target:cuda()
+
+ local f = crit:forward (outputGT, target)
+ local df_do = crit:backward (outputGT, target)
+
+ local gradCUDNN = modelCUDNN:updateGradInput (layerInput, df_do)
+ local gradGT = modelGT:updateGradInput (layerInput, df_do)
+ local errorGradInput = gradCUDNN:float() - gradGT:float()
+ mytester:assertlt(errorGradInput:abs():max(), precision_backward, 'error on grad input (backward) ')
+
+ modelCUDNN:zeroGradParameters()
+ modelCUDNN:accGradParameters (layerInput, df_do, 1.0)
+ modelGT:zeroGradParameters()
+ modelGT:accGradParameters (layerInput, df_do:cuda(), 1.0)
+
+ local errorGradBias = (modelCUDNN.gradBias - modelGT.gradBias)
+ mytester:assertlt(errorGradBias:abs():max(), precision_backward, 'error on grad bias (backward) ')
+
+ local errorGradWeight = (modelCUDNN.gradWeight - modelGT.gradWeight)
+ mytester:assertlt(errorGradWeight:abs():max(), precision_backward, 'error on grad weight (backward) ')
+end
+
+function cudnntest.SpatialFullConvolution_params()
+ -- Test with a wide variety of different parameter values:
+ testSpatialFullConv (5, 5, 1, 1, 3, 3, 2, 2, 0, 0, 0, 0)
+ testSpatialFullConv (5, 5, 1, 1, 3, 3, 2, 2, 1, 1, 0, 0)
+ testSpatialFullConv (5, 7, 1, 1, 3, 1, 2, 2, 1, 1, 0, 0)
+ testSpatialFullConv (7, 5, 1, 1, 3, 1, 1, 1, 1, 1, 0, 0)
+ testSpatialFullConv (8, 5, 3, 1, 3, 3, 2, 2, 1, 1, 0, 0)
+ testSpatialFullConv (5, 5, 1, 3, 3, 3, 2, 2, 1, 1, 0, 0)
+ testSpatialFullConv (5, 5, 5, 3, 3, 3, 2, 2, 1, 1, 1, 1)
+ testSpatialFullConv (9, 9, 3, 3, 3, 5, 2, 3, 0, 1, 1, 0)
+end
+
+torch.setdefaulttensortype('torch.FloatTensor')
+math.randomseed(os.time())
+mytester = torch.Tester()
+mytester:add(cudnntest)
+
+for i=1,cutorch.getDeviceCount() do
+ print('Running test on device: ' .. i)
+ cutorch.setDevice(i)
+ mytester:run()
+end
+
+os.execute('rm -f modelTemp.t7')
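
To exercise the new test on its own (assuming a working Torch install with cutorch, cunn, and cudnn), something along the lines of the following should run every parameter combination above on each available GPU:

    th test/test_spatialFullConv.lua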