
github.com/soumith/cudnn.torch.git
author    Soumith Chintala <soumith@gmail.com>  2016-07-29 08:49:09 +0300
committer GitHub <noreply@github.com>           2016-07-29 08:49:09 +0300
commit    2f820e5c7a420c9b74b10e32433e7eb2c13e55f6 (patch)
tree      661e5477882749291fe1343a39ebfb513514c3a4
parent    b8349eb2133e36d43b8b69588eee4890eaad0281 (diff)
parent    c5bf237f79ebe7f77ea295197e3804a2937774e2 (diff)
Merge pull request #206 from szagoruyko/fp16
half and double with tests
-rw-r--r--  BatchNormalization.lua     |  16
-rw-r--r--  SpatialConvolution.lua     |  34
-rw-r--r--  SpatialFullConvolution.lua |  10
-rw-r--r--  TemporalConvolution.lua    |   4
-rw-r--r--  VolumetricConvolution.lua  |  10
-rw-r--r--  functional.lua             |  12
-rw-r--r--  init.lua                   |  36
-rw-r--r--  test/test.lua              | 529
8 files changed, 373 insertions(+), 278 deletions(-)
diff --git a/BatchNormalization.lua b/BatchNormalization.lua
index 83597d3..77db075 100644
--- a/BatchNormalization.lua
+++ b/BatchNormalization.lua
@@ -42,8 +42,8 @@ end
function BatchNormalization:createIODescriptors(input)
assert(input:dim() == self.nDim)
- assert(torch.typename(self.weight) == 'torch.CudaTensor' and torch.typename(self.bias) == 'torch.CudaTensor',
- 'Only CUDA tensors are supported for cudnn.BatchNormalization!')
+ assert(cudnn.typemap[torch.typename(self.weight)], 'Only Cuda supported duh!')
+ assert(cudnn.typemap[torch.typename(self.bias)] or not self.bias, 'Only Cuda supported duh!')
if not self.iDesc or not self.oDesc or not input:isSize(self.iSize) then
local nFeature = self.running_mean:numel()
self.iSize = input:size()
@@ -63,9 +63,9 @@ local scaleTens = torch.FloatTensor(1);
function BatchNormalization:updateOutput(input)
self:createIODescriptors(input)
- self.save_mean = self.save_mean or input.new()
+ self.save_mean = self.save_mean or self.running_mean.new()
self.save_mean:resizeAs(self.running_mean)
- self.save_std = self.save_std or input.new()
+ self.save_std = self.save_std or self.running_mean.new()
self.save_std:resizeAs(self.running_var)
if self.train then
@@ -137,6 +137,14 @@ function BatchNormalization:write(f)
f:writeObject(var)
end
+function BatchNormalization:type(type, tensorCache)
+ local _type = type == 'torch.CudaHalfTensor' and 'torch.CudaTensor' or type
+ parent.type(self, _type, tensorCache)
+ self.output = self.output:type(type)
+ self.gradInput = self.gradInput:type(type)
+ return self
+end
+
function BatchNormalization:clearState()
self:clearDesc()
nn.utils.clear(self, 'save_mean', 'save_std')
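
Note: the hunks above replace the hard-coded 'torch.CudaTensor' checks with a lookup in cudnn.typemap (defined in init.lua below), and add a type() override that keeps the batch-norm parameters in float even when the module is converted to half — cuDNN's batch normalization expects float scale, bias and running statistics, so only output and gradInput follow the requested precision. Likewise, save_mean and save_std are now allocated from running_mean rather than from the (possibly half) input. A minimal sketch of the typemap lookup, assuming cudnn and cutorch are loaded (the tensor here is illustrative):

    local w = torch.CudaHalfTensor(16)
    -- typemap maps a Torch type name to a cuDNN data-type enum string;
    -- nil means the type cannot be used with cudnn modules
    print(cudnn.typemap[torch.typename(w)])  --> 'CUDNN_DATA_HALF'
    assert(cudnn.typemap[torch.typename(w)], 'unsupported tensor type')
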
diff --git a/SpatialConvolution.lua b/SpatialConvolution.lua
index 1fd3ea0..58c78b2 100644
--- a/SpatialConvolution.lua
+++ b/SpatialConvolution.lua
@@ -30,10 +30,8 @@ end
-- if you change the configuration of the module manually, call this
function SpatialConvolution:resetWeightDescriptors()
- assert(torch.typename(self.weight) == 'torch.CudaTensor',
- 'Only Cuda supported duh!')
- assert(torch.typename(self.bias) == 'torch.CudaTensor' or not self.bias,
- 'Only Cuda supported duh!')
+ assert(cudnn.typemap[torch.typename(self.weight)], 'Only Cuda supported duh!')
+ assert(cudnn.typemap[torch.typename(self.bias)] or not self.bias, 'Only Cuda supported duh!')
-- for compatibility
self.groups = self.groups or 1
-- create filterDescriptor for weight
@@ -43,7 +41,7 @@ function SpatialConvolution:resetWeightDescriptors()
self.nInputPlane/self.groups,
self.kH, self.kW})
errcheck('cudnnSetFilterNdDescriptor', self.weightDesc[0],
- 'CUDNN_DATA_FLOAT', 'CUDNN_TENSOR_NCHW', 4,
+ cudnn.typemap[torch.typename(self.weight)], 'CUDNN_TENSOR_NCHW', 4,
desc:data());
local function destroyWDesc(d)
errcheck('cudnnDestroyFilterDescriptor', d[0]);
@@ -112,8 +110,8 @@ function SpatialConvolution:createIODescriptors(input)
' x ' .. input:size(3) .. ' x ' .. input:size(4))
-- create input descriptor
- local input_slice = {{},{1,self.nInputPlane/self.groups},{},{}}
- self.iDesc = cudnn.toDescriptor(input[input_slice])
+ local input_slice = input:narrow(2,1,self.nInputPlane/self.groups)
+ self.iDesc = cudnn.toDescriptor(input_slice)
-- create conv descriptor
self.convDesc = ffi.new('struct cudnnConvolutionStruct*[1]')
@@ -125,7 +123,7 @@ function SpatialConvolution:createIODescriptors(input)
errcheck('cudnnSetConvolutionNdDescriptor', self.convDesc[0],
2, pad:data(),
stride:data(), upscale:data(), 'CUDNN_CROSS_CORRELATION',
- 'CUDNN_DATA_FLOAT');
+ cudnn.configmap(torch.type(self.weight)));
local function destroyConvDesc(d)
errcheck('cudnnDestroyConvolutionDescriptor', d[0]);
end
@@ -141,8 +139,8 @@ function SpatialConvolution:createIODescriptors(input)
self.output:resize(oSize:long():storage())
-- create descriptor for output
- local output_slice = {{},{1,self.nOutputPlane/self.groups},{},{}}
- self.oDesc = cudnn.toDescriptor(self.output[output_slice])
+ local output_slice = self.output:narrow(2,1,self.nOutputPlane/self.groups)
+ self.oDesc = cudnn.toDescriptor(output_slice)
self.oDescForBias = cudnn.toDescriptor(self.output)
-----------------------------------------------------------------------
@@ -158,8 +156,8 @@ function SpatialConvolution:createIODescriptors(input)
return str
end
local autotunerHash = shape(self.weight) .. ';'
- .. shape(input[input_slice]) .. ';'
- .. shape(self.output[output_slice])
+ .. shape(input_slice) .. ';'
+ .. shape(output_slice)
local maxBufSize = 0
@@ -195,8 +193,8 @@ function SpatialConvolution:createIODescriptors(input)
.. " Weight: %15s Input: %15s Output: %15s",
perfResults[0].time, tonumber(perfResults[0].memory),
tonumber(perfResults[0].algo),
- shape(self.weight), shape(input[input_slice]),
- shape(self.output[output_slice])))
+ shape(self.weight), shape(input_slice),
+ shape(output_slice)))
end
end
else
@@ -247,8 +245,8 @@ function SpatialConvolution:createIODescriptors(input)
.. " Weight: %15s Input: %15s Output: %15s",
perfResults[0].time, tonumber(perfResults[0].memory),
tonumber(perfResults[0].algo),
- shape(self.weight), shape(input[input_slice]),
- shape(self.output[output_slice])))
+ shape(self.weight), shape(input_slice),
+ shape(output_slice)))
end
end
else
@@ -298,8 +296,8 @@ function SpatialConvolution:createIODescriptors(input)
.. " Weight: %15s Input: %15s Output: %15s\n",
perfResults[0].time, tonumber(perfResults[0].memory),
tonumber(perfResults[0].algo),
- shape(self.weight), shape(input[input_slice]),
- shape(self.output[output_slice])))
+ shape(self.weight), shape(input_slice),
+ shape(output_slice)))
end
end
else
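
Note: three changes recur through this file — the filter descriptor's data type comes from cudnn.typemap, the convolution math type from cudnn.configmap, and the per-group input/output slices are built with narrow instead of table indexing. narrow returns a view sharing the original storage, so the descriptor sees one group's geometry without any data being touched. A small sketch (shapes are illustrative):

    local input = torch.CudaTensor(8, 6, 32, 32)   -- batch 8, 6 channels
    local groups = 2
    local slice = input:narrow(2, 1, 6 / groups)   -- 8 x 3 x 32 x 32 view
    assert(slice:data() == input:data())           -- same memory, no copy
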
diff --git a/SpatialFullConvolution.lua b/SpatialFullConvolution.lua
index 4aa3c11..ff91a13 100644
--- a/SpatialFullConvolution.lua
+++ b/SpatialFullConvolution.lua
@@ -10,10 +10,8 @@ autotunerCache[3] = {} -- backwardData
-- if you change the configuration of the module manually, call this
function SpatialFullConvolution:resetWeightDescriptors()
- assert(torch.typename(self.weight) == 'torch.CudaTensor',
- 'Only Cuda supported duh!')
- assert(torch.typename(self.bias) == 'torch.CudaTensor' or not self.bias,
- 'Only Cuda supported duh!')
+ assert(cudnn.typemap[torch.typename(self.weight)], 'Only Cuda supported duh!')
+ assert(cudnn.typemap[torch.typename(self.bias)] or not self.bias, 'Only Cuda supported duh!')
-- create filterDescriptor for weight
self.weightDesc = ffi.new('struct cudnnFilterStruct*[1]')
errcheck('cudnnCreateFilterDescriptor', self.weightDesc)
@@ -21,7 +19,7 @@ function SpatialFullConvolution:resetWeightDescriptors()
self.nOutputPlane,
self.kH, self.kW})
errcheck('cudnnSetFilterNdDescriptor', self.weightDesc[0],
- 'CUDNN_DATA_FLOAT', 'CUDNN_TENSOR_NCHW', 4,
+ cudnn.typemap[torch.typename(self.weight)], 'CUDNN_TENSOR_NCHW', 4,
desc:data());
local function destroyWDesc(d)
errcheck('cudnnDestroyFilterDescriptor', d[0]);
@@ -102,7 +100,7 @@ function SpatialFullConvolution:createIODescriptors(input)
errcheck('cudnnSetConvolutionNdDescriptor', self.convDesc[0],
2, pad:data(),
stride:data(), upscale:data(), 'CUDNN_CROSS_CORRELATION',
- 'CUDNN_DATA_FLOAT');
+ cudnn.configmap(torch.type(self.weight)));
local function destroyConvDesc(d)
errcheck('cudnnDestroyConvolutionDescriptor', d[0]);
end
diff --git a/TemporalConvolution.lua b/TemporalConvolution.lua
index 36a55b7..cc9e079 100644
--- a/TemporalConvolution.lua
+++ b/TemporalConvolution.lua
@@ -54,8 +54,8 @@ end
function TemporalConvolution:updateOutput(input)
local _input = inputview(input)
assert(_input:size(4) == self.inputFrameSize,'invalid input frame size')
- self.buffer = self.buffer or torch.CudaTensor()
- self._output = self._output or torch.CudaTensor()
+ self.buffer = self.buffer or input.new()
+ self._output = self._output or input.new()
if self.output:storage() then self._output:set(self.output:storage()) else self._output = self.output end
if self.buffer:storage() then self.output:set(self.buffer:storage(), 1, self.output:size()) else self.output = self.buffer end
cudnn.SpatialConvolution.updateOutput(self,_input)
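
Note: the temporal-convolution buffers are no longer hard-wired to torch.CudaTensor; tensor.new() constructs an empty tensor of the caller's own type, so the buffers follow whatever precision the input arrives in. A one-liner sketch (illustrative):

    local input = torch.CudaHalfTensor(2, 3)
    local buffer = input.new()            -- empty torch.CudaHalfTensor
    print(torch.typename(buffer))         --> 'torch.CudaHalfTensor'
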
diff --git a/VolumetricConvolution.lua b/VolumetricConvolution.lua
index fd5e9c7..b255467 100644
--- a/VolumetricConvolution.lua
+++ b/VolumetricConvolution.lua
@@ -10,17 +10,15 @@ autotunerCache[3] = {} -- backwardData
-- if you change the configuration of the module manually, call this
function VolumetricConvolution:resetWeightDescriptors()
- assert(torch.typename(self.weight) == 'torch.CudaTensor',
- 'Only Cuda supported duh!')
- assert(torch.typename(self.bias) == 'torch.CudaTensor',
- 'Only Cuda supported duh!')
+ assert(cudnn.typemap[torch.typename(self.weight)], 'Only Cuda supported duh!')
+ assert(cudnn.typemap[torch.typename(self.bias)] or not self.bias, 'Only Cuda supported duh!')
-- create filterDescriptor for weight
self.weightDesc = ffi.new('struct cudnnFilterStruct*[1]')
errcheck('cudnnCreateFilterDescriptor', self.weightDesc)
local desc = torch.IntTensor({self.nOutputPlane, self.nInputPlane,
self.kT, self.kH, self.kW})
errcheck('cudnnSetFilterNdDescriptor', self.weightDesc[0],
- 'CUDNN_DATA_FLOAT', 'CUDNN_TENSOR_NCHW', 5,
+ cudnn.typemap[torch.typename(self.weight)], 'CUDNN_TENSOR_NCHW', 5,
desc:data());
local function destroyWDesc(d)
errcheck('cudnnDestroyFilterDescriptor', d[0]);
@@ -87,7 +85,7 @@ function VolumetricConvolution:createIODescriptors(input)
errcheck('cudnnSetConvolutionNdDescriptor', self.convDesc[0],
3, pad:data(),
stride:data(), upscale:data(), 'CUDNN_CROSS_CORRELATION',
- 'CUDNN_DATA_FLOAT');
+ cudnn.configmap(torch.type(self.weight)));
local function destroyConvDesc(d)
errcheck('cudnnDestroyConvolutionDescriptor', d[0]);
end
diff --git a/functional.lua b/functional.lua
index 4564fb7..cea9df9 100644
--- a/functional.lua
+++ b/functional.lua
@@ -60,7 +60,7 @@ cudnn.functional.Convolution2D_updateOutput = function(handle, input, weight, ou
local nOutputPlane, nInputPlane, kH, kW
= weight:size(1), weight:size(2), weight:size(3), weight:size(4)
local desc = torch.IntTensor({nOutputPlane, nInputPlane, kH, kW})
- errcheck('cudnnSetFilterNdDescriptor', weightDesc[0], 'CUDNN_DATA_FLOAT', 'CUDNN_TENSOR_NCHW', 4,
+ errcheck('cudnnSetFilterNdDescriptor', weightDesc[0], cudnn.typemap[torch.type(input)], 'CUDNN_TENSOR_NCHW', 4,
desc:data());
local function destroyWDesc(d)
errcheck('cudnnDestroyFilterDescriptor', d[0]);
@@ -76,7 +76,7 @@ cudnn.functional.Convolution2D_updateOutput = function(handle, input, weight, ou
errcheck('cudnnSetConvolutionNdDescriptor', convDesc[0],
2, pad:data(),
stride:data(), upscale:data(), 'CUDNN_CROSS_CORRELATION',
- 'CUDNN_DATA_FLOAT');
+ cudnn.configmap(torch.type(weight)));
local function destroyConvDesc(d)
errcheck('cudnnDestroyConvolutionDescriptor', d[0]);
end
@@ -139,7 +139,7 @@ cudnn.functional.Convolution2D_updateGradInput = function(handle, input, weight,
local nOutputPlane, nInputPlane, kH, kW
= weight:size(1), weight:size(2), weight:size(3), weight:size(4)
local desc = torch.IntTensor({nOutputPlane, nInputPlane, kH, kW})
- errcheck('cudnnSetFilterNdDescriptor', weightDesc[0], 'CUDNN_DATA_FLOAT', 'CUDNN_TENSOR_NCHW', 4,
+ errcheck('cudnnSetFilterNdDescriptor', weightDesc[0], cudnn.typemap[torch.type(input)], 'CUDNN_TENSOR_NCHW', 4,
desc:data());
local function destroyWDesc(d)
errcheck('cudnnDestroyFilterDescriptor', d[0]);
@@ -155,7 +155,7 @@ cudnn.functional.Convolution2D_updateGradInput = function(handle, input, weight,
errcheck('cudnnSetConvolutionNdDescriptor', convDesc[0],
2, pad:data(),
stride:data(), upscale:data(), 'CUDNN_CROSS_CORRELATION',
- 'CUDNN_DATA_FLOAT');
+ cudnn.configmap(torch.type(weight)));
local function destroyConvDesc(d)
errcheck('cudnnDestroyConvolutionDescriptor', d[0]);
end
@@ -204,7 +204,7 @@ cudnn.functional.Convolution2D_accGradParameters = function(handle, input, gradW
local nOutputPlane, nInputPlane, kH, kW
= gradWeight:size(1), gradWeight:size(2), gradWeight:size(3), gradWeight:size(4)
local desc = torch.IntTensor({nOutputPlane, nInputPlane, kH, kW})
- errcheck('cudnnSetFilterNdDescriptor', weightDesc[0], 'CUDNN_DATA_FLOAT', 'CUDNN_TENSOR_NCHW', 4,
+ errcheck('cudnnSetFilterNdDescriptor', weightDesc[0], cudnn.typemap[torch.type(input)], 'CUDNN_TENSOR_NCHW', 4,
desc:data());
local function destroyWDesc(d)
errcheck('cudnnDestroyFilterDescriptor', d[0]);
@@ -220,7 +220,7 @@ cudnn.functional.Convolution2D_accGradParameters = function(handle, input, gradW
errcheck('cudnnSetConvolutionNdDescriptor', convDesc[0],
2, pad:data(),
stride:data(), upscale:data(), 'CUDNN_CROSS_CORRELATION',
- 'CUDNN_DATA_FLOAT');
+ cudnn.configmap(torch.type(gradWeight)));
local function destroyConvDesc(d)
errcheck('cudnnDestroyConvolutionDescriptor', d[0]);
end
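
Note: all four call sites in functional.lua follow the same FFI descriptor pattern as the module files — allocate a one-element pointer array, create the descriptor, configure it with the mapped data type, and register a destructor. A condensed sketch of that lifecycle, assuming cudnn and its errcheck are loaded as in init.lua (the standalone helper is illustrative, not part of this commit):

    local ffi = require 'ffi'
    local errcheck = cudnn.errcheck

    -- hypothetical helper mirroring the pattern used in this diff;
    -- sizes is a torch.IntTensor of filter dimensions
    local function makeFilterDescriptor(weight, sizes)
       local desc = ffi.new('struct cudnnFilterStruct*[1]')
       errcheck('cudnnCreateFilterDescriptor', desc)
       errcheck('cudnnSetFilterNdDescriptor', desc[0],
                cudnn.typemap[torch.typename(weight)],  -- HALF/FLOAT/DOUBLE
                'CUDNN_TENSOR_NCHW', sizes:size(1), sizes:data())
       ffi.gc(desc, function(d)
          errcheck('cudnnDestroyFilterDescriptor', d[0])
       end)
       return desc
    end
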
diff --git a/init.lua b/init.lua
index f8f5db0..4b1c6e5 100644
--- a/init.lua
+++ b/init.lua
@@ -30,6 +30,36 @@ local function destroy(handle)
end
ffi.gc(cudnn.handle, destroy)
+cudnn.typemap = {
+ ['torch.CudaHalfTensor'] = 'CUDNN_DATA_HALF',
+ ['torch.CudaTensor'] = 'CUDNN_DATA_FLOAT',
+ ['torch.CudaDoubleTensor'] = 'CUDNN_DATA_DOUBLE',
+}
+
+-- TODO: determine if the device supports true half and use true half on it
+-- for now, use float math for half and float tensors, and double for double
+local function determineHalfCapability(dev)
+ local prop = cutorch.getDeviceProperties(dev)
+ if prop.major >= 6 or prop.name:find'X1' then
+ return 'CUDNN_DATA_HALF'
+ else
+ return 'CUDNN_DATA_FLOAT'
+ end
+end
+
+local configmaps = {}
+for i=1,cutorch.getDeviceCount() do
+ configmaps[i] = {
+ ['torch.CudaHalfTensor'] = determineHalfCapability(i),
+ ['torch.CudaTensor'] = 'CUDNN_DATA_FLOAT',
+ ['torch.CudaDoubleTensor'] = 'CUDNN_DATA_DOUBLE',
+ }
+end
+
+cudnn.configmap = function(tensortype)
+ return configmaps[cutorch.getDevice()][tensortype]
+end
+
function cudnn.getHandle()
local device = cutorch.getDevice()
local stream = cutorch.getStream() -- starts from 0
@@ -61,7 +91,8 @@ end
cudnn.errcheck = errcheck
function cudnn.toDescriptor(t)
- assert(torch.typename(t) == 'torch.CudaTensor')
+ local typename = torch.typename(t)
+ assert(cudnn.typemap[typename])
local descriptor = ffi.new('struct cudnnTensorStruct*[1]')
-- create descriptor
errcheck('cudnnCreateTensorDescriptor', descriptor)
@@ -79,7 +110,8 @@ function cudnn.toDescriptor(t)
-- set descriptor
local size = torch.LongTensor(t:size()):int()
local stride = torch.LongTensor(t:stride()):int()
- errcheck('cudnnSetTensorNdDescriptor', descriptor[0], 'CUDNN_DATA_FLOAT',
+
+ errcheck('cudnnSetTensorNdDescriptor', descriptor[0], cudnn.typemap[typename],
t:dim(), size:data(), stride:data())
return descriptor
end
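
Note: init.lua carries the core of the multi-type support. cudnn.typemap translates a tensor's storage type into the cuDNN enum used in tensor and filter descriptors, while the per-device configmap picks the math type for convolutions: devices with compute capability 6.0+ (or a name matching 'X1', i.e. Tegra X1) compute in true half, everything else falls back to float math for half tensors. A usage sketch (the tensor and device state are illustrative):

    local w = torch.CudaHalfTensor(16, 8, 3, 3)

    -- storage type, used when setting up descriptors
    print(cudnn.typemap[torch.typename(w)])  --> 'CUDNN_DATA_HALF'

    -- math type for the current device: 'CUDNN_DATA_HALF' on sm_60+ or
    -- Tegra X1, 'CUDNN_DATA_FLOAT' otherwise
    print(cudnn.configmap(torch.type(w)))
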
diff --git a/test/test.lua b/test/test.lua
index 47305ae..c612771 100644
--- a/test/test.lua
+++ b/test/test.lua
@@ -2,15 +2,60 @@ require 'cudnn'
require 'cunn'
local cudnntest = torch.TestSuite()
-local precision_forward = 1e-4
-local precision_backward = 1e-2
-local precision_jac = 1e-3
-local precision_io = 1e-5
local nloop = 1
local times = {}
local mytester
local jac = nn.Jacobian
+
+local testparams_half = {
+ test_type = 'torch.CudaHalfTensor',
+ precision_forward = 2e-1,
+ precision_backward = 3,
+ precision_jac = 1e-3,
+ precision_io = 1e-1,
+}
+
+local testparams_float = {
+ test_type = 'torch.CudaTensor',
+ precision_forward = 1e-4,
+ precision_backward = 1e-2,
+ precision_jac = 1e-3,
+ precision_io = 1e-5,
+}
+
+-- TODO: find out why the errors are so huge
+local testparams_double = {
+ test_type = 'torch.CudaDoubleTensor',
+ precision_forward = 1e+4,
+ precision_backward = 1e+4,
+ precision_jac = 1e-3,
+ precision_io = 1e-5,
+}
+
+local testparams = testparams_half
+
+local function cast(input)
+ return input:type(testparams.test_type)
+end
+
+-- workarounds
+function torch.CudaHalfTensor:abs()
+ return self:cuda():abs():cudaHalf()
+end
+
+function torch.CudaDoubleTensor:abs()
+ return self:cuda():abs():cudaDouble()
+end
+
+function torch.CudaHalfTensor:mean()
+ return self:cuda():mean()
+end
+
+function torch.CudaDoubleTensor:mean()
+ return self:cuda():mean()
+end
+
function cudnntest.SpatialConvolution_forward_batch()
local bs = math.random(1,32)
local from = math.random(1,32)
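
Note: the hunk above parameterizes the whole suite. testparams selects the tensor type and per-precision tolerances (half needs far looser bounds than float), cast() converts inputs and modules to that type, and the abs/mean workarounds round-trip through torch.CudaTensor because those reductions were not yet implemented for half and double storage in cutorch. The excerpt hard-codes testparams = testparams_half; a driver over all three precisions might look like this (illustrative — the actual runner sits past the end of this excerpt):

    for _, p in ipairs{testparams_float, testparams_double, testparams_half} do
       testparams = p
       print('Running tests as ' .. testparams.test_type)
       mytester:run()   -- each test reads testparams and converts via cast()
    end
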
@@ -26,26 +71,26 @@ function cudnntest.SpatialConvolution_forward_batch()
local input = torch.randn(bs,from,inj,ini):cuda()
local sconv = nn.SpatialConvolution(from,to,ki,kj,si,sj):cuda()
- local gconv = cudnn.SpatialConvolution(from,to,ki,kj,si,sj):cuda():fastest()
+ local gconv = cast(cudnn.SpatialConvolution(from,to,ki,kj,si,sj)):fastest()
gconv.weight:copy(sconv.weight)
gconv.bias:copy(sconv.bias)
local function test(sconv, gconv)
local groundtruth = sconv:forward(input)
cutorch.synchronize()
- local rescuda = gconv:forward(input)
+ local rescuda = gconv:forward(cast(input))
cutorch.synchronize()
local error = rescuda:float() - groundtruth:float()
- mytester:assertlt(error:abs():max(), precision_forward, 'error on state (forward) ')
+ mytester:assertlt(error:abs():max(), testparams.precision_forward, 'error on state (forward) ')
-- IO
- local ferr,berr = jac.testIO(gconv, input)
- mytester:assertlt(ferr, precision_io, torch.typename(gconv) .. ' - i/o forward err ')
- mytester:assertlt(berr, precision_io, torch.typename(gconv) .. ' - i/o backward err ')
+ local ferr,berr = jac.testIO(gconv, cast(input))
+ mytester:assertlt(ferr, testparams.precision_io, torch.typename(gconv) .. ' - i/o forward err ')
+ mytester:assertlt(berr, testparams.precision_io, torch.typename(gconv) .. ' - i/o backward err ')
end
test(sconv, gconv)
- local gconv = cudnn.convert(sconv, cudnn)
+ local gconv = cast(cudnn.convert(sconv, cudnn))
mytester:asserteq(torch.typename(gconv), 'cudnn.SpatialConvolution', 'conversion type check')
test(sconv, gconv)
end
@@ -75,19 +120,19 @@ function cudnntest.SpatialConvolution_backward_batch()
local groundweight = sconv.gradWeight
local groundbias = sconv.gradBias
- local gconv = cudnn.SpatialConvolution(from,to,ki,kj,si,sj):cuda():fastest()
+ local gconv = cast(cudnn.SpatialConvolution(from,to,ki,kj,si,sj)):fastest()
gconv.weight:copy(sconv.weight)
gconv.bias:copy(sconv.bias)
- gconv:forward(input)
+ gconv:forward(cast(input))
-- serialize and deserialize
torch.save('modelTemp.t7', gconv)
gconv = torch.load('modelTemp.t7')
local function test(sconv, gconv)
- gconv:forward(input)
+ gconv:forward(cast(input))
gconv:zeroGradParameters()
- local rescuda = gconv:backward(input, gradOutput, scale)
+ local rescuda = gconv:backward(cast(input), cast(gradOutput), scale)
cutorch.synchronize()
local weightcuda = gconv.gradWeight
local biascuda = gconv.gradBias
@@ -96,13 +141,13 @@ function cudnntest.SpatialConvolution_backward_batch()
local werror = weightcuda:float() - groundweight:float()
local berror = biascuda:float() - groundbias:float()
- mytester:assertlt(error:abs():max(), precision_backward, 'error on state (backward) ')
- mytester:assertlt(werror:abs():max(), precision_backward, 'error on weight (backward) ')
- mytester:assertlt(berror:abs():max(), precision_backward, 'error on bias (backward) ')
+ mytester:assertlt(error:abs():max(), testparams.precision_backward, 'error on state (backward) ')
+ mytester:assertlt(werror:abs():max(), testparams.precision_backward, 'error on weight (backward) ')
+ mytester:assertlt(berror:abs():max(), testparams.precision_backward, 'error on bias (backward) ')
end
test(sconv, gconv)
- local gconv = cudnn.convert(sconv, cudnn)
+ local gconv = cast(cudnn.convert(sconv, cudnn))
mytester:asserteq(torch.typename(gconv), 'cudnn.SpatialConvolution', 'conversion type check')
test(sconv, gconv)
end
@@ -121,28 +166,28 @@ function cudnntest.SpatialConvolution_forward_single()
local input = torch.randn(from,inj,ini):cuda()
local sconv = nn.SpatialConvolution(from,to,ki,kj,si,sj):cuda()
- local gconv = cudnn.SpatialConvolution(from,to,ki,kj,si,sj):cuda()
+ local gconv = cast(cudnn.SpatialConvolution(from,to,ki,kj,si,sj))
gconv.weight:copy(sconv.weight)
gconv.bias:copy(sconv.bias)
local function test(sconv, gconv)
local groundtruth = sconv:forward(input)
cutorch.synchronize()
- local rescuda = gconv:forward(input)
+ local rescuda = gconv:forward(cast(input))
cutorch.synchronize()
mytester:asserteq(rescuda:dim(), 3, 'error in dimension')
local error = rescuda:float() - groundtruth:float()
- mytester:assertlt(error:abs():max(), precision_forward,
+ mytester:assertlt(error:abs():max(), testparams.precision_forward,
'error on state (forward) ')
-- IO
- local ferr,berr = jac.testIO(gconv, input)
- mytester:assertlt(ferr, precision_io, torch.typename(gconv) .. ' - i/o forward err ')
- mytester:assertlt(berr, precision_io, torch.typename(gconv) .. ' - i/o backward err ')
+ local ferr,berr = jac.testIO(gconv, cast(input))
+ mytester:assertlt(ferr, testparams.precision_io, torch.typename(gconv) .. ' - i/o forward err ')
+ mytester:assertlt(berr, testparams.precision_io, torch.typename(gconv) .. ' - i/o backward err ')
end
test(sconv, gconv)
- local gconv = cudnn.convert(sconv, cudnn)
+ local gconv = cast(cudnn.convert(sconv, cudnn))
mytester:asserteq(torch.typename(gconv), 'cudnn.SpatialConvolution', 'conversion type check')
test(sconv, gconv)
end
@@ -170,19 +215,19 @@ function cudnntest.SpatialConvolution_backward_single()
local groundweight = sconv.gradWeight
local groundbias = sconv.gradBias
- local gconv = cudnn.SpatialConvolution(from,to,ki,kj,si,sj):cuda()
+ local gconv = cast(cudnn.SpatialConvolution(from,to,ki,kj,si,sj))
gconv.weight:copy(sconv.weight)
gconv.bias:copy(sconv.bias)
local function test(sconv, gconv)
- gconv:forward(input)
+ gconv:forward(cast(input))
-- serialize and deserialize
torch.save('modelTemp.t7', gconv)
gconv = torch.load('modelTemp.t7')
- gconv:forward(input)
+ gconv:forward(cast(input))
gconv:zeroGradParameters()
- local rescuda = gconv:backward(input, gradOutput)
+ local rescuda = gconv:backward(cast(input), cast(gradOutput))
cutorch.synchronize()
mytester:asserteq(rescuda:dim(), 3, 'error in dimension')
local weightcuda = gconv.gradWeight
@@ -192,16 +237,16 @@ function cudnntest.SpatialConvolution_backward_single()
local werror = weightcuda:float() - groundweight:float()
local berror = biascuda:float() - groundbias:float()
- mytester:assertlt(error:abs():max(), precision_backward,
+ mytester:assertlt(error:abs():max(), testparams.precision_backward,
'error on state (backward) ')
- mytester:assertlt(werror:abs():max(), precision_backward,
+ mytester:assertlt(werror:abs():max(), testparams.precision_backward,
'error on weight (backward) ')
- mytester:assertlt(berror:abs():max(), precision_backward,
+ mytester:assertlt(berror:abs():max(), testparams.precision_backward,
'error on bias (backward) ')
end
test(sconv, gconv)
- local gconv = cudnn.convert(sconv, cudnn)
+ local gconv = cast(cudnn.convert(sconv, cudnn))
mytester:asserteq(torch.typename(gconv), 'cudnn.SpatialConvolution', 'conversion type check')
test(sconv, gconv)
end
@@ -221,26 +266,26 @@ function cudnntest.SpatialFullConvolution_forward_batch()
local input = torch.randn(bs,from,inj,ini):cuda()
local sconv = nn.SpatialFullConvolution(from,to,ki,kj,si,sj):cuda()
- local gconv = cudnn.SpatialFullConvolution(from,to,ki,kj,si,sj):cuda():fastest()
+ local gconv = cast(cudnn.SpatialFullConvolution(from,to,ki,kj,si,sj)):fastest()
gconv.weight:copy(sconv.weight)
gconv.bias:copy(sconv.bias)
local function test(sconv, gconv)
local groundtruth = sconv:forward(input)
cutorch.synchronize()
- local rescuda = gconv:forward(input)
+ local rescuda = gconv:forward(cast(input))
cutorch.synchronize()
local error = rescuda:float() - groundtruth:float()
- mytester:assertlt(error:abs():max(), precision_forward, 'error on state (forward) ')
+ mytester:assertlt(error:abs():max(), testparams.precision_forward, 'error on state (forward) ')
-- IO
- local ferr,berr = jac.testIO(gconv, input)
- mytester:assertlt(ferr, precision_io, torch.typename(gconv) .. ' - i/o forward err ')
- mytester:assertlt(berr, precision_io, torch.typename(gconv) .. ' - i/o backward err ')
+ local ferr,berr = jac.testIO(gconv, cast(input))
+ mytester:assertlt(ferr, testparams.precision_io, torch.typename(gconv) .. ' - i/o forward err ')
+ mytester:assertlt(berr, testparams.precision_io, torch.typename(gconv) .. ' - i/o backward err ')
end
test(sconv, gconv)
- local gconv = cudnn.convert(sconv, cudnn)
+ local gconv = cast(cudnn.convert(sconv, cudnn))
mytester:asserteq(torch.typename(gconv), 'cudnn.SpatialFullConvolution', 'conversion type check')
test(sconv, gconv)
end
@@ -269,19 +314,19 @@ function cudnntest.SpatialFullConvolution_backward_batch()
local groundweight = sconv.gradWeight
local groundbias = sconv.gradBias
- local gconv = cudnn.SpatialFullConvolution(from,to,ki,kj,si,sj):cuda():fastest()
+ local gconv = cast(cudnn.SpatialFullConvolution(from,to,ki,kj,si,sj):cuda():fastest())
gconv.weight:copy(sconv.weight)
gconv.bias:copy(sconv.bias)
- gconv:forward(input)
+ gconv:forward(cast(input))
-- serialize and deserialize
torch.save('modelTemp.t7', gconv)
gconv = torch.load('modelTemp.t7')
local function test(sconv, gconv)
- gconv:forward(input)
+ gconv:forward(cast(input))
gconv:zeroGradParameters()
- local rescuda = gconv:backward(input, gradOutput, scale)
+ local rescuda = gconv:backward(cast(input), cast(gradOutput), scale)
cutorch.synchronize()
local weightcuda = gconv.gradWeight
local biascuda = gconv.gradBias
@@ -290,13 +335,13 @@ function cudnntest.SpatialFullConvolution_backward_batch()
local werror = weightcuda:float() - groundweight:float()
local berror = biascuda:float() - groundbias:float()
- mytester:assertlt(error:abs():max(), precision_backward, 'error on state (backward) ')
- mytester:assertlt(werror:abs():max(), precision_backward, 'error on weight (backward) ')
- mytester:assertlt(berror:abs():max(), precision_backward, 'error on bias (backward) ')
+ mytester:assertlt(error:abs():max(), testparams.precision_backward, 'error on state (backward) ')
+ mytester:assertlt(werror:abs():max(), testparams.precision_backward, 'error on weight (backward) ')
+ mytester:assertlt(berror:abs():max(), testparams.precision_backward, 'error on bias (backward) ')
end
test(sconv, gconv)
- local gconv = cudnn.convert(sconv, cudnn)
+ local gconv = cast(cudnn.convert(sconv, cudnn))
mytester:asserteq(torch.typename(gconv), 'cudnn.SpatialFullConvolution', 'conversion type check')
test(sconv, gconv)
end
@@ -322,18 +367,18 @@ function cudnntest.TemporalConvolution_batch()
local groundweight = sconv.gradWeight
local groundbias = sconv.gradBias
- local gconv = cudnn.TemporalConvolution(inputFrameSize,outputFrameSize, ki, si):cuda():fastest()
+ local gconv = cast(cudnn.TemporalConvolution(inputFrameSize,outputFrameSize, ki, si):cuda():fastest())
gconv.weight:copy(sconv.weight:view(gconv.weight:size()))
gconv.bias:copy(sconv.bias)
- gconv:forward(input)
+ gconv:forward(cast(input))
-- serialize and deserialize
torch.save('modelTemp.t7', gconv)
gconv = torch.load('modelTemp.t7')
- local cudaForward = gconv:forward(input)
+ local cudaForward = gconv:forward(cast(input))
gconv:zeroGradParameters()
- local rescuda = gconv:backward(input, gradOutput, scale)
+ local rescuda = gconv:backward(cast(input), cast(gradOutput), scale)
cutorch.synchronize()
local weightcuda = gconv.gradWeight
local biascuda = gconv.gradBias
@@ -342,10 +387,10 @@ function cudnntest.TemporalConvolution_batch()
local error = rescuda:float() - groundgrad:float()
local werror = weightcuda:float() - groundweight:float()
local berror = biascuda:float() - groundbias:float()
- mytester:assertlt(ferror:abs():max(), precision_forward, 'error on forward ')
- mytester:assertlt(error:abs():max(), precision_backward, 'error on state (backward) ')
- mytester:assertlt(werror:abs():max(), precision_backward, 'error on weight (backward) ')
- mytester:assertlt(berror:abs():max(), precision_backward, 'error on bias (backward) ')
+ mytester:assertlt(ferror:abs():max(), testparams.precision_forward, 'error on forward ')
+ mytester:assertlt(error:abs():max(), testparams.precision_backward, 'error on state (backward) ')
+ mytester:assertlt(werror:abs():max(), testparams.precision_backward, 'error on weight (backward) ')
+ mytester:assertlt(berror:abs():max(), testparams.precision_backward, 'error on bias (backward) ')
end
function cudnntest.TemporalConvolution_padding_batch()
@@ -375,18 +420,18 @@ function cudnntest.TemporalConvolution_padding_batch()
local groundweight = sconv.gradWeight
local groundbias = sconv.gradBias
- local gconv = cudnn.TemporalConvolution(inputFrameSize,outputFrameSize, ki, si,pad_h):cuda():fastest()
+ local gconv = cast(cudnn.TemporalConvolution(inputFrameSize,outputFrameSize, ki, si,pad_h):cuda():fastest())
gconv.weight:copy(sconv.weight:view(gconv.weight:size()))
gconv.bias:copy(sconv.bias)
- gconv:forward(input)
+ gconv:forward(cast(input))
-- serialize and deserialize
torch.save('modelTemp.t7', gconv)
gconv = torch.load('modelTemp.t7')
- local cudaForward = gconv:forward(input)
+ local cudaForward = gconv:forward(cast(input))
gconv:zeroGradParameters()
- local rescuda = gconv:backward(input, gradOutput, scale)
+ local rescuda = gconv:backward(cast(input), cast(gradOutput), scale)
cutorch.synchronize()
local weightcuda = gconv.gradWeight
local biascuda = gconv.gradBias
@@ -396,10 +441,10 @@ function cudnntest.TemporalConvolution_padding_batch()
local error = rescuda:float() - groundgrad:float()
local werror = weightcuda:float() - groundweight:float()
local berror = biascuda:float() - groundbias:float()
- mytester:assertlt(ferror:abs():max(), precision_forward, 'error on forward ')
- mytester:assertlt(error:abs():max(), precision_backward, 'error on state (backward) ')
- mytester:assertlt(werror:abs():max(), precision_backward, 'error on weight (backward) ')
- mytester:assertlt(berror:abs():max(), precision_backward, 'error on bias (backward) ')
+ mytester:assertlt(ferror:abs():max(), testparams.precision_forward, 'error on forward ')
+ mytester:assertlt(error:abs():max(), testparams.precision_backward, 'error on state (backward) ')
+ mytester:assertlt(werror:abs():max(), testparams.precision_backward, 'error on weight (backward) ')
+ mytester:assertlt(berror:abs():max(), testparams.precision_backward, 'error on bias (backward) ')
end
@@ -422,18 +467,18 @@ function cudnntest.TemporalConvolution_single()
local groundweight = sconv.gradWeight
local groundbias = sconv.gradBias
- local gconv = cudnn.TemporalConvolution(inputFrameSize,outputFrameSize, ki, si):cuda():fastest()
+ local gconv = cast(cudnn.TemporalConvolution(inputFrameSize,outputFrameSize, ki, si):cuda():fastest())
gconv.weight:copy(sconv.weight:view(gconv.weight:size()))
gconv.bias:copy(sconv.bias)
- gconv:forward(input)
+ gconv:forward(cast(input))
-- serialize and deserialize
torch.save('modelTemp.t7', gconv)
gconv = torch.load('modelTemp.t7')
- local cudaForward = gconv:forward(input)
+ local cudaForward = gconv:forward(cast(input))
gconv:zeroGradParameters()
- local rescuda = gconv:backward(input, gradOutput, scale)
+ local rescuda = gconv:backward(cast(input), cast(gradOutput), scale)
cutorch.synchronize()
local weightcuda = gconv.gradWeight
local biascuda = gconv.gradBias
@@ -442,10 +487,10 @@ function cudnntest.TemporalConvolution_single()
local error = rescuda:float() - groundgrad:float()
local werror = weightcuda:float() - groundweight:float()
local berror = biascuda:float() - groundbias:float()
- mytester:assertlt(ferror:abs():max(), precision_forward, 'error on forward ')
- mytester:assertlt(error:abs():max(), precision_backward, 'error on state (backward) ')
- mytester:assertlt(werror:abs():max(), precision_backward, 'error on weight (backward) ')
- mytester:assertlt(berror:abs():max(), precision_backward, 'error on bias (backward) ')
+ mytester:assertlt(ferror:abs():max(), testparams.precision_forward, 'error on forward ')
+ mytester:assertlt(error:abs():max(), testparams.precision_backward, 'error on state (backward) ')
+ mytester:assertlt(werror:abs():max(), testparams.precision_backward, 'error on weight (backward) ')
+ mytester:assertlt(berror:abs():max(), testparams.precision_backward, 'error on bias (backward) ')
end
function cudnntest.TemporalConvolution_reduceBatchSize()
@@ -458,13 +503,12 @@ function cudnntest.TemporalConvolution_reduceBatchSize()
local batchSize = 128
local smallerBatchSize = batchSize/2
- local input
- input = torch.randn(batchSize,ini,inputFrameSize):cuda()
- local conv = cudnn.TemporalConvolution(inputFrameSize,outputFrameSize,ki,si):cuda()
+ local input = cast(torch.randn(batchSize,ini,inputFrameSize))
+ local conv = cast(cudnn.TemporalConvolution(inputFrameSize,outputFrameSize,ki,si):cuda())
local o1 = conv:updateOutput(input)
mytester:asserteq(o1:size(1), batchSize, 'batch size didn\'t match')
- input = torch.randn(smallerBatchSize,ini,inputFrameSize):cuda()
+ input = cast(torch.randn(smallerBatchSize,ini,inputFrameSize))
local o2 = conv:updateOutput(input)
mytester:asserteq(o2:size(1), smallerBatchSize, 'batch size didn\'t match')
-- do this again to check it doesn't crash
@@ -490,26 +534,26 @@ function cudnntest.VolumetricConvolution_forward_single()
local ink = (outk-1)*sk+kk
local input = torch.randn(from,ink,inj,ini):cuda()
local sconv = nn.VolumetricConvolution(from,to,kk,ki,kj,sk,si,sj):float()
- local gconv = cudnn.VolumetricConvolution(from,to,kk,ki,kj,sk,si,sj):cuda()
+ local gconv = cast(cudnn.VolumetricConvolution(from,to,kk,ki,kj,sk,si,sj))
gconv.weight:copy(sconv.weight)
gconv.bias:copy(sconv.bias)
local function test(sconv, gconv)
local groundtruth = sconv:forward(input:float())
cutorch.synchronize()
- local rescuda = gconv:forward(input)
+ local rescuda = gconv:forward(cast(input))
cutorch.synchronize()
local error = rescuda:float() - groundtruth:float()
- mytester:assertlt(error:abs():max(), precision_forward,
+ mytester:assertlt(error:abs():max(), testparams.precision_forward,
'error on state (forward) ')
-- IO
- local ferr,berr = jac.testIO(gconv, input)
- mytester:assertlt(ferr, precision_io, torch.typename(gconv) .. ' - i/o forward err ')
- mytester:assertlt(berr, precision_io, torch.typename(gconv) .. ' - i/o backward err ')
+ local ferr,berr = jac.testIO(gconv, cast(input))
+ mytester:assertlt(ferr, testparams.precision_io, torch.typename(gconv) .. ' - i/o forward err ')
+ mytester:assertlt(berr, testparams.precision_io, torch.typename(gconv) .. ' - i/o backward err ')
end
test(sconv, gconv)
- local gconv = cudnn.convert(sconv, cudnn):cuda()
+ local gconv = cast(cudnn.convert(sconv, cudnn):cuda())
mytester:asserteq(torch.typename(gconv), 'cudnn.VolumetricConvolution', 'conversion type check')
test(sconv, gconv)
end
@@ -539,21 +583,21 @@ function cudnntest.VolumetricConvolution_backward_single()
local groundweight = sconv.gradWeight
local groundbias = sconv.gradBias
- local gconv = cudnn.VolumetricConvolution(from,to,kk,ki,kj,sk,si,sj):cuda()
+ local gconv = cast(cudnn.VolumetricConvolution(from,to,kk,ki,kj,sk,si,sj))
gconv.weight:copy(sconv.weight)
gconv.bias:copy(sconv.bias)
local function test(sconv, gconv)
- gconv:forward(input)
+ gconv:forward(cast(input))
cutorch.synchronize()
-- serialize and deserialize
torch.save('modelTemp.t7', gconv)
gconv = torch.load('modelTemp.t7')
- gconv:forward(input)
+ gconv:forward(cast(input))
gconv:zeroGradParameters()
- local rescuda = gconv:backward(input, gradOutput)
+ local rescuda = gconv:backward(cast(input), cast(gradOutput))
cutorch.synchronize()
mytester:asserteq(rescuda:dim(), 4, 'error in dimension')
@@ -564,21 +608,22 @@ function cudnntest.VolumetricConvolution_backward_single()
local werror = weightcuda:float() - groundweight:float()
local berror = biascuda:float() - groundbias:float()
- mytester:assertlt(error:abs():max(), precision_backward,
+ mytester:assertlt(error:abs():max(), testparams.precision_backward,
'error on state (backward) ')
- mytester:assertlt(werror:abs():max(), precision_backward,
+ mytester:assertlt(werror:abs():max(), testparams.precision_backward,
'error on weight (backward) ')
- mytester:assertlt(berror:abs():max(), precision_backward,
+ mytester:assertlt(berror:abs():max(), testparams.precision_backward,
'error on bias (backward) ')
end
test(sconv, gconv)
- local gconv = cudnn.convert(sconv, cudnn):cuda()
+ local gconv = cast(cudnn.convert(sconv, cudnn):cuda())
mytester:asserteq(torch.typename(gconv), 'cudnn.VolumetricConvolution', 'conversion type check')
test(sconv, gconv)
end
function cudnntest.VolumetricMaxPooling_batch()
+ if testparams.test_type ~= 'torch.CudaTensor' then return end
local bs = math.random(1,4)
local from = math.random(1,4)
local ki = math.random(2,4)
@@ -597,36 +642,37 @@ function cudnntest.VolumetricMaxPooling_batch()
local gradOutput = torch.randn(bs,from,outk,outj,outi):cuda()
local sconv = nn.VolumetricMaxPooling(kk,ki,kj,sk,si,sj):float()
- local gconv = cudnn.VolumetricMaxPooling(kk,ki,kj,sk,si,sj):cuda()
+ local gconv = cast(cudnn.VolumetricMaxPooling(kk,ki,kj,sk,si,sj))
local function test(sconv, gconv)
local groundtruth = sconv:forward(input:float())
local groundgrad = sconv:backward(input:float(), gradOutput:float())
cutorch.synchronize()
- local rescuda = gconv:forward(input)
- local resgrad = gconv:backward(input, gradOutput)
+ local rescuda = gconv:forward(cast(input))
+ local resgrad = gconv:backward(cast(input), cast(gradOutput))
cutorch.synchronize()
mytester:asserteq(rescuda:dim(), 5, 'error in dimension')
mytester:asserteq(resgrad:dim(), 5, 'error in dimension')
local error = rescuda:float() - groundtruth:float()
- mytester:assertlt(error:abs():max(), precision_forward, 'error on state (forward) ')
+ mytester:assertlt(error:abs():max(), testparams.precision_forward, 'error on state (forward) ')
error = resgrad:float() - groundgrad:float()
- mytester:assertlt(error:abs():max(), precision_backward, 'error on state (backward) ')
+ mytester:assertlt(error:abs():max(), testparams.precision_backward, 'error on state (backward) ')
-- IO
- local ferr,berr = jac.testIO(gconv, input)
- mytester:assertlt(ferr, precision_io, torch.typename(gconv) .. ' - i/o forward err ')
- mytester:assertlt(berr, precision_io, torch.typename(gconv) .. ' - i/o backward err ')
+ local ferr,berr = jac.testIO(gconv, cast(input))
+ mytester:assertlt(ferr, testparams.precision_io, torch.typename(gconv) .. ' - i/o forward err ')
+ mytester:assertlt(berr, testparams.precision_io, torch.typename(gconv) .. ' - i/o backward err ')
end
test(sconv, gconv)
- local gconv = cudnn.convert(sconv, cudnn):cuda()
+ local gconv = cast(cudnn.convert(sconv, cudnn):cuda())
mytester:asserteq(torch.typename(gconv), 'cudnn.VolumetricMaxPooling', 'conversion type check')
test(sconv, gconv)
end
function cudnntest.VolumetricMaxPooling_single()
+ if testparams.test_type ~= 'torch.CudaTensor' then return end
local from = math.random(1,32)
local ki = math.random(2,4)
local kj = math.random(2,4)
@@ -644,36 +690,37 @@ function cudnntest.VolumetricMaxPooling_single()
local gradOutput = torch.randn(from,outk,outj,outi):cuda()
local sconv = nn.VolumetricMaxPooling(kk,ki,kj,sk,si,sj):float()
- local gconv = cudnn.VolumetricMaxPooling(kk,ki,kj,sk,si,sj):cuda()
+ local gconv = cast(cudnn.VolumetricMaxPooling(kk,ki,kj,sk,si,sj))
local function test(sconv, gconv)
local groundtruth = sconv:forward(input:float())
local groundgrad = sconv:backward(input:float(), gradOutput:float())
cutorch.synchronize()
- local _ = gconv:forward(input)
+ local _ = gconv:forward(cast(input))
-- serialize and deserialize
torch.save('modelTemp.t7', gconv)
gconv = torch.load('modelTemp.t7')
- local rescuda = gconv:forward(input)
- local resgrad = gconv:backward(input, gradOutput)
+ local rescuda = gconv:forward(cast(input))
+ local resgrad = gconv:backward(cast(input), cast(gradOutput))
cutorch.synchronize()
mytester:asserteq(rescuda:dim(), 4, 'error in dimension')
mytester:asserteq(resgrad:dim(), 4, 'error in dimension')
local error = rescuda:float() - groundtruth:float()
- mytester:assertlt(error:abs():max(), precision_forward,
+ mytester:assertlt(error:abs():max(), testparams.precision_forward,
'error on state (forward) ')
error = resgrad:float() - groundgrad:float()
- mytester:assertlt(error:abs():max(), precision_backward,
+ mytester:assertlt(error:abs():max(), testparams.precision_backward,
'error on state (backward) ')
end
test(sconv, gconv)
- local gconv = cudnn.convert(sconv, cudnn):cuda()
+ local gconv = cast(cudnn.convert(sconv, cudnn):cuda())
mytester:asserteq(torch.typename(gconv), 'cudnn.VolumetricMaxPooling', 'conversion type check')
test(sconv, gconv)
end
function cudnntest.SpatialMaxPooling_batch()
+ if testparams.test_type ~= 'torch.CudaTensor' then return end
local bs = math.random(1,32)
local from = math.random(1,32)
local ki = math.random(2,4)
@@ -695,26 +742,26 @@ function cudnntest.SpatialMaxPooling_batch()
local groundtruth = sconv:forward(input)
local groundgrad = sconv:backward(input, gradOutput)
cutorch.synchronize()
- local gconv = cudnn.SpatialMaxPooling(ki,kj,si,sj,padi,padj):cuda()
+ local gconv = cast(cudnn.SpatialMaxPooling(ki,kj,si,sj,padi,padj))
if ceil_mode then gconv:ceil() end
- local rescuda = gconv:forward(input)
+ local rescuda = gconv:forward(cast(input))
-- serialize and deserialize
torch.save('modelTemp.t7', gconv)
gconv = torch.load('modelTemp.t7')
- local rescuda = gconv:forward(input)
- local resgrad = gconv:backward(input, gradOutput)
+ local rescuda = gconv:forward(cast(input))
+ local resgrad = gconv:backward(cast(input), cast(gradOutput))
cutorch.synchronize()
mytester:asserteq(rescuda:dim(), 4, 'error in dimension')
mytester:asserteq(resgrad:dim(), 4, 'error in dimension')
local error = rescuda:float() - groundtruth:float()
- mytester:assertlt(error:abs():max(), precision_forward, 'error on state (forward) ')
+ mytester:assertlt(error:abs():max(), testparams.precision_forward, 'error on state (forward) ')
error = resgrad:float() - groundgrad:float()
- mytester:assertlt(error:abs():max(), precision_backward, 'error on state (backward) ')
+ mytester:assertlt(error:abs():max(), testparams.precision_backward, 'error on state (backward) ')
-- IO
- local ferr,berr = jac.testIO(gconv, input)
- mytester:assertlt(ferr, precision_io, torch.typename(gconv) .. ' - i/o forward err ')
- mytester:assertlt(berr, precision_io, torch.typename(gconv) .. ' - i/o backward err ')
+ local ferr,berr = jac.testIO(gconv, cast(input))
+ mytester:assertlt(ferr, testparams.precision_io, torch.typename(gconv) .. ' - i/o forward err ')
+ mytester:assertlt(berr, testparams.precision_io, torch.typename(gconv) .. ' - i/o backward err ')
end
function cudnntest.SpatialMaxPooling_single()
@@ -735,37 +782,38 @@ function cudnntest.SpatialMaxPooling_single()
local sconv = nn.SpatialMaxPooling(ki,kj,si,sj,padi,padj):cuda()
if ceil_mode then sconv:ceil() end
- local gconv = cudnn.SpatialMaxPooling(ki,kj,si,sj,padi,padj):cuda()
+ local gconv = cast(cudnn.SpatialMaxPooling(ki,kj,si,sj,padi,padj))
if ceil_mode then gconv:ceil() end
local function test(sconv, gconv)
local groundtruth = sconv:forward(input)
local groundgrad = sconv:backward(input, gradOutput)
cutorch.synchronize()
- local _ = gconv:forward(input)
+ local _ = gconv:forward(cast(input))
-- serialize and deserialize
torch.save('modelTemp.t7', gconv)
gconv = torch.load('modelTemp.t7')
- local rescuda = gconv:forward(input)
- local resgrad = gconv:backward(input, gradOutput)
+ local rescuda = gconv:forward(cast(input))
+ local resgrad = gconv:backward(cast(input), cast(gradOutput))
cutorch.synchronize()
mytester:asserteq(rescuda:dim(), 3, 'error in dimension')
mytester:asserteq(resgrad:dim(), 3, 'error in dimension')
local error = rescuda:float() - groundtruth:float()
- mytester:assertlt(error:abs():max(), precision_forward,
+ mytester:assertlt(error:abs():max(), testparams.precision_forward,
'error on state (forward) ')
error = resgrad:float() - groundgrad:float()
- mytester:assertlt(error:abs():max(), precision_backward,
+ mytester:assertlt(error:abs():max(), testparams.precision_backward,
'error on state (backward) ')
end
test(sconv, gconv)
- local gconv = cudnn.convert(sconv, cudnn):cuda()
+ local gconv = cast(cudnn.convert(sconv, cudnn):cuda())
mytester:asserteq(torch.typename(gconv), 'cudnn.SpatialMaxPooling', 'conversion type check')
test(sconv, gconv)
end
function cudnntest.SpatialAveragePooling_batch()
+ if testparams.test_type ~= 'torch.CudaTensor' then return end
local bs = math.random(1,32)
local from = math.random(1,32)
local ki = math.random(2,4)
@@ -783,25 +831,25 @@ function cudnntest.SpatialAveragePooling_batch()
local groundtruth = sconv:forward(input):clone()
local groundgrad = sconv:backward(input, gradOutput)
cutorch.synchronize()
- local gconv = cudnn.SpatialAveragePooling(ki,kj,si,sj):cuda()
- local rescuda = gconv:forward(input)
+ local gconv = cast(cudnn.SpatialAveragePooling(ki,kj,si,sj))
+ local rescuda = gconv:forward(cast(input))
-- serialize and deserialize
torch.save('modelTemp.t7', gconv)
gconv = torch.load('modelTemp.t7')
- local rescuda = gconv:forward(input)
- local resgrad = gconv:backward(input, gradOutput)
+ local rescuda = gconv:forward(cast(input))
+ local resgrad = gconv:backward(cast(input), cast(gradOutput))
cutorch.synchronize()
mytester:asserteq(rescuda:dim(), 4, 'error in dimension')
mytester:asserteq(resgrad:dim(), 4, 'error in dimension')
local error = rescuda:float() - groundtruth:float()
- mytester:assertlt(error:abs():max(), precision_forward, 'error on state (forward) ')
+ mytester:assertlt(error:abs():max(), testparams.precision_forward, 'error on state (forward) ')
error = resgrad:float() - groundgrad:float()
- mytester:assertlt(error:abs():max(), precision_backward, 'error on state (backward) ')
+ mytester:assertlt(error:abs():max(), testparams.precision_backward, 'error on state (backward) ')
-- IO
- local ferr,berr = jac.testIO(gconv, input)
- mytester:assertlt(ferr, precision_io, torch.typename(gconv) .. ' - i/o forward err ')
- mytester:assertlt(berr, precision_io, torch.typename(gconv) .. ' - i/o backward err ')
+ local ferr,berr = jac.testIO(gconv, cast(input))
+ mytester:assertlt(ferr, testparams.precision_io, torch.typename(gconv) .. ' - i/o forward err ')
+ mytester:assertlt(berr, testparams.precision_io, torch.typename(gconv) .. ' - i/o backward err ')
end
function cudnntest.SpatialAveragePooling_single()
@@ -818,7 +866,7 @@ function cudnntest.SpatialAveragePooling_single()
local gradOutput = torch.randn(from,outj,outi):cuda()
local sconv = nn.SpatialAveragePooling(ki,kj,si,sj):cuda()
- local gconv = cudnn.SpatialAveragePooling(ki,kj,si,sj):cuda()
+ local gconv = cast(cudnn.SpatialAveragePooling(ki,kj,si,sj))
mytester:assert(cudnn.C.CUDNN_POOLING_AVERAGE ~= nil, 'back-compat broken')
@@ -826,25 +874,25 @@ function cudnntest.SpatialAveragePooling_single()
local groundtruth = sconv:forward(input):clone()
local groundgrad = sconv:backward(input, gradOutput)
cutorch.synchronize()
- local _ = gconv:forward(input)
+ local _ = gconv:forward(cast(input))
-- serialize and deserialize
torch.save('modelTemp.t7', gconv)
gconv = torch.load('modelTemp.t7')
- local rescuda = gconv:forward(input)
- local resgrad = gconv:backward(input, gradOutput)
+ local rescuda = gconv:forward(cast(input))
+ local resgrad = gconv:backward(cast(input), cast(gradOutput))
cutorch.synchronize()
mytester:asserteq(rescuda:dim(), 3, 'error in dimension')
mytester:asserteq(resgrad:dim(), 3, 'error in dimension')
local error = rescuda:float() - groundtruth:float()
- mytester:assertlt(error:abs():max(), precision_forward,
+ mytester:assertlt(error:abs():max(), testparams.precision_forward,
'error on state (forward) ')
error = resgrad:float() - groundgrad:float()
- mytester:assertlt(error:abs():max(), precision_backward,
+ mytester:assertlt(error:abs():max(), testparams.precision_backward,
'error on state (backward) ')
end
test(sconv, gconv)
- local gconv = cudnn.convert(sconv, cudnn):cuda()
+ local gconv = cast(cudnn.convert(sconv, cudnn):cuda())
mytester:asserteq(torch.typename(gconv), 'cudnn.SpatialAveragePooling', 'conversion type check')
test(sconv, gconv)
end
@@ -859,7 +907,7 @@ local function nonlinSingle(nonlin)
local gradOutput = torch.randn(from,outj,outi):cuda()
local sconv = nn[nonlin]():cuda()
- local gconv = cudnn[nonlin](inplace):cuda()
+ local gconv = cast(cudnn[nonlin](inplace))
local function test(sconv, gconv)
local groundtruth = sconv:forward(input)
local groundgrad = sconv:backward(input, gradOutput)
@@ -869,34 +917,34 @@ local function nonlinSingle(nonlin)
if math.random(0,1) == 1 then
inplace = true
end
- local input__ = input:clone()
+ local input__ = cast(input:clone())
local _ = gconv:forward(input__)
-- serialize and deserialize
torch.save('modelTemp.t7', gconv)
gconv = torch.load('modelTemp.t7')
- local input__ = input:clone()
- local gradOutput__ = gradOutput:clone()
+ local input__ = cast(input:clone())
+ local gradOutput__ = cast(gradOutput:clone())
local rescuda = gconv:forward(input__)
local resgrad = gconv:backward(input__, gradOutput__)
cutorch.synchronize()
mytester:asserteq(rescuda:dim(), 3, 'error in dimension')
mytester:asserteq(resgrad:dim(), 3, 'error in dimension')
local error = rescuda:float() - groundtruth:float()
- mytester:assertlt(error:abs():max(), precision_forward,
+ mytester:assertlt(error:abs():max(), testparams.precision_forward,
'error on state (forward) ')
error = resgrad:float() - groundgrad:float()
- mytester:assertlt(error:abs():max(), precision_backward,
+ mytester:assertlt(error:abs():max(), testparams.precision_backward,
'error on state (backward) ')
-- IO
- local ferr,berr = jac.testIO(gconv, input)
- mytester:assertlt(ferr, precision_io, torch.typename(gconv) .. ' - i/o forward err ')
- mytester:assertlt(berr, precision_io, torch.typename(gconv) .. ' - i/o backward err ')
+ local ferr,berr = jac.testIO(gconv, cast(input))
+ mytester:assertlt(ferr, testparams.precision_io, torch.typename(gconv) .. ' - i/o forward err ')
+ mytester:assertlt(berr, testparams.precision_io, torch.typename(gconv) .. ' - i/o backward err ')
end
test(sconv, gconv)
- local gconv = cudnn.convert(sconv, cudnn)
+ local gconv = cast(cudnn.convert(sconv, cudnn))
mytester:asserteq(torch.typename(gconv), 'cudnn.'..nonlin, 'conversion type check')
test(sconv, gconv)
end
@@ -912,7 +960,7 @@ local function nonlinBatch(nonlin)
local gradOutput = torch.randn(bs,from,outj,outi):cuda()
local sconv = nn[nonlin]():cuda()
- local gconv = cudnn[nonlin](inplace):cuda()
+ local gconv = cast(cudnn[nonlin](inplace))
local function test(sconv, gconv)
local groundtruth = sconv:forward(input)
local groundgrad = sconv:backward(input, gradOutput)
@@ -922,34 +970,34 @@ local function nonlinBatch(nonlin)
if math.random(0,1) == 1 then
inplace = true
end
- local input__ = input:clone()
+ local input__ = cast(input:clone())
local rescuda = gconv:forward(input__)
-- serialize and deserialize
torch.save('modelTemp.t7', gconv)
gconv = torch.load('modelTemp.t7')
- local input__ = input:clone()
- local gradOutput__ = gradOutput:clone()
+ local input__ = cast(input:clone())
+ local gradOutput__ = cast(gradOutput:clone())
local rescuda = gconv:forward(input__)
local resgrad = gconv:backward(input__, gradOutput__)
cutorch.synchronize()
mytester:asserteq(rescuda:dim(), 4, 'error in dimension')
mytester:asserteq(resgrad:dim(), 4, 'error in dimension')
local error = rescuda:float() - groundtruth:float()
- mytester:assertlt(error:abs():max(), precision_forward,
+ mytester:assertlt(error:abs():max(), testparams.precision_forward,
'error on state (forward) ')
error = resgrad:float() - groundgrad:float()
- mytester:assertlt(error:abs():max(), precision_backward,
+ mytester:assertlt(error:abs():max(), testparams.precision_backward,
'error on state (backward) ')
-- IO
- local ferr,berr = jac.testIO(gconv, input)
- mytester:assertlt(ferr, precision_io, torch.typename(gconv) .. ' - i/o forward err ')
- mytester:assertlt(berr, precision_io, torch.typename(gconv) .. ' - i/o backward err ')
+ local ferr,berr = jac.testIO(gconv, cast(input))
+ mytester:assertlt(ferr, testparams.precision_io, torch.typename(gconv) .. ' - i/o forward err ')
+ mytester:assertlt(berr, testparams.precision_io, torch.typename(gconv) .. ' - i/o backward err ')
end
test(sconv, gconv)
- local gconv = cudnn.convert(sconv, cudnn)
+ local gconv = cast(cudnn.convert(sconv, cudnn))
mytester:asserteq(torch.typename(gconv), 'cudnn.'..nonlin, 'conversion type check')
test(sconv, gconv)
end
@@ -1005,31 +1053,31 @@ function cudnntest.SpatialCrossMapLRN_batch()
local input = torch.rand(bs, nbfeatures, inputSize, inputSize):cuda()
local gradOutput = torch.rand(input:size()):cuda()
local sconv = nn.SpatialCrossMapLRN(size, alpha, beta, k):cuda()
- local gconv = cudnn.SpatialCrossMapLRN(size, alpha, beta, k):cuda()
+ local gconv = cast(cudnn.SpatialCrossMapLRN(size, alpha, beta, k))
local function test(sconv, gconv)
local groundtruth = sconv:forward(input):clone()
local groundgrad = sconv:backward(input, gradOutput)
cutorch.synchronize()
- gconv:forward(input)
+ gconv:forward(cast(input))
-- serialize and deserialize
torch.save('modelTemp.t7', gconv)
gconv = torch.load('modelTemp.t7')
- local rescuda = gconv:forward(input)
- local resgrad = gconv:backward(input, gradOutput)
+ local rescuda = gconv:forward(cast(input))
+ local resgrad = gconv:backward(cast(input), cast(gradOutput))
cutorch.synchronize()
mytester:asserteq(rescuda:dim(), 4, 'error in dimension')
mytester:asserteq(resgrad:dim(), 4, 'error in dimension')
local error = rescuda:float() - groundtruth:float()
- mytester:assertlt(error:abs():max(), precision_forward,
+ mytester:assertlt(error:abs():max(), testparams.precision_forward,
'error on state (forward) ')
error = resgrad:float() - groundgrad:float()
- mytester:assertlt(error:abs():max(), precision_backward,
+ mytester:assertlt(error:abs():max(), testparams.precision_backward,
'error on state (backward) ')
end
test(sconv, gconv)
- local gconv = cudnn.convert(sconv, cudnn)
+ local gconv = cast(cudnn.convert(sconv, cudnn))
mytester:asserteq(torch.typename(gconv), 'cudnn.SpatialCrossMapLRN', 'conversion type check')
test(sconv, gconv)
end
@@ -1044,15 +1092,15 @@ function cudnntest.SoftMax_single()
local groundtruth = sconv:forward(input)
local groundgrad = sconv:backward(input, gradOutput)
cutorch.synchronize()
- local gconv = cudnn.SoftMax():cuda()
- local _ = gconv:forward(input)
+ local gconv = cast(cudnn.SoftMax())
+ local _ = gconv:forward(cast(input))
-- serialize and deserialize
torch.save('modelTemp.t7', gconv)
gconv = torch.load('modelTemp.t7')
- local rescuda = gconv:forward(input)
- local resgrad = gconv:backward(input, gradOutput)
+ local rescuda = gconv:forward(cast(input))
+ local resgrad = gconv:backward(cast(input), cast(gradOutput))
cutorch.synchronize()
local error = rescuda:float() - groundtruth:float()
local errmax = error:abs():max()
@@ -1068,7 +1116,7 @@ function cudnntest.SoftMax_single()
torch.save('badSoftMax.t7', state)
print(#input)
end
- mytester:assertlt(errmax, precision_forward,
+ mytester:assertlt(errmax, testparams.precision_forward,
'error on state (forward) ')
error = resgrad:float() - groundgrad:float()
errmax = error:abs():max()
@@ -1084,7 +1132,7 @@ function cudnntest.SoftMax_single()
torch.save('badSoftMax.t7', state)
print(#input)
end
- mytester:assertlt(errmax, precision_backward,
+ mytester:assertlt(errmax, testparams.precision_backward,
'error on state (backward) ')
end
@@ -1102,30 +1150,30 @@ function cudnntest.SoftMax_batch()
local groundtruth = sconv:forward(input:view(bs,-1))
local groundgrad = sconv:backward(input, gradOutput)
cutorch.synchronize()
- local gconv = cudnn.SoftMax():cuda()
- local rescuda = gconv:forward(input)
+ local gconv = cast(cudnn.SoftMax())
+ local rescuda = gconv:forward(cast(input))
-- serialize and deserialize
torch.save('modelTemp.t7', gconv)
gconv = torch.load('modelTemp.t7')
- local rescuda = gconv:forward(input)
- local resgrad = gconv:backward(input, gradOutput)
+ local rescuda = gconv:forward(cast(input))
+ local resgrad = gconv:backward(cast(input), cast(gradOutput))
cutorch.synchronize()
mytester:asserteq(rescuda:dim(), 4, 'error in dimension')
mytester:asserteq(resgrad:dim(), 4, 'error in dimension')
local error = rescuda:float() - groundtruth:float()
mytester:assertlt(error:abs():max(),
- precision_forward, 'error on state (forward) ')
+ testparams.precision_forward, 'error on state (forward) ')
error = resgrad:float() - groundgrad:float()
mytester:assertlt(error:abs():max(),
- precision_backward, 'error on state (backward) ')
+ testparams.precision_backward, 'error on state (backward) ')
-- IO
- local ferr,berr = jac.testIO(gconv, input)
- mytester:assertlt(ferr, precision_io, torch.typename(gconv) .. ' - i/o forward err ')
- mytester:assertlt(berr, precision_io, torch.typename(gconv) .. ' - i/o backward err ')
+ local ferr,berr = jac.testIO(gconv, cast(input))
+ mytester:assertlt(ferr, testparams.precision_io, torch.typename(gconv) .. ' - i/o forward err ')
+ mytester:assertlt(berr, testparams.precision_io, torch.typename(gconv) .. ' - i/o backward err ')
end
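
[Editor's note] Several of these tests end with jac.testIO; in torch/nn's Jacobian helper that call serializes the module, reloads it, and returns the maximum forward and backward discrepancies between the two copies, which is why its thresholds come from testparams.precision_io. A minimal usage sketch, assuming jac = nn.Jacobian as is conventional in these suites:

-- Assumes jac = nn.Jacobian (set earlier in test/test.lua).
local ferr, berr = jac.testIO(gconv, cast(input))
-- ferr/berr: max abs difference between the module's output/gradInput
-- before and after a save/load round trip.
assert(ferr < testparams.precision_io and berr < testparams.precision_io)
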
@@ -1135,37 +1183,37 @@ function cudnntest.LogSoftMax_single()
local gradOutput = torch.randn(sz):cuda()
local sconv = nn.LogSoftMax():cuda()
- local gconv = cudnn.LogSoftMax():cuda()
+ local gconv = cast(cudnn.LogSoftMax())
local function test(sconv, gconv)
local groundtruth = sconv:forward(input)
local groundgrad = sconv:backward(input, gradOutput)
cutorch.synchronize()
- local _ = gconv:forward(input)
+ local _ = gconv:forward(cast(input))
-- serialize and deserialize
torch.save('modelTemp.t7', gconv)
gconv = torch.load('modelTemp.t7')
- local rescuda = gconv:forward(input)
- local resgrad = gconv:backward(input, gradOutput)
+ local rescuda = gconv:forward(cast(input))
+ local resgrad = gconv:backward(cast(input), cast(gradOutput))
cutorch.synchronize()
local error = rescuda:float() - groundtruth:float()
local errmax = error:abs():max()
- mytester:assertlt(errmax, precision_forward,
+ mytester:assertlt(errmax, testparams.precision_forward,
'error on state (forward) ')
error = resgrad:float() - groundgrad:float()
errmax = error:abs():max()
- mytester:assertlt(errmax, precision_backward,
+ mytester:assertlt(errmax, testparams.precision_backward,
'error on state (backward) ')
-- IO
- local ferr,berr = jac.testIO(gconv, input)
- mytester:assertlt(ferr, precision_io, torch.typename(gconv) .. ' - i/o forward err ')
- mytester:assertlt(berr, precision_io, torch.typename(gconv) .. ' - i/o backward err ')
+ local ferr,berr = jac.testIO(gconv, cast(input))
+ mytester:assertlt(ferr, testparams.precision_io, torch.typename(gconv) .. ' - i/o forward err ')
+ mytester:assertlt(berr, testparams.precision_io, torch.typename(gconv) .. ' - i/o backward err ')
end
test(sconv, gconv)
- local gconv = cudnn.convert(sconv, cudnn)
+ local gconv = cast(cudnn.convert(sconv, cudnn))
mytester:asserteq(torch.typename(gconv), 'cudnn.LogSoftMax', 'conversion type check')
test(sconv, gconv)
end
@@ -1177,38 +1225,38 @@ function cudnntest.LogSoftMax_batch()
local gradOutput = torch.randn(bs,from):cuda()
local sconv = nn.LogSoftMax():cuda()
- local gconv = cudnn.LogSoftMax():cuda()
+ local gconv = cast(cudnn.LogSoftMax())
local function test(sconv, gconv)
local groundtruth = sconv:forward(input)
local groundgrad = sconv:backward(input, gradOutput)
cutorch.synchronize()
- local rescuda = gconv:forward(input)
+ local rescuda = gconv:forward(cast(input))
-- serialize and deserialize
torch.save('modelTemp.t7', gconv)
gconv = torch.load('modelTemp.t7')
- local rescuda = gconv:forward(input)
- local resgrad = gconv:backward(input, gradOutput)
+ local rescuda = gconv:forward(cast(input))
+ local resgrad = gconv:backward(cast(input), cast(gradOutput))
cutorch.synchronize()
mytester:asserteq(rescuda:dim(), 2, 'error in dimension')
mytester:asserteq(resgrad:dim(), 2, 'error in dimension')
local error = rescuda:float() - groundtruth:float()
mytester:assertlt(error:abs():max(),
- precision_forward, 'error on state (forward) ')
+ testparams.precision_forward, 'error on state (forward) ')
error = resgrad:float() - groundgrad:float()
mytester:assertlt(error:abs():max(),
- precision_backward, 'error on state (backward) ')
+ testparams.precision_backward, 'error on state (backward) ')
-- IO
- local ferr,berr = jac.testIO(gconv, input)
- mytester:assertlt(ferr, precision_io, torch.typename(gconv) .. ' - i/o forward err ')
- mytester:assertlt(berr, precision_io, torch.typename(gconv) .. ' - i/o backward err ')
+ local ferr,berr = jac.testIO(gconv, cast(input))
+ mytester:assertlt(ferr, testparams.precision_io, torch.typename(gconv) .. ' - i/o forward err ')
+ mytester:assertlt(berr, testparams.precision_io, torch.typename(gconv) .. ' - i/o backward err ')
end
test(sconv, gconv)
- local gconv = cudnn.convert(sconv, cudnn)
+ local gconv = cast(cudnn.convert(sconv, cudnn))
mytester:asserteq(torch.typename(gconv), 'cudnn.LogSoftMax', 'conversion type check')
test(sconv, gconv)
end
@@ -1222,11 +1270,11 @@ function cudnntest.SpatialLogSoftMax()
local input = torch.zeros(bsz, numLabels, h, w):normal():cuda()
local target = torch.zeros(bsz, numLabels, h, w):normal():cuda()
- local cri = cudnn.SpatialLogSoftMax():cuda()
+ local cri = cast(cudnn.SpatialLogSoftMax())
local gcri = nn.LogSoftMax():cuda()
- local op = cri:forward(input, target)
- local gi = cri:backward(input, target)
+ local op = cri:forward(cast(input), cast(target))
+ local gi = cri:backward(cast(input), cast(target))
local gop = op:clone():zero()
local ggi = gi:clone():zero()
@@ -1242,15 +1290,15 @@ function cudnntest.SpatialLogSoftMax()
end
end
local err = (gi - ggi):abs():max()
- mytester:assertlt(err, precision_backward, 'error in difference between central difference and :backward')
+ mytester:assertlt(err, testparams.precision_backward, 'error in difference between central difference and :backward')
local err = (op - gop):abs():max()
- mytester:assertlt(err, precision_backward, 'error in difference between central difference and :backward')
+ mytester:assertlt(err, testparams.precision_backward, 'error in difference between cudnn and nn (forward)')
end
local function testBatchNormalization(moduleName, inputSize)
local input = torch.randn(table.unpack(inputSize)):cuda()
local gradOutput = torch.randn(table.unpack(inputSize)):cuda()
- local cbn = cudnn[moduleName](inputSize[2], 1e-3):cuda()
+ local cbn = cast(cudnn[moduleName](inputSize[2], 1e-3))
local gbn = nn[moduleName](inputSize[2], 1e-3):cuda()
cbn.weight:copy(gbn.weight)
cbn.bias:copy(gbn.bias)
@@ -1260,44 +1308,46 @@ local function testBatchNormalization(moduleName, inputSize)
gbn:training()
mytester:asserteq(cbn.running_mean:mean(), 0, 'error on BN running_mean init')
mytester:asserteq(cbn.running_var:mean(), 1, 'error on BN running_var init')
- local rescuda = cbn:forward(input)
+ local rescuda = cbn:forward(cast(input))
local groundtruth = gbn:forward(input)
- local resgrad = cbn:backward(input, gradOutput)
+ local resgrad = cbn:backward(cast(input), cast(gradOutput))
local groundgrad = gbn:backward(input, gradOutput)
local error = rescuda:float() - groundtruth:float()
mytester:assertlt(error:abs():max(),
- precision_forward, 'error in batch normalization (forward) ')
+ testparams.precision_forward, 'error in batch normalization (forward) ')
error = resgrad:float() - groundgrad:float()
mytester:assertlt(error:abs():max(),
- precision_backward, 'error in batch normalization (backward) ')
+ testparams.precision_backward, 'error in batch normalization (backward) ')
error = cbn.running_mean:float() - gbn.running_mean:float()
mytester:assertlt(error:abs():max(),
- precision_forward, 'error in batch normalization (running_mean) ')
+ testparams.precision_forward, 'error in batch normalization (running_mean) ')
error = cbn.running_var:float() - gbn.running_var:float()
mytester:assertlt(error:abs():max(),
- precision_forward, 'error in batch normalization (running_var) ')
+ testparams.precision_forward, 'error in batch normalization (running_var) ')
end
local function testFWD(cbn, gbn)
cbn:evaluate()
gbn:evaluate()
- local rescuda = cbn:forward(input)
+ local rescuda = cbn:forward(cast(input))
local groundtruth = gbn:forward(input)
local error = rescuda:float() - groundtruth:float()
mytester:assertlt(error:abs():max(),
- precision_forward, 'error in batch normalization (forward) ')
+ testparams.precision_forward, 'error in batch normalization (forward) ')
end
testFWDBWD(cbn, gbn)
testFWD(cbn, gbn)
- local cudnn2nn = cudnn.convert(cbn:clone(), nn)
- mytester:asserteq(torch.type(cudnn2nn), 'nn.'..moduleName, 'cudnn to nn')
- testFWD(cudnn2nn, gbn)
- local nn2cudnn = cudnn.convert(gbn:clone(), cudnn)
- mytester:asserteq(torch.type(nn2cudnn), 'cudnn.'..moduleName, 'cudnn to nn')
- testFWD(nn2cudnn, gbn)
+ if testparams.test_type == 'torch.CudaTensor' then
+ local cudnn2nn = cast(cudnn.convert(cbn:clone(), nn))
+ mytester:asserteq(torch.type(cudnn2nn), 'nn.'..moduleName, 'cudnn to nn')
+ testFWD(cudnn2nn, gbn)
+ local nn2cudnn = cast(cudnn.convert(gbn:clone(), cudnn))
+ mytester:asserteq(torch.type(nn2cudnn), 'cudnn.'..moduleName, 'nn to cudnn')
+ testFWD(nn2cudnn, gbn)
+ end
end
function cudnntest.BatchNormalization()
@@ -1330,6 +1380,7 @@ function cudnntest.VolumetricBatchNormalization()
end
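
[Editor's note] testBatchNormalization above is shared by the per-module entry points whose bodies this hunk elides. A sketch of the assumed call pattern, with a hypothetical input shape:

-- Sketch of the assumed call pattern; the real bodies (elided from
-- this hunk) pick their own sizes. The shape below is hypothetical.
function cudnntest.SpatialBatchNormalization()
   -- batch x features x height x width
   testBatchNormalization('SpatialBatchNormalization', {4, 8, 16, 16})
end
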
function cudnntest.SpatialCrossEntropyCriterion()
+ if testparams.test_type ~= 'torch.CudaTensor' then return end
-- batch
local numLabels = math.random(5,10)
local h = math.random(5,10)
@@ -1338,12 +1389,12 @@ function cudnntest.SpatialCrossEntropyCriterion()
local input = torch.zeros(bsz, numLabels, h, w):normal():cuda()
local target = torch.Tensor(bsz, h, w):random(1, numLabels):cuda()
- local cri = cudnn.SpatialCrossEntropyCriterion():cuda()
+ local cri = cast(cudnn.SpatialCrossEntropyCriterion())
local gcri = nn.CrossEntropyCriterion():cuda()
- local op = cri:forward(input, target)
- local gi = cri:backward(input, target)
+ local op = cri:forward(cast(input), cast(target))
+ local gi = cri:backward(cast(input), cast(target))
local ggi = gi:clone():zero()
@@ -1362,7 +1413,7 @@ function cudnntest.SpatialCrossEntropyCriterion()
ggi:div(h * w)
local err = (gi - ggi):abs():max()
- mytester:assertlt(err, precision_backward, 'error in difference between central difference and :backward')
+ mytester:assertlt(err, testparams.precision_backward, 'error in difference between central difference and :backward')
end
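
[Editor's note] The assertion above compares :backward's analytic gradient gi against the numerically estimated ggi built in the elided lines. For reference, a central-difference estimate of a criterion's gradient, as the assert message describes, looks like this sketch (eps and the element-wise loop are illustrative, not the elided test code itself):

-- Minimal sketch of a central-difference gradient estimate for a criterion.
local function numericGradient(cri, input, target, eps)
   eps = eps or 1e-3
   local grad = input:clone():zero()
   local flatIn, flatGrad = input:view(-1), grad:view(-1) -- share storage
   for i = 1, flatIn:numel() do
      local orig = flatIn[i]
      flatIn[i] = orig + eps
      local fPlus = cri:forward(input, target)
      flatIn[i] = orig - eps
      local fMinus = cri:forward(input, target)
      flatIn[i] = orig
      flatGrad[i] = (fPlus - fMinus) / (2 * eps)
   end
   return grad
end
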
@@ -1387,7 +1438,7 @@ function cudnntest.functional_bias2D()
cudnn.functional.bias2D_updateOutput(cudnn.getHandle(), mod.bias, result)
local error = result:float() - groundtruth:float()
mytester:assertlt(error:abs():max(),
- precision_forward, 'error on forward ')
+ testparams.precision_forward, 'error on forward ')
mod:zeroGradParameters()
local gradOutput = groundtruth:clone():normal()
@@ -1397,7 +1448,7 @@ function cudnntest.functional_bias2D()
cudnn.functional.bias2D_accGradParameters(cudnn.getHandle(), gradOutput, result, scale)
error = result:float() - groundtruth:float()
mytester:assertlt(error:abs():max(),
- precision_backward, 'error on accGradParameters ')
+ testparams.precision_backward, 'error on accGradParameters ')
end
function cudnntest.functional_convolution2d()
@@ -1414,20 +1465,20 @@ function cudnntest.functional_convolution2d()
a.weight, output, a.dH,
a.dW, a.padH, a.padW)
mytester:assertlt((output - a.output):abs():max(),
- precision_forward, 'error on forward ')
+ testparams.precision_forward, 'error on forward ')
cudnn.functional.Convolution2D_updateGradInput(cudnn.getHandle(), input,
a.weight, output, gradOutput,
gradInput,
a.dH, a.dW, a.padH, a.padW)
mytester:assertlt((gradInput - a.gradInput):abs():max(),
- precision_forward, 'error on updateGradInput ')
+ testparams.precision_forward, 'error on updateGradInput ')
cudnn.functional.Convolution2D_accGradParameters(cudnn.getHandle(), input,
gradWeight, gradOutput,
a.dH, a.dW, a.padH, a.padW)
mytester:assertlt((gradWeight - a.gradWeight):abs():max(),
- precision_forward, 'error on accGradParameters ')
+ testparams.precision_forward, 'error on accGradParameters ')
end
function cudnntest.functional_maxpooling2d()
@@ -1441,17 +1492,16 @@ function cudnntest.functional_maxpooling2d()
output, a.kH, a.kW,
a.dH, a.dW, a.padH, a.padW)
mytester:assertlt((output - a.output):abs():max(),
- precision_forward, 'error on forward ')
+ testparams.precision_forward, 'error on forward ')
cudnn.functional.MaxPooling2D_updateGradInput(cudnn.getHandle(), input,
output, gradOutput, gradInput,
a.kH, a.kW, a.dH, a.dW,
a.padH, a.padW)
mytester:assertlt((gradInput - a.gradInput):abs():max(),
- precision_forward, 'error on updateGradInput ')
+ testparams.precision_forward, 'error on updateGradInput ')
end
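
[Editor's note] The three functional_* tests exercise cudnn.functional, a stateless API in which every call takes the cudnn handle plus caller-owned tensors, with no module state. Pieced together from the signatures visible in the tests above, a conv + bias + pooling forward pass would look roughly like this (shapes, strides and pads are illustrative, and the caller must pre-size the output tensors):

-- Rough composition of the stateless calls exercised above. Shapes,
-- strides and pads are illustrative; signatures mirror the tests.
local handle  = cudnn.getHandle()
local input   = torch.CudaTensor(1, 3, 8, 8):normal()
local weight  = torch.CudaTensor(16, 3, 3, 3):normal()
local bias    = torch.CudaTensor(16):zero()
local convOut = torch.CudaTensor(1, 16, 8, 8)
-- dH, dW, padH, padW = 1, 1, 1, 1 keeps the 8x8 spatial extent
cudnn.functional.Convolution2D_updateOutput(handle, input, weight, convOut,
                                            1, 1, 1, 1)
cudnn.functional.bias2D_updateOutput(handle, bias, convOut) -- adds bias in place
local poolOut = torch.CudaTensor(1, 16, 4, 4)
-- kH, kW, dH, dW, padH, padW = 2, 2, 2, 2, 0, 0 halves each spatial dim
cudnn.functional.MaxPooling2D_updateOutput(handle, convOut, poolOut,
                                           2, 2, 2, 2, 0, 0)
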
-
torch.setdefaulttensortype('torch.FloatTensor')
math.randomseed(os.time())
mytester = torch.Tester()
@@ -1466,6 +1516,17 @@ end
for i=1,cutorch.getDeviceCount() do
print('Running test on device: ' .. i)
cutorch.setDevice(i)
+
+ print'Testing torch.CudaHalfTensor'
+ testparams = testparams_half
+ mytester:run()
+
+ print'Testing torch.CudaTensor'
+ testparams = testparams_float
+ mytester:run()
+
+ print'Testing torch.CudaDoubleTensor'
+ testparams = testparams_double
mytester:run()
end
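
[Editor's note] The closing hunk turns one test run into three, swapping the global testparams before each mytester:run() so the whole suite repeats under half, float, and double. The tables themselves are defined earlier in the file; presumably the half-precision one carries far looser thresholds, along these illustrative lines:

-- Illustrative values only; the real tables live earlier in the file.
testparams_half = {
   test_type          = 'torch.CudaHalfTensor',
   precision_forward  = 2e-2,  -- fp16 carries ~3 significant decimal digits
   precision_backward = 8,     -- fp16 rounding accumulates in gradients
   precision_io       = 1e-1,
}
testparams_double = {
   test_type          = 'torch.CudaDoubleTensor',
   precision_forward  = 1e-4,
   precision_backward = 1e-2,
   precision_io       = 1e-5,
}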