github.com/torch/threads-ffi.git
author    Gregory Chanan <gchanan@fb.com>  2016-12-16 01:06:32 +0300
committer Gregory Chanan <gchanan@fb.com>  2016-12-16 01:06:32 +0300
commit    1d8c97124e2fa67bbcf2d7f5832b730cbe1b8de0 (patch)
tree      447ad99aa316dde056d6c22fd17627122c6d9837
parent    46c1e283dab3ca5b8aa4f00e9df615fbb85b2d82 (diff)
Add double, half support to benchmarks.
-rw-r--r--  benchmark/benchmark-threaded.lua  |  75
-rw-r--r--  benchmark/benchmark.lua           |  77
2 files changed, 99 insertions(+), 53 deletions(-)
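
At a glance, the patch routes the -double and -half flags through two new helpers, gpuType() and cpuType(), instead of hard-coding tensor types. Roughly (flag combinations taken from the diff below; the th invocations are only illustrative):

th benchmark/benchmark.lua -cuda          -- default type: torch.CudaTensor
th benchmark/benchmark.lua -cuda -double  -- torch.CudaDoubleTensor
th benchmark/benchmark.lua -cuda -half    -- torch.CudaHalfTensor
th benchmark/benchmark.lua -double        -- torch.DoubleTensor (CPU)
th benchmark/benchmark.lua -half          -- errors: 'half not supported without cuda'
th benchmark/benchmark.lua -double -half  -- errors: 'make your choice between double and half!!'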
diff --git a/benchmark/benchmark-threaded.lua b/benchmark/benchmark-threaded.lua
index bcf4e65..6220795 100644
--- a/benchmark/benchmark-threaded.lua
+++ b/benchmark/benchmark-threaded.lua
@@ -20,6 +20,7 @@ cmd:option('-convmm', false, 'use "mm" convolution code instead of standard')
cmd:option('-sub', false, 'use subsampling instead of max pooling')
cmd:option('-openmp', false, 'use openmp *package*')
cmd:option('-double', false, 'use doubles instead of floats')
+cmd:option('-half', false, 'use halves instead of floats')
cmd:option('-cuda', false, 'use CUDA instead of floats')
cmd:option('-gi', false, 'compute gradInput')
cmd:option('-v', false, 'be verbose')
@@ -97,19 +98,41 @@ if not params.sub then
end
end
-if params.double and params.cuda then
- error('make your choice between double and cuda!!')
+local function gpuType()
+ if params.double then
+ return 'torch.CudaDoubleTensor'
+ elseif params.half then
+ return 'torch.CudaHalfTensor'
+ else
+ return 'torch.CudaTensor'
+ end
+end
+
+local function cpuType()
+ if params.double then
+ return 'torch.DoubleTensor'
+ elseif params.half then
+ return 'torch.FloatTensor'
+ else
+ return 'torch.FloatTensor'
+ end
+end
+
+if params.double and params.half then
+ error('make your choice between double and half!!')
+end
+
+if params.half and not params.cuda then
+ error('half not supported without cuda')
end
-if params.double then
- torch.setdefaulttensortype('torch.DoubleTensor')
-elseif params.cuda then
+if params.cuda then
require 'cunn'
- dofile('cudahacks.lua')
- torch.setdefaulttensortype('torch.CudaTensor')
+ --dofile('cudahacks.lua')
+ torch.setdefaulttensortype(gpuType())
print( cutorch.getDeviceProperties(cutorch.getDevice()) )
else
- torch.setdefaulttensortype('torch.FloatTensor')
+ torch.setdefaulttensortype(cpuType())
end
local noutput = 10
@@ -149,8 +172,8 @@ if not params.nomlp then
mlp:add(nn.Linear(ninput, noutput))
if params.cuda then
- mlp:add(nn.Copy('torch.CudaTensor', 'torch.FloatTensor'))
- torch.setdefaulttensortype('torch.FloatTensor')
+ mlp:add(nn.Copy(gpuType(), cpuType()))
+ torch.setdefaulttensortype(cpuType())
end
mlp:add(nn.LogSoftMax())
@@ -165,7 +188,7 @@ if not params.nomlp then
local criterion = nn.ClassNLLCriterion()
if params.cuda then
- torch.setdefaulttensortype('torch.CudaTensor')
+ torch.setdefaulttensortype(gpuType())
end
local t = torch.Timer()
@@ -181,8 +204,8 @@ if not params.nomlp then
mlp:add(nn.Linear(500, noutput))
if params.cuda then
- mlp:add(nn.Copy('torch.CudaTensor', 'torch.FloatTensor'))
- torch.setdefaulttensortype('torch.FloatTensor')
+ mlp:add(nn.Copy(gpuType(), cpuType()))
+ torch.setdefaulttensortype(cpuType())
end
mlp:add(nn.LogSoftMax())
@@ -197,7 +220,7 @@ if not params.nomlp then
local criterion = nn.ClassNLLCriterion()
if params.cuda then
- torch.setdefaulttensortype('torch.CudaTensor')
+ torch.setdefaulttensortype(gpuType())
end
local t = torch.Timer()
@@ -218,8 +241,8 @@ if not params.nomlp then
mlp:add(nn.Linear(1000, noutput))
if params.cuda then
- mlp:add(nn.Copy('torch.CudaTensor', 'torch.FloatTensor'))
- torch.setdefaulttensortype('torch.FloatTensor')
+ mlp:add(nn.Copy(gpuType(), cpuType()))
+ torch.setdefaulttensortype(cpuType())
end
mlp:add(nn.LogSoftMax())
@@ -234,7 +257,7 @@ if not params.nomlp then
local criterion = nn.ClassNLLCriterion()
if params.cuda then
- torch.setdefaulttensortype('torch.CudaTensor')
+ torch.setdefaulttensortype(gpuType())
end
local t = torch.Timer()
@@ -272,8 +295,8 @@ if not params.nocnn then
mlp:add(nn.Linear(120, noutput))
if params.cuda then
- mlp:add(nn.Copy('torch.CudaTensor', 'torch.FloatTensor'))
- torch.setdefaulttensortype('torch.FloatTensor')
+ mlp:add(nn.Copy(gpuType(), cpuType()))
+ torch.setdefaulttensortype(cpuType())
end
mlp:add(nn.LogSoftMax())
@@ -288,7 +311,7 @@ if not params.nocnn then
local criterion = nn.ClassNLLCriterion()
if params.cuda then
- torch.setdefaulttensortype('torch.CudaTensor')
+ torch.setdefaulttensortype(gpuType())
end
local t = torch.Timer()
@@ -314,8 +337,8 @@ if not params.nocnn then
mlp:add(nn.Linear(120, noutput))
if params.cuda then
- mlp:add(nn.Copy('torch.CudaTensor', 'torch.FloatTensor'))
- torch.setdefaulttensortype('torch.FloatTensor')
+ mlp:add(nn.Copy(gpuType(), cpuType()))
+ torch.setdefaulttensortype(cpuType())
end
mlp:add(nn.LogSoftMax())
@@ -330,7 +353,7 @@ if not params.nocnn then
local criterion = nn.ClassNLLCriterion()
if params.cuda then
- torch.setdefaulttensortype('torch.CudaTensor')
+ torch.setdefaulttensortype(gpuType())
end
local t = torch.Timer()
@@ -356,8 +379,8 @@ if not params.nocnn then
mlp:add(nn.Linear(120, noutput))
if params.cuda then
- mlp:add(nn.Copy('torch.CudaTensor', 'torch.FloatTensor'))
- torch.setdefaulttensortype('torch.FloatTensor')
+ mlp:add(nn.Copy(gpuType(), cpuType()))
+ torch.setdefaulttensortype(cpuType())
end
mlp:add(nn.LogSoftMax())
@@ -372,7 +395,7 @@ if not params.nocnn then
local criterion = nn.ClassNLLCriterion()
if params.cuda then
- torch.setdefaulttensortype('torch.CudaTensor')
+ torch.setdefaulttensortype(gpuType())
end
local t = torch.Timer()
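
The remaining hunks in both files repeat one pattern: the model body runs in the GPU tensor type, then an nn.Copy converts activations back to the CPU type so LogSoftMax and ClassNLLCriterion run on the CPU. A minimal sketch of that pattern (layer sizes are illustrative; params, gpuType and cpuType as defined in the hunk above):

require 'nn'

local mlp = nn.Sequential()
mlp:add(nn.Linear(1024, 10))  -- runs in the current default (GPU) type
if params.cuda then
   -- convert activations from the GPU type back to the CPU type,
   -- then make subsequently constructed layers default to the CPU type
   mlp:add(nn.Copy(gpuType(), cpuType()))
   torch.setdefaulttensortype(cpuType())
end
mlp:add(nn.LogSoftMax())      -- built and run on the CPU side

-- before training, the scripts switch the default back to the GPU type:
if params.cuda then
   torch.setdefaulttensortype(gpuType())
end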
diff --git a/benchmark/benchmark.lua b/benchmark/benchmark.lua
index 191ac6e..42d1f64 100644
--- a/benchmark/benchmark.lua
+++ b/benchmark/benchmark.lua
@@ -17,7 +17,8 @@ cmd:option('-convmm', false, 'use "mm" convolution code instead of standard')
cmd:option('-sub', false, 'use subsampling instead of max pooling')
cmd:option('-openmp', false, 'use openmp *package*')
cmd:option('-double', false, 'use doubles instead of floats')
-cmd:option('-cuda', false, 'use CUDA instead of floats')
+cmd:option('-half', false, 'use halves instead of floats')
+cmd:option('-cuda', false, 'use CUDA with specified precision')
cmd:option('-gi', false, 'compute gradInput')
cmd:option('-v', false, 'be verbose')
cmd:option('-dir', '.', 'subdirectory to save the stuff')
@@ -93,19 +94,41 @@ if not params.sub then
end
end
-if params.double and params.cuda then
- error('make your choice between double and cuda!!')
+local function gpuType()
+ if params.double then
+ return 'torch.CudaDoubleTensor'
+ elseif params.half then
+ return 'torch.CudaHalfTensor'
+ else
+ return 'torch.CudaTensor'
+ end
+end
+
+local function cpuType()
+ if params.double then
+ return 'torch.DoubleTensor'
+ elseif params.half then
+ return 'torch.FloatTensor'
+ else
+ return 'torch.FloatTensor'
+ end
+end
+
+if params.double and params.half then
+ error('make your choice between double and half!!')
+end
+
+if params.half and not params.cuda then
+ error('half not supported without cuda')
end
-if params.double then
- torch.setdefaulttensortype('torch.DoubleTensor')
-elseif params.cuda then
+if params.cuda then
require 'cunn'
- dofile('cudahacks.lua')
- torch.setdefaulttensortype('torch.CudaTensor')
+ --dofile('cudahacks.lua')
+ torch.setdefaulttensortype(gpuType())
print( cutorch.getDeviceProperties(cutorch.getDevice()) )
else
- torch.setdefaulttensortype('torch.FloatTensor')
+ torch.setdefaulttensortype(cpuType())
end
local noutput = 10
@@ -145,8 +168,8 @@ if not params.nomlp then
mlp:add(nn.Linear(ninput, noutput))
if params.cuda then
- mlp:add(nn.Copy('torch.CudaTensor', 'torch.FloatTensor'))
- torch.setdefaulttensortype('torch.FloatTensor')
+ mlp:add(nn.Copy(gpuType(), cpuType()))
+ torch.setdefaulttensortype(cpuType())
end
mlp:add(nn.LogSoftMax())
@@ -161,7 +184,7 @@ if not params.nomlp then
local criterion = nn.ClassNLLCriterion()
if params.cuda then
- torch.setdefaulttensortype('torch.CudaTensor')
+ torch.setdefaulttensortype(gpuType())
end
local trainer = nn.StochasticGradient(mlp, criterion)
@@ -183,8 +206,8 @@ if not params.nomlp then
mlp:add(nn.Linear(500, noutput))
if params.cuda then
- mlp:add(nn.Copy('torch.CudaTensor', 'torch.FloatTensor'))
- torch.setdefaulttensortype('torch.FloatTensor')
+ mlp:add(nn.Copy(gpuType(), cpuType()))
+ torch.setdefaulttensortype(cpuType())
end
mlp:add(nn.LogSoftMax())
@@ -199,7 +222,7 @@ if not params.nomlp then
local criterion = nn.ClassNLLCriterion()
if params.cuda then
- torch.setdefaulttensortype('torch.CudaTensor')
+ torch.setdefaulttensortype(gpuType())
end
local trainer = nn.StochasticGradient(mlp, criterion)
@@ -226,8 +249,8 @@ if not params.nomlp then
mlp:add(nn.Linear(1000, noutput))
if params.cuda then
- mlp:add(nn.Copy('torch.CudaTensor', 'torch.FloatTensor'))
- torch.setdefaulttensortype('torch.FloatTensor')
+ mlp:add(nn.Copy(gpuType(), cpuType()))
+ torch.setdefaulttensortype(cpuType())
end
mlp:add(nn.LogSoftMax())
@@ -242,7 +265,7 @@ if not params.nomlp then
local criterion = nn.ClassNLLCriterion()
if params.cuda then
- torch.setdefaulttensortype('torch.CudaTensor')
+ torch.setdefaulttensortype(gpuType())
end
local trainer = nn.StochasticGradient(mlp, criterion)
@@ -307,8 +330,8 @@ if not params.nocnn then
mlp:add(nn.Linear(120, noutput))
if params.cuda then
- mlp:add(nn.Copy('torch.CudaTensor', 'torch.FloatTensor'))
- torch.setdefaulttensortype('torch.FloatTensor')
+ mlp:add(nn.Copy(gpuType(), cpuType()))
+ torch.setdefaulttensortype(cpuType())
end
mlp:add(nn.LogSoftMax())
@@ -323,7 +346,7 @@ if not params.nocnn then
local criterion = nn.ClassNLLCriterion()
if params.cuda then
- torch.setdefaulttensortype('torch.CudaTensor')
+ torch.setdefaulttensortype(gpuType())
end
local trainer = nn.StochasticGradient(mlp, criterion)
@@ -355,8 +378,8 @@ if not params.nocnn then
mlp:add(nn.Linear(120, noutput))
if params.cuda then
- mlp:add(nn.Copy('torch.CudaTensor', 'torch.FloatTensor'))
- torch.setdefaulttensortype('torch.FloatTensor')
+ mlp:add(nn.Copy(gpuType(), cpuType()))
+ torch.setdefaulttensortype(cpuType())
end
mlp:add(nn.LogSoftMax())
@@ -371,7 +394,7 @@ if not params.nocnn then
local criterion = nn.ClassNLLCriterion()
if params.cuda then
- torch.setdefaulttensortype('torch.CudaTensor')
+ torch.setdefaulttensortype(gpuType())
end
local trainer = nn.StochasticGradient(mlp, criterion)
@@ -403,8 +426,8 @@ if not params.nocnn then
mlp:add(nn.Linear(120, noutput))
if params.cuda then
- mlp:add(nn.Copy('torch.CudaTensor', 'torch.FloatTensor'))
- torch.setdefaulttensortype('torch.FloatTensor')
+ mlp:add(nn.Copy(gpuType(), cpuType()))
+ torch.setdefaulttensortype(cpuType())
end
mlp:add(nn.LogSoftMax())
@@ -419,7 +442,7 @@ if not params.nocnn then
local criterion = nn.ClassNLLCriterion()
if params.cuda then
- torch.setdefaulttensortype('torch.CudaTensor')
+ torch.setdefaulttensortype(gpuType())
end
local trainer = nn.StochasticGradient(mlp, criterion)
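
One simplification the patch leaves on the table: cpuType() returns 'torch.FloatTensor' from both the -half branch and the default branch (and -half without -cuda is rejected earlier anyway), so an equivalent two-branch form would be:

local function cpuType()
   -- -half falls back to float on the CPU side; only -double changes the type
   return params.double and 'torch.DoubleTensor' or 'torch.FloatTensor'
end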