github.com/clementfarabet/lua---nnx.git
author    Marco Scoffier <github@metm.org>  2011-09-21 19:02:29 +0400
committer Marco Scoffier <github@metm.org>  2011-09-21 19:02:29 +0400
commit    f27c87d31fff038e5555063a31c0f0cc8a343560 (patch)
tree      993c0853972a3c04a12a8a9d024b56d7ccd65ab2
parent    f3d95bdb2800806bc6283511ced2896e20e7b07a (diff)
forgot to copy params from CPU to GPU
-rw-r--r--  BatchOptimization.lua | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
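
For context (not part of the commit): below is a minimal sketch of the CPU-to-GPU parameter round-trip that this fix restores. It uses plain torch.Tensor stand-ins so it runs without cutorch; the field names (parameters, cuda_parameters, gradParameters, cuda_gradParameters) mirror those used in BatchOptimization.lua, while the surrounding setup is purely illustrative.

    -- sketch only: plain Tensors stand in for CudaTensors so this runs on a
    -- CPU-only Torch install (with cutorch one would use torch.CudaTensor)
    require 'torch'

    local parameters          = torch.Tensor(10):fill(1)  -- flat CPU parameter vector
    local gradParameters      = torch.Tensor(10):zero()   -- flat CPU gradient vector
    local cuda_parameters     = torch.Tensor()            -- GPU mirror of the parameters
    local cuda_gradParameters = torch.Tensor(10)          -- GPU gradient accumulator

    -- before each batch: always clear the gradients and, when evaluating on
    -- the GPU, refresh the GPU copy of the parameters (the step the commit adds)
    gradParameters:zero()
    cuda_parameters:resize(parameters:size()):copy(parameters)
    cuda_gradParameters:zero()

    -- ... forward/backward passes accumulate gradients into cuda_gradParameters ...

    -- after the batch: normalize by the batch size and copy back to the CPU vector
    local nInputs = 4
    cuda_gradParameters:div(nInputs)
    gradParameters:resize(cuda_gradParameters:size()):copy(cuda_gradParameters)

Without the added copy, the GPU parameter buffer keeps stale values after the optimizer updates the CPU vector, which is the bug the commit message describes.
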
diff --git a/BatchOptimization.lua b/BatchOptimization.lua
index 53dcd77..a82c984 100644
--- a/BatchOptimization.lua
+++ b/BatchOptimization.lua
@@ -68,10 +68,11 @@ function Batch:forward_sequential(inputs, targets, options)
end
local _t_ = sys.clock()
-- reset gradients
+ self.gradParameters:zero()
if torch.getdefaulttensortype() == 'torch.CudaTensor' then
+ -- when using cuda we need to copy params to GPU
+ self.cuda_parameters:resize(self.parameters:size()):copy(self.parameters)
self.cuda_gradParameters:zero()
- else
- self.gradParameters:zero()
end
-- f is the average of all criterions
self.output = 0
@@ -100,7 +101,7 @@ function Batch:forward_sequential(inputs, targets, options)
-- normalize gradients
if torch.getdefaulttensortype() == 'torch.CudaTensor' then
self.cuda_gradParameters:div(#inputs)
- -- copy back to CPU version
+ -- copy gradients back from GPU to CPU
self.gradParameters:resize(self.cuda_gradParameters:size()):copy(self.cuda_gradParameters)
else
self.gradParameters:div(#inputs)