| author | Marco Scoffier <github@metm.org> | 2011-09-21 19:02:29 +0400 |
|---|---|---|
| committer | Marco Scoffier <github@metm.org> | 2011-09-21 19:02:29 +0400 |
| commit | f27c87d31fff038e5555063a31c0f0cc8a343560 (patch) | |
| tree | 993c0853972a3c04a12a8a9d024b56d7ccd65ab2 | |
| parent | f3d95bdb2800806bc6283511ced2896e20e7b07a (diff) | |
forgot to copy params from CPU to GPU
-rw-r--r-- | BatchOptimization.lua | 7 |
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/BatchOptimization.lua b/BatchOptimization.lua
index 53dcd77..a82c984 100644
--- a/BatchOptimization.lua
+++ b/BatchOptimization.lua
@@ -68,10 +68,11 @@ function Batch:forward_sequential(inputs, targets, options)
    end
    local _t_ = sys.clock()
    -- reset gradients
+   self.gradParameters:zero()
    if torch.getdefaulttensortype() == 'torch.CudaTensor' then
+      -- when using cuda we need to copy params to GPU
+      self.cuda_parameters:resize(self.parameters:size()):copy(self.parameters)
       self.cuda_gradParameters:zero()
-   else
-      self.gradParameters:zero()
    end
    -- f is the average of all criterions
    self.output = 0
@@ -100,7 +101,7 @@ function Batch:forward_sequential(inputs, targets, options)
    -- normalize gradients
    if torch.getdefaulttensortype() == 'torch.CudaTensor' then
       self.cuda_gradParameters:div(#inputs)
-      -- copy back to CPU version
+      -- copy gradients back from GPU to CPU
       self.gradParameters:resize(self.cuda_gradParameters:size()):copy(self.cuda_gradParameters)
    else
       self.gradParameters:div(#inputs)
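For context, the pattern this patch restores is sketched below in Torch7 Lua. It is not the repository's code: the function name batchGradient and the stand-in evaluateBatch are hypothetical, and the tensors mirror the roles of self.parameters/self.gradParameters (CPU) and self.cuda_parameters/self.cuda_gradParameters (GPU) in BatchOptimization.lua. Parameters are pushed from CPU to GPU before the batch is evaluated, and the averaged gradients are copied back afterwards.

-- Minimal sketch (assumptions noted above), not the actual Batch:forward_sequential code.
require 'torch'
require 'cutorch'

local function batchGradient(params, gradParams, cudaParams, cudaGradParams,
                             inputs, targets, evaluateBatch)
   gradParams:zero()                           -- always clear the CPU gradients
   if torch.getdefaulttensortype() == 'torch.CudaTensor' then
      -- push the current CPU parameters onto the GPU before evaluating
      cudaParams:resize(params:size()):copy(params)
      cudaGradParams:zero()
   end
   -- hypothetical stand-in for the per-sample forward/backward loop,
   -- which accumulates gradients and returns the summed criterion output
   local f = evaluateBatch(inputs, targets)
   if torch.getdefaulttensortype() == 'torch.CudaTensor' then
      cudaGradParams:div(#inputs)              -- average gradients over the batch
      -- pull the averaged gradients back from GPU to CPU for the optimizer
      gradParams:resize(cudaGradParams:size()):copy(cudaGradParams)
   else
      gradParams:div(#inputs)
   end
   return f / #inputs, gradParams
end

Before the patch, the CPU-side gradient reset sat in the else-branch and the parameters were never copied to the GPU, so CUDA runs evaluated stale parameters; moving the zero() out of the branch and adding the copy fixes both.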