| author | Clement Farabet <clement.farabet@gmail.com> | 2011-09-02 03:16:08 +0400 |
|---|---|---|
| committer | Clement Farabet <clement.farabet@gmail.com> | 2011-09-02 03:16:08 +0400 |
| commit | 3863613a4717c59aac9306e3962f8de4dd7405bd (patch) | |
| tree | 3e25b752ccbab79d10fae2bf9c33abfd3b9e0e5e | |
| parent | dc98c37487f72b656ff9b23da9979dd7e372cfb7 (diff) | |
Fixed tabs.
| -rw-r--r-- | BatchOptimization.lua | 52 |

1 file changed, 26 insertions(+), 26 deletions(-)
```diff
diff --git a/BatchOptimization.lua b/BatchOptimization.lua
index 27309f2..3143a3e 100644
--- a/BatchOptimization.lua
+++ b/BatchOptimization.lua
@@ -8,15 +8,15 @@ local Batch,parent = torch.class('nn.BatchOptimization', 'nn.Optimization')
 function Batch:__init(...)
    parent.__init(self)
    xlua.unpack_class(self, {...},
-      'BatchOptimization', nil,
-      {arg='module', type='nn.Module', help='a module to train', req=true},
-      {arg='criterion', type='nn.Criterion',
-       help='a criterion to estimate the error', req=true},
-      {arg='parallelize', type='number',
-       help='parallelize onto N cores (experimental!)', default=1},
-      {arg='verbose', type='number',
-       help='verbose level during training [0-2]', default=0}
-   )
+      'BatchOptimization', nil,
+      {arg='module', type='nn.Module', help='a module to train', req=true},
+      {arg='criterion', type='nn.Criterion',
+       help='a criterion to estimate the error', req=true},
+      {arg='parallelize', type='number',
+       help='parallelize onto N cores (experimental!)', default=1},
+      {arg='verbose', type='number',
+       help='verbose level during training [0-2]', default=0}
+   )
    self.parameters = nnx.flattenParameters(nnx.getParameters(self.module))
    self.gradParameters = nnx.flattenParameters(nnx.getGradParameters(self.module))
    self.evalCounter = 0
@@ -45,7 +45,7 @@ function Batch:forward_sequential(inputs, targets, options)
       = function()
           -- verbose
           if self.verbose >= 2 then
-             print('<BatchOptimization> evaluating f(X) + df/dX')
+             print('<BatchOptimization> evaluating f(X) + df/dX')
           end
           local _t_ = sys.clock()
           -- reset gradients
@@ -75,7 +75,7 @@ function Batch:forward_sequential(inputs, targets, options)
          -- normalize gradients
          self.gradParameters:div(#inputs)
          -- verbose
-         if self.verbose >= 2 then
+         if self.verbose >= 2 then
             print('<BatchOptimization> ' .. self.evalCounter .. 'th evaluation took ' .. (sys.clock() - _t_) .. ' sec')
          end
          -- return average f(X)
@@ -160,17 +160,17 @@ function Batch:forward_mapreduce(inputs, targets, options)
   self.evaluate
      = function()
          -- verbose
-         if self.verbose >= 2 then
-            print('<BatchOptimization> evaluating f(X) + df/dX')
+         if self.verbose >= 2 then
+            print('<BatchOptimization> evaluating f(X) + df/dX')
          end
          local _t_ = sys.clock()
          -- do map/reduce
-         self.evaluate_map()
-         self.evaluate_reduce()
-         -- update evaluation counter
+         self.evaluate_map()
+         self.evaluate_reduce()
+         -- update evaluation counter
          self.evalCounter = self.evalCounter + 1
          -- verbose
-         if self.verbose >= 2 then
+         if self.verbose >= 2 then
             print('<BatchOptimization> ' .. self.evalCounter .. 'th evaluation took ' .. (sys.clock() - _t_) .. ' sec')
          end
          return self.output
@@ -180,12 +180,12 @@ function Batch:forward_mapreduce(inputs, targets, options)
   -- in separate threads
   self.evaluate_map
      = function()
-         -- transmit new parameters to all workers
+         -- transmit new parameters to all workers
          parallel.children:send(self.parameters)
-         -- then wait for all workers to return their partial gradParameters + outputs
+         -- then wait for all workers to return their partial gradParameters + outputs
          gradParametersPartial = parallel.children:receive()
-         outputsPartial = parallel.children:receive()
-         -- force cleanup
+         outputsPartial = parallel.children:receive()
+         -- force cleanup
          collectgarbage()
      end
@@ -233,7 +233,7 @@ function Batch:setup_mapreduce ()
   -- (1) define code for workers
   local worker_code = [[
-        -- require packages
+        -- require packages
         require 'nnx'

         -- retrieve module + criterion at startup
@@ -260,11 +260,11 @@ function Batch:setup_mapreduce ()
         if type(inputs) == 'string' and inputs == 'break' then break end
         targets = parallel.parent:receive()
         options = parallel.parent:receive()
-        -- inner loop: evaluations
+        -- inner loop: evaluations
         while true do
-           -- receive new set of parameters
+           -- receive new set of parameters
            newParameters = parallel.parent:receive()
-
+
            if type(newParameters) == 'string' and newParameters == 'break' then break end
            parameters:copy(newParameters)
@@ -274,7 +274,7 @@ function Batch:setup_mapreduce ()
            local f_x = 0
            -- evaluate gradients on inputs for this thread
            for i = 1,#inputs do
-              -- user hook
+              -- user hook
               if prehook then
                  prehook(optimizer, {inputs[i], targets[i], options[i]})
               end
```
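The change itself is whitespace-only (the tab/space cleanup named in the commit message), but the first hunk happens to document the constructor's interface. Below is a minimal, hypothetical usage sketch based only on the four arguments visible in that hunk; the `nn.Linear` module, `nn.MSECriterion`, and the argument values are placeholders, and in practice this base class would typically be driven through one of nnx's optimizer subclasses rather than instantiated directly.

```lua
-- Hypothetical usage sketch: only the four constructor arguments come from
-- the hunk above; the module, criterion, and chosen values are placeholders.
require 'nnx'

local module = nn.Linear(10, 1)       -- placeholder module to train
local criterion = nn.MSECriterion()   -- placeholder error criterion

local optimizer = nn.BatchOptimization{
   module = module,         -- req=true: the module to train
   criterion = criterion,   -- req=true: estimates the error
   parallelize = 1,         -- default=1; N > 1 forks N workers (experimental)
   verbose = 2              -- default=0; level 2 prints per-evaluation timing
}
```

The named-table call style shown here is what `xlua.unpack_class` is parsing in the hunk: each `{arg=..., type=..., help=...}` entry declares one keyword argument, its expected type, and its default or required status.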