github.com/clementfarabet/lua---nnx.git
author     Marco Scoffier <github@metm.org>   2011-09-21 23:29:21 +0400
committer  Marco Scoffier <github@metm.org>   2011-09-21 23:29:21 +0400
commit     5f2fc83d9b56aa935edbed37427553e671310f91 (patch)
tree       bb7d8e5e2154fa22e67a38513495920acdf03aea
parent     f27c87d31fff038e5555063a31c0f0cc8a343560 (diff)
fixing counters
-rw-r--r--   BatchOptimization.lua   8
-rw-r--r--   lbfgs.c                 2
2 files changed, 8 insertions, 2 deletions
diff --git a/BatchOptimization.lua b/BatchOptimization.lua
index a82c984..c4dfec6 100644
--- a/BatchOptimization.lua
+++ b/BatchOptimization.lua
@@ -38,6 +38,7 @@ function Batch:__init(...)
       self.gradParameters = nnx.flattenParameters(nnx.getGradParameters(self.module))
    end
    self.evalCounter = 0
+   self.batchCounter = 0
    self.sampleCounter = 0
    if self.parallelize > 1 then
       self:setup_mapreduce()
@@ -94,9 +95,12 @@ function Batch:forward_sequential(inputs, targets, options)
          if self.posthook then
             self.posthook(self, {inputs[i], targets[i], options[i]})
          end
-      end
       -- update evaluation counter
       self.evalCounter = self.evalCounter + 1
+      end
+
+      -- update batch counter
+      self.batchCounter = self.batchCounter + 1
       -- normalize gradients
       if torch.getdefaulttensortype() == 'torch.CudaTensor' then
@@ -109,7 +113,7 @@ function Batch:forward_sequential(inputs, targets, options)
       -- verbose
       if self.verbose >= 2 then
-         print('<BatchOptimization> ' .. self.evalCounter .. 'th evaluation took ' .. (sys.clock() - _t_) .. ' sec')
+         print('<BatchOptimization> ' .. self.batchCounter .. 'th batch took ' .. (sys.clock() - _t_) .. ' sec')
       end
       -- return average f(X)
       self.output = self.output/#inputs
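
For context on the hunks above: after this commit, self.evalCounter ticks once per sample inside the loop, while the new self.batchCounter ticks once per call to forward_sequential. Below is a minimal standalone sketch of that bookkeeping only; the Batch table, the stand-in evaluation, and the example inputs are hypothetical, and the real class's hooks, gradient handling, and parallel paths are omitted.

-- Simplified sketch (hypothetical, not the real class): how the two
-- counters diverge after this change.
local Batch = {evalCounter = 0, batchCounter = 0}

function Batch:forward_sequential(inputs)
   local sum = 0
   for i = 1,#inputs do
      sum = sum + inputs[i]                     -- stands in for one f(x) evaluation
      self.evalCounter = self.evalCounter + 1   -- one tick per sample
   end
   self.batchCounter = self.batchCounter + 1    -- one tick per batch
   return sum / #inputs                         -- average f(X), as above
end

Batch:forward_sequential({1, 2, 3})
print(Batch.evalCounter, Batch.batchCounter)    -- prints: 3  1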
diff --git a/lbfgs.c b/lbfgs.c
index a6ec73e..62f9ecc 100644
--- a/lbfgs.c
+++ b/lbfgs.c
@@ -1496,6 +1496,8 @@ int lbfgs_run(lua_State *L) {
   if (!x) {
     THError("lbfgs.init() should be called once before calling lbfgs.run()");
   }
+  // reset our counter
+  nEvaluation = 0;
   // Start the L-BFGS optimization; this will invoke the callback functions
   // evaluate() and progress() when necessary.
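
The lbfgs.c change resets the nEvaluation counter at the top of lbfgs_run(), so each run starts counting objective evaluations from zero rather than carrying over the previous run's total. A minimal Lua sketch of that reset-per-run pattern follows; the run() function and its maxEval parameter are illustrative stand-ins, not the actual C binding.

-- Illustrative sketch of the reset-per-run pattern (not the real lbfgs binding).
local nEvaluation = 0

local function run(maxEval)
   nEvaluation = 0                        -- fresh count for every run
   for i = 1,maxEval do
      nEvaluation = nEvaluation + 1       -- one objective/gradient evaluation
   end
   return nEvaluation
end

print(run(5), run(3))                     -- prints: 5  3 (no carry-over between runs)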