Welcome to the mirror list, hosted at ThFree Co, Russian Federation.

github.com/clementfarabet/lua---nnx.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorClement Farabet <clement.farabet@gmail.com>2013-06-09 02:41:42 +0400
committerClement Farabet <clement.farabet@gmail.com>2013-06-09 02:41:42 +0400
commitdf34525b4f7bfe9763fe3142cda3325b06b0491d (patch)
tree974f7b7825c82b31d41d5796f40431bd5aa26600 /Probe.lua
parent1d5b44e58bd0b49513c234cc058b55a6b4d7717e (diff)
better probe
Diffstat (limited to 'Probe.lua')
-rw-r--r--Probe.lua64
1 file changed, 38 insertions, 26 deletions
diff --git a/Probe.lua b/Probe.lua
index ea8527c..af6f292 100644
--- a/Probe.lua
+++ b/Probe.lua
@@ -1,44 +1,56 @@
local Probe, parent = torch.class('nn.Probe', 'nn.Module')
-function Probe:__init(...)
+function Probe:__init(name,display)
parent.__init(self)
- xlua.unpack_class(self, {...}, 'nn.Probe',
- 'print/display input/gradients of a network',
- {arg='name', type='string', help='unique name to identify probe', req=true},
- {arg='print', type='boolean', help='print full tensor', default=false},
- {arg='display', type='boolean', help='display tensor', default=false},
- {arg='size', type='boolean', help='print tensor size', default=false},
- {arg='backw', type='boolean', help='activates probe for backward()', default=false})
+ self.name = name or 'unnamed'
+ self.display = display
+ nn._ProbeTimer = nn._ProbeTimer or torch.Timer()
end
function Probe:updateOutput(input)
self.output = input
- if self.size or self.content then
- print('')
- print('<probe::' .. self.name .. '> updateOutput()')
- if self.content then print(input)
- elseif self.size then print(#input)
- end
+ local legend = '<' .. self.name .. '>.output'
+ local size = {}
+ for i = 1,input:dim() do
+ size[i] = input:size(i)
end
+ size = table.concat(size,'x')
+ local diff = nn._ProbeTimer:time().real - (nn._ProbeLast or 0)
+ nn._ProbeLast = nn._ProbeTimer:time().real
+ print('')
+ print(legend)
+ print(' + size = ' .. size)
+ print(' + mean = ' .. input:mean())
+ print(' + std = ' .. input:std())
+ print(' + min = ' .. input:min())
+ print(' + max = ' .. input:max())
+ print(' + time since last probe = ' .. string.format('%0.1f',diff*1000) .. 'ms')
if self.display then
- self.winf = image.display{image=input, win=self.winf}
+ self.winf = image.display{image=input, win=self.winf, legend=legend}
end
return self.output
end
function Probe:updateGradInput(input, gradOutput)
self.gradInput = gradOutput
- if self.backw then
- if self.size or self.content then
- print('')
- print('<probe::' .. self.name .. '> updateGradInput()')
- if self.content then print(gradOutput)
- elseif self.size then print(#gradOutput)
- end
- end
- if self.display then
- self.winb = image.display{image=gradOutput, win=self.winb}
- end
+ local legend = 'layer<' .. self.name .. '>.gradInput'
+ local size = {}
+ for i = 1,gradOutput:dim() do
+ size[i] = gradOutput:size(i)
+ end
+ size = table.concat(size,'x')
+ local diff = nn._ProbeTimer:time().real - (nn._ProbeLast or 0)
+ nn._ProbeLast = nn._ProbeTimer:time().real
+ print('')
+ print(legend)
+ print(' + size = ' .. size)
+ print(' + mean = ' .. gradOutput:mean())
+ print(' + std = ' .. gradOutput:std())
+ print(' + min = ' .. gradOutput:min())
+ print(' + max = ' .. gradOutput:max())
+ print(' + time since last probe = ' .. string.format('%0.1f',diff*1000) .. 'ms')
+ if self.display then
+ self.winb = image.display{image=gradOutput, win=self.winb, legend=legend}
end
return self.gradInput
end