-- LBFGSOptimization.lua - github.com/clementfarabet/lua---nnx.git

local LBFGS,parent = torch.class('nn.LBFGSOptimization', 'nn.Optimization')

function LBFGS:__init(...)
   require 'liblbfgs'
   parent.__init(self)
   xlua.unpack_class(self, {...},
      'LBFGSOptimization', nil,
      {arg='module', type='nn.Module', help='a module to train', req=true},
      {arg='criterion', type='nn.Criterion', help='a criterion to estimate the error', req=true},
      {arg='maxIterations', type='number', help='maximum nb of iterations per pass (0 = no max)', default=0},
      {arg='maxLineSearch', type='number', help='maximum nb of steps in line search', default=20},
      {arg='sparsity', type='number', help='sparsity coef (Orthantwise C)', default=0},
      {arg='parallelize', type='number', help='parallelize onto N cores (experimental!)', default=1},
      {arg='verbose', type='number', help='verbose level during training [0-2]', default=0}
   )
   self.parametersT = nnx.getParameters(self.module)
   self.gradParametersT = nnx.getGradParameters(self.module)
   lbfgs.verbose = self.verbose
   if self.parallelize > 1 then
      if not xrequire 'thread' then
         xerror('please install thread package (luarocks install thread)',
                'LBFGSOptimization')
      end
   end
end
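
-- Usage sketch (illustrative only, not part of this file): a minimal
-- example of constructing and running the optimizer, assuming a standard
-- nn module/criterion pair and placeholder data; the names below are
-- hypothetical.
--
--   local model     = nn.Linear(10, 1)
--   local criterion = nn.MSECriterion()
--   local optimizer = nn.LBFGSOptimization{module = model,
--                                          criterion = criterion,
--                                          maxIterations = 100,
--                                          maxLineSearch = 20}
--   local inputs  = {torch.randn(10), torch.randn(10)}
--   local targets = {torch.randn(1),  torch.randn(1)}
--   local avgError = optimizer:forward(inputs, targets)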

function LBFGS:forward(inputs, targets, options)
   options = options or {}
   if self.parallelize > 1 then
      return self:forward_mapreduce(inputs, targets, options)
   else
      return self:forward_sequential(inputs, targets, options)
   end
end

function LBFGS:forward_sequential(inputs, targets, options)
   -- (1) construct a closure that computes f(inputs) + df/dW
   --     after each call to that function:
   --       + self.parameters contains the current X vector
   --       + self.gradParameters contains the estimated dF/dX vector
   --       + self.output contains the estimated (average) F(X)
   lbfgs.evaluate
      = function()
           -- set parameters from current state
           self:unflatten(self.parametersT, self.gradParametersT)
           -- reset gradients
           self.module:zeroGradParameters()
            -- f is the average of the per-sample criterion outputs
           self.output = 0
           -- given all inputs, evaluate gradients
           for i = 1,#inputs do
              -- user hook
              if self.prehook then
                 self.prehook(self, {inputs[i], targets[i], options[i]})
              end
              -- estimate f
              local output = self.module:forward(inputs[i])
              local err = self.criterion:forward(output, targets[i])
              self.output = self.output + err
              -- estimate df/dW
              local df_do = self.criterion:backward(output, targets[i])
              self.module:backward(inputs[i], df_do)
              -- user hook
              if self.posthook then
                 self.posthook(self, {inputs[i], targets[i], options[i]})
              end
           end
           -- update state from computed parameters
           self:flatten(self.parametersT, self.gradParametersT)
           -- normalize gradients
           self.gradParameters:div(#inputs)
           -- return average f(X)
           return self.output/#inputs
        end

   -- (2) store current parameters/gradParameters
   self:flatten(self.parametersT, self.gradParametersT)

   -- (3) the magic function: will update the parameter vector
   --     according to the l-BFGS method
   self.output = lbfgs.run(self.parameters, self.gradParameters,
                           self.maxIterations, self.maxLineSearch,
                           self.sparsity)

   -- (4) last: read parameters back into the model
   self:unflatten(self.parametersT, self.gradParametersT)

   -- (5) return current output after optimization
   return self.output
end
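
-- Hook sketch (illustrative): prehook/posthook are optional, user-assigned
-- callbacks invoked around each sample as hook(self, {input, target, option}).
-- A hypothetical posthook, reusing the 'optimizer' name from the usage
-- sketch above, that reports the running error sum:
--
--   optimizer.posthook = function(opt, sample)
--      -- sample[1] = input, sample[2] = target, sample[3] = per-sample option
--      print('accumulated error: ' .. opt.output)
--   end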

function LBFGS:forward_mapreduce(inputs, targets, options)
   -- (0) clone module+criterion for parallel evaluations
   local modules = {}
   local criterions = {}
   local outputs = {}
   self.parametersPT = {}
   self.gradParametersPT = {}
   for m = 1,self.parallelize do
      modules[m] = self.module:clone()
      criterions[m] = self.criterion:clone()
      self.parametersPT[m] = nnx.getParameters(modules[m])
      self.gradParametersPT[m] = nnx.getGradParameters(modules[m])
   end

   -- (1) construct a closure that computes f(inputs) + df/dW
   --     after each call to that function:
   --       + self.parameters contains the current X vector
   --       + self.gradParameters contains the estimated dF/dX vector
   --       + self.output contains the estimated (average) F(X)
   lbfgs.evaluate
      = function()
           local queue = thread.queue.newqueue()
           -- dispatch all threads
           for t = 1,self.parallelize do
              thread.newthread(lbfgs.evaluate_map, {t,queue})
           end
           -- wait for all threads
           for t = 1,self.parallelize do
              queue:remove()
           end
           -- and conclude
           return lbfgs.evaluate_reduce()
        end

   -- (1a) the map part of the evaluation: compute partial gradients
   --      in separate threads
   lbfgs.evaluate_map
      = function(thread, queue)
            -- set parameters from current state
           self:unflatten(self.parametersPT[thread], self.gradParametersPT[thread])
           -- reset gradients
           modules[thread]:zeroGradParameters()
            -- f is the average of the per-sample criterion outputs
           outputs[thread] = 0
           -- evaluate gradients on inputs for this thread
           for i = thread,#inputs,#modules do
              -- estimate f
              local output = modules[thread]:forward(inputs[i])
              local err = criterions[thread]:forward(output, targets[i])
              outputs[thread] = outputs[thread] + err
              -- estimate df/dW
              local df_do = criterions[thread]:backward(output, targets[i])
              modules[thread]:backward(inputs[i], df_do)
           end
           -- sync master thread
           queue:insert(1)
        end
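
   -- Partitioning note: each worker strides through the dataset, so with
   -- parallelize = 2 and 5 inputs, thread 1 evaluates inputs {1,3,5} and
   -- thread 2 evaluates inputs {2,4}; every sample is visited exactly once.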

   -- (1b) the reduce part of the evaluation: accumulate all
   --      partial estimates of the gradients
   lbfgs.evaluate_reduce
      = function()
           -- temp vectors for accumulation
           self.gradParametersAcc = self.gradParametersAcc or torch.Tensor()
           self.gradParametersAcc:resizeAs(self.gradParameters):zero()
           -- update state from computed parameters
           for t = 1,self.parallelize do
              self:flatten(self.parametersPT[t], self.gradParametersPT[t])
              self.gradParametersAcc:add(self.gradParameters)
           end
           self.gradParameters:copy(self.gradParametersAcc)
           -- normalize gradients
           self.gradParameters:div(#inputs)
           -- return average f(X)
           self.output = 0
           for t = 1,self.parallelize do
              self.output = self.output + outputs[t]
           end
           return self.output/#inputs
        end
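
   -- Reduce note: the accumulated gradient and objective are divided by the
   -- total number of samples, so with 2 threads over 4 inputs,
   -- f = (f_thread1 + f_thread2) / 4 and dF/dX = (g_thread1 + g_thread2) / 4,
   -- matching the averages computed by the sequential path.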

   -- (2) store current parameters/gradParameters
   self:flatten(self.parametersT, self.gradParametersT)

   -- (3) the magic function: will update the parameter vector
   --     according to the l-BFGS method
   self.output = lbfgs.run(self.parameters, self.gradParameters,
                           self.maxIterations, self.maxLineSearch,
                           self.sparsity)

   -- (4) last: read parameters back into the main (not parallel) model
   self:unflatten(self.parametersT, self.gradParametersT)

   -- (5) return current output after optimization
   return self.output
end