-- StochasticTrainer.lua
-- from github.com/clementfarabet/lua---nnx.git

local StochasticTrainer, parent = torch.class('nn.StochasticTrainer','nn.Trainer')

function StochasticTrainer:__init(...)
   parent.__init(self)
   -- unpack args
   xlua.unpack_class(self, {...},
      'StochasticTrainer', 

      'A general-purpose stochastic trainer class.\n'
         .. 'Provides 4 user hooks to perform extra work after each sample or each epoch:\n'
         .. '> trainer = nn.StochasticTrainer(...) \n'
         .. '> trainer.hookTrainSample = function(trainer, sample) ... end \n'
         .. '> trainer.hookTrainEpoch = function(trainer) ... end \n'
         .. '> trainer.hookTestSample = function(trainer, sample) ... end \n'
         .. '> trainer.hookTestEpoch = function(trainer) ... end \n'
         .. '> ',

      {arg='module', type='nn.Module', help='a module to train', req=true},
      {arg='criterion', type='nn.Module', help='a criterion to estimate the error'},
      {arg='preprocessor', type='nn.Module', help='a preprocessor to prime the data before the module'},

      {arg='learningRate', type='number', help='learning rate (W = W - rate*dE/dW)', default=1e-2},
      {arg='learningRateDecay', type='number', help='learning rate decay (rate = rate0 / (1 + epoch*decay), updated after each epoch)', default=0},
      {arg='weightDecay', type='number', help='amount of weight decay (W = W - decay*W)', default=0},
      {arg='momentum', type='number', help='amount of momentum on weights (dE/dW = dE/dW + momentum*prev(dE/dW))', default=0},
      {arg='maxIteration', type='number', help='maximum number of epochs', default=50},

      {arg='maxTarget', type='boolean', help='replaces a dense target (e.g. a CxHxW map) by the index of its max value (for NLL criteria)', default=false},
      {arg='dispProgress', type='boolean', help='display a progress bar during training/testing', default=true},
      {arg='skipUniformTargets', type='boolean', help='skip uniform (flat) targets during training', default=false}
   )
   -- detect criterion type: nn.ClassNLLCriterion expects a class index,
   -- so force maxTarget for it
   if torch.typename(self.criterion) == 'nn.ClassNLLCriterion' then
      self.maxTarget = true
   end
   -- private params
   self.errorArray = self.skipUniformTargets  -- when set, flat targets are detected (and skipped)
   self.trainOffset = 0
   self.testOffset = 0
end
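
-- Usage sketch (illustrative only: the layer sizes and the 'trainset'
-- variable below are hypothetical; any object implementing size() and
-- [] indexing that returns {input, target} samples works as a dataset):
--
--   local mlp = nn.Sequential()
--   mlp:add(nn.Linear(100, 10))
--   mlp:add(nn.LogSoftMax())
--   local trainer = nn.StochasticTrainer{module = mlp,
--                                        criterion = nn.ClassNLLCriterion(),
--                                        learningRate = 1e-2}
--   trainer.hookTrainEpoch = function(t)
--      print('epoch ' .. t.epoch .. ' done, error: ' .. t.currentError)
--   end
--   trainer:train(trainset)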

function StochasticTrainer:train(dataset)
   self.epoch = self.epoch or 1
   local currentLearningRate = self.learningRate
   local module = self.module
   local criterion = self.criterion
   self.trainset = dataset

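   -- build the visit order: sequential by default, or a random permutation
   -- when shuffleIndices is set (presumably inherited from nn.Trainer)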
   local shuffledIndices = {}
   if not self.shuffleIndices then
      for t = 1,dataset:size() do
         shuffledIndices[t] = t
      end
   else
      shuffledIndices = lab.randperm(dataset:size())
   end
   
   while true do
      print('<trainer> on training set:')
      print("<trainer> stochastic gradient descent epoch # " .. self.epoch)

      module:zeroGradParameters()

      self.currentError = 0
      for t = 1,dataset:size() do
         -- disp progress
         if self.dispProgress then
            xlua.dispProgress(t, dataset:size())
         end

         -- load new sample
         local sample = dataset[self.trainOffset + shuffledIndices[t]]
         local input = sample[1]
         local target = sample[2]
         local sample_x = sample.x
         local sample_y = sample.y

         -- get max of target ?
         if self.maxTarget then
            target = torch.Tensor(target:nElement()):copy(target)
            _,target = lab.max(target)
            target = target[1]
         end
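         -- e.g. a distribution target (0.1, 0.2, 0.7) is replaced by its
         -- argmax, the class index 3, as expected by nn.ClassNLLCriterion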

         -- is target uniform (flat)? (only meaningful for tensor targets)
         local isUniform = false
         if self.errorArray and torch.typename(target) and target:min() == target:max() then
            isUniform = true
         end

         -- perform SGD step
         if not (self.skipUniformTargets and isUniform) then
            -- optional preprocess
            if self.preprocessor then input = self.preprocessor:forward(input) end

            -- forward through model and criterion
            -- (if no criterion, it is assumed to be contained in the model)
            local modelOut, err
            if criterion then
               modelOut = module:forward(input)
               err = criterion:forward(modelOut, target)
            else
               modelOut, err = module:forward(input, target, sample_x, sample_y)
            end

            -- accumulate error
            self.currentError = self.currentError + err

            -- backward through model
            -- (if no criterion, it is assumed that derror is internally generated)
            -- (the momentum argument presumably scales, rather than clears, the
            --  previous gradients, implementing the momentum option above)
            module:zeroGradParameters(self.momentum)
            if criterion then
               local derror = criterion:backward(module.output, target)
               module:backward(input, derror)
            else
               module:backward(input)
            end

            -- weight decay ?
            if self.weightDecay ~= 0 then
               module:decayParameters(self.weightDecay)
            end

            -- update parameters in the model
            module:updateParameters(currentLearningRate)
         end
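         -- net effect of one accepted step (combining the options above):
         -- W <- (1 - weightDecay)*W - lr*(dE/dW + momentum*prev(dE/dW))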

         -- call user hook, if any
         if self.hookTrainSample then
            self.hookTrainSample(self, sample)
         end
      end

      self.currentError = self.currentError / dataset:size()
      print("<trainer> current error = " .. self.currentError)

      if self.hookTrainEpoch then
         self.hookTrainEpoch(self)
      end

      self.epoch = self.epoch + 1
      currentLearningRate = self.learningRate/(1+self.epoch*self.learningRateDecay)
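      -- e.g. with learningRate=1e-2 and learningRateDecay=0.1, the rate
      -- after 10 epochs is 1e-2 / (1 + 10*0.1) = 5e-3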

      if self.maxIteration > 0 and self.epoch > self.maxIteration then
         print("<trainer> you have reached the maximum number of epochs")
         break
      end

      if dataset.infiniteSet then
         self.trainOffset = self.trainOffset + dataset:size()
      end
   end
end


function StochasticTrainer:test(dataset)
   print('<trainer> on testing set:')

   local module = self.module
   local criterion = self.criterion
   self.currentError = 0
   self.testset = dataset

   local shuffledIndices = {}
   if not self.shuffleIndices then
      for t = 1,dataset:size() do
         shuffledIndices[t] = t
      end
   else
      shuffledIndices = lab.randperm(dataset:size())
   end
   
   for t = 1,dataset:size() do
      -- disp progress
      if self.dispProgress then
         xlua.dispProgress(t, dataset:size())
      end

      -- get new sample
      local sample = dataset[self.testOffset + shuffledIndices[t]]
      local input = sample[1]
      local target = sample[2]

      -- max target ?
      if self.maxTarget then
         target = torch.Tensor(target:nElement()):copy(target)
         _,target = lab.max(target)
         target = target[1]
      end
      
      -- test sample through current model
      if self.preprocessor then input = self.preprocessor:forward(input) end
      if criterion then
         self.currentError = self.currentError +
            criterion:forward(module:forward(input), target)
      else
         local _,err = module:forward(input, target)
         self.currentError = self.currentError + err
      end

      -- user hook
      if self.hookTestSample then
         self.hookTestSample(self, sample)
      end
   end

   self.currentError = self.currentError / dataset:size()
   print("<trainer> test current error = " .. self.currentError)

   if self.hookTestEpoch then
      self.hookTestEpoch(self)
   end

   if dataset.infiniteSet then
      self.testOffset = self.testOffset + dataset:size()
   end

   return self.currentError
end

function StochasticTrainer:write(file)
   parent.write(self,file)
   file:writeObject(self.module)
   file:writeObject(self.criterion)
end

function StochasticTrainer:read(file)
   parent.read(self,file)
   self.module = file:readObject()
   self.criterion = file:readObject()
end
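
-- Serialization sketch (the file name is hypothetical; standard torch
-- serialization dispatches to the write()/read() methods above):
--
--   local f = torch.DiskFile('trainer.net', 'w')
--   f:writeObject(trainer)
--   f:close()
--
--   local g = torch.DiskFile('trainer.net', 'r')
--   local trainer2 = g:readObject()
--   g:close()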