-- DistNLLCriterion.lua
-- From github.com/clementfarabet/lua---nnx.git
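-- nn.DistNLLCriterion: negative log-likelihood against a full target
-- distribution rather than a single class index. Given an input mapped to
-- log-probabilities q and a target mapped to probabilities p, the loss is
-- the cross-entropy: L = - sum_i p[i] * log(q[i]).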
require 'nn'

local DistNLLCriterion, parent = torch.class('nn.DistNLLCriterion', 'nn.Criterion')

function DistNLLCriterion:__init()
   parent.__init(self)
   -- user options
   self.inputIsADistance = false       -- input is a distance: smaller means more likely
   self.inputIsProbability = false     -- input is already a probability distribution
   self.inputIsLogProbability = false  -- input is already a log-probability distribution
   self.targetIsProbability = false    -- target is already a probability distribution
   -- internal
   self.targetSoftMax = nn.SoftMax()
   self.inputLogSoftMax = nn.LogSoftMax()
   self.inputLog = nn.Log()
   self.gradLogInput = torch.Tensor()
   self.input = torch.Tensor()
end

function DistNLLCriterion:normalize(input, target)
   -- normalize target: map raw target scores to a probability distribution
   if not self.targetIsProbability then
      self.probTarget = self.targetSoftMax:updateOutput(target)
   else
      self.probTarget = target
   end

   -- flip input if a distance, so that smaller distances mean higher scores
   if self.inputIsADistance then
      self.input:resizeAs(input):copy(input):mul(-1)
   else
      self.input = input
   end

   -- normalize input: the loss is computed on log-probabilities
   if not self.inputIsLogProbability and not self.inputIsProbability then
      self.logProbInput = self.inputLogSoftMax:updateOutput(self.input)
   elseif not self.inputIsLogProbability then
      -- input is already a probability: take its log element-wise
      self.logProbInput = self.inputLog:updateOutput(self.input)
   else
      self.logProbInput = self.input
   end
end

function DistNLLCriterion:denormalize()
   -- denormalize gradients: backprop through whichever mapping was applied
   if not self.inputIsLogProbability and not self.inputIsProbability then
      self.gradInput = self.inputLogSoftMax:updateGradInput(self.input, self.gradLogInput)
   elseif not self.inputIsLogProbability then
      -- input was a probability: backprop through the element-wise log
      self.gradInput = self.inputLog:updateGradInput(self.input, self.gradLogInput)
   else
      self.gradInput = self.gradLogInput
   end

   -- if input is a distance, then flip gradients back
   if self.inputIsADistance then
      self.gradInput:mul(-1)
   end
end

function DistNLLCriterion:updateOutput(input, target)
   self:normalize(input, target)
   -- cross-entropy between the target distribution and the input
   -- log-probabilities: output = - sum_i probTarget[i] * logProbInput[i]
   self.output = 0
   for i = 1,input:size(1) do
      self.output = self.output - self.logProbInput[i] * self.probTarget[i]
   end
   return self.output
end

function DistNLLCriterion:updateGradInput(input, target)
   self:normalize(input, target)
   -- gradient of the loss w.r.t. the log-probabilities:
   -- d(output)/d(logProbInput[i]) = -probTarget[i]
   self.gradLogInput:resizeAs(input)
   for i = 1,input:size(1) do
      self.gradLogInput[i] = -self.probTarget[i]
   end
   self:denormalize()
   return self.gradInput
end
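
-- A minimal usage sketch (not part of the original file; the sizes and
-- values below are illustrative). By default the raw input is passed
-- through LogSoftMax and the raw target through SoftMax:
--
--   local crit = nn.DistNLLCriterion()
--   local input = torch.randn(10)          -- raw scores from a model
--   local target = torch.randn(10)         -- raw target scores
--   local loss = crit:forward(input, target)
--   local grad = crit:backward(input, target)
--
--   -- if the target is already a probability distribution:
--   crit.targetIsProbability = true
--   local p = torch.Tensor(10):fill(1/10)  -- uniform target
--   loss = crit:forward(input, p)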