From 4df3893abd1b9f840f1d9a8c1859799ccbf941de Mon Sep 17 00:00:00 2001
From: Ronan Collobert
Date: Wed, 25 Jan 2012 14:55:20 +0100
Subject: initial revamp of torch7 tree

---
 SparseLinear.lua | 42 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 42 insertions(+)
 create mode 100644 SparseLinear.lua

(limited to 'SparseLinear.lua')

diff --git a/SparseLinear.lua b/SparseLinear.lua
new file mode 100644
index 0000000..ec8845e
--- /dev/null
+++ b/SparseLinear.lua
@@ -0,0 +1,42 @@
+local SparseLinear, parent = torch.class('nn.SparseLinear', 'nn.Module')
+
+function SparseLinear:__init(inputSize, outputSize)
+   parent.__init(self)
+
+   self.weightDecay = 0
+   self.weight = torch.Tensor(outputSize, inputSize)
+   self.bias = torch.Tensor(outputSize)
+   self.gradWeight = torch.Tensor(outputSize, inputSize)
+   self.gradBias = torch.Tensor(outputSize)
+   self.lastInput = torch.Tensor()
+   -- state
+   self.gradInput:resize(inputSize)
+   self.output:resize(outputSize)
+
+   self:reset()
+end
+
+function SparseLinear:reset(stdv)
+   if stdv then
+      stdv = stdv * math.sqrt(3)
+   else
+      stdv = 1./math.sqrt(self.weight:size(1))
+   end
+
+   -- we do this so the initialization is exactly
+   -- the same than in previous torch versions
+   for i=1,self.weight:size(1) do
+      self.weight:select(1, i):apply(function()
+         return torch.uniform(-stdv, stdv)
+      end)
+      self.bias[i] = torch.uniform(-stdv, stdv) * 0.000001
+   end
+end
+
+function SparseLinear:updateOutput(input)
+   return input.nn.SparseLinear_updateOutput(self, input)
+end
+
+function SparseLinear:accGradParameters(input, gradOutput, scale)
+   return input.nn.SparseLinear_accGradParameters(self, input, gradOutput, scale)
+end
--
cgit v1.2.3
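
Usage note (not part of the patch above): a minimal sketch of how this module would typically be driven, assuming the Torch7 convention that nn.SparseLinear consumes a sparse vector encoded as an nnz x 2 tensor whose first column holds 1-based feature indices and whose second column holds the corresponding values. The layer sizes and input values below are illustrative only.

require 'nn'

-- hypothetical sizes: 10000 sparse input features mapped to 2 outputs
local module = nn.SparseLinear(10000, 2)

-- assumed sparse encoding: each row is {index, value}
local input = torch.Tensor{ {1, 0.1}, {42, 0.5}, {9999, 1.0} }

-- forward() calls updateOutput() above, which dispatches to the C backend
local output = module:forward(input)
print(output)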