Welcome to the mirror list, hosted at ThFree Co, Russian Federation.

github.com/soumith/cudnn.torch.git - Unnamed repository; edit this file 'description' to name the repository.
summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
authorSoumith Chintala <soumith@gmail.com>2016-07-05 13:52:49 +0300
committerGitHub <noreply@github.com>2016-07-05 13:52:49 +0300
commitebf64b49dc63d40cea6d842e16c25950775f7fa0 (patch)
tree457fd7c28170f4f5af51afb5fd94909111ba2ffb
parent66be8c95088608420c39bf9d367670eed3de5e4b (diff)
parent39a636428877a9f7e251299f424301e1535603a8 (diff)
Merge pull request #215 from jpuigcerver/PR
Dropout argument for recurrent modules & BGRU
-rw-r--r-- BGRU.lua | 9
-rw-r--r-- BLSTM.lua | 4
-rw-r--r-- GRU.lua | 4
-rw-r--r-- LSTM.lua | 4
-rw-r--r-- RNN.lua | 10
-rw-r--r-- RNNReLU.lua | 4
-rw-r--r-- RNNTanh.lua | 4
-rw-r--r-- cudnn-scm-1.rockspec | 2
8 files changed, 25 insertions, 16 deletions
diff --git a/BGRU.lua b/BGRU.lua
new file mode 100644
index 0000000..31d5d92
--- /dev/null
+++ b/BGRU.lua
@@ -0,0 +1,9 @@
+local BGRU, parent = torch.class('cudnn.BGRU', 'cudnn.RNN')
+
+function BGRU:__init(inputSize, hiddenSize, numLayers, batchFirst, dropout)
+ parent.__init(self, inputSize, hiddenSize, numLayers, batchFirst, dropout)
+ self.bidirectional = 'CUDNN_BIDIRECTIONAL'
+ self.mode = 'CUDNN_GRU'
+ self.numDirections = 2
+ self:reset()
+end
diff --git a/BLSTM.lua b/BLSTM.lua
index 8feebf1..d17dbca 100644
--- a/BLSTM.lua
+++ b/BLSTM.lua
@@ -1,7 +1,7 @@
local BLSTM, parent = torch.class('cudnn.BLSTM', 'cudnn.RNN')
-function BLSTM:__init(inputSize, hiddenSize, numLayers, batchFirst)
- parent.__init(self,inputSize, hiddenSize, numLayers, batchFirst)
+function BLSTM:__init(inputSize, hiddenSize, numLayers, batchFirst, dropout)
+ parent.__init(self, inputSize, hiddenSize, numLayers, batchFirst, dropout)
self.bidirectional = 'CUDNN_BIDIRECTIONAL'
self.mode = 'CUDNN_LSTM'
self.numDirections = 2
diff --git a/GRU.lua b/GRU.lua
index 615bdf7..c41fc5d 100644
--- a/GRU.lua
+++ b/GRU.lua
@@ -1,7 +1,7 @@
local GRU, parent = torch.class('cudnn.GRU', 'cudnn.RNN')
-function GRU:__init(inputSize, hiddenSize, numLayers, batchFirst)
- parent.__init(self,inputSize, hiddenSize, numLayers, batchFirst)
+function GRU:__init(inputSize, hiddenSize, numLayers, batchFirst, dropout)
+ parent.__init(self,inputSize, hiddenSize, numLayers, batchFirst, dropout)
self.mode = 'CUDNN_GRU'
self:reset()
end
diff --git a/LSTM.lua b/LSTM.lua
index 29c199c..d9f10a2 100644
--- a/LSTM.lua
+++ b/LSTM.lua
@@ -1,7 +1,7 @@
local LSTM, parent = torch.class('cudnn.LSTM', 'cudnn.RNN')
-function LSTM:__init(inputSize, hiddenSize, numLayers, batchFirst)
- parent.__init(self,inputSize, hiddenSize, numLayers, batchFirst)
+function LSTM:__init(inputSize, hiddenSize, numLayers, batchFirst, dropout)
+ parent.__init(self,inputSize, hiddenSize, numLayers, batchFirst, dropout)
self.mode = 'CUDNN_LSTM'
self:reset()
end
diff --git a/RNN.lua b/RNN.lua
index 17faa3a..4871676 100644
--- a/RNN.lua
+++ b/RNN.lua
@@ -4,7 +4,7 @@ local errcheck = cudnn.errcheck
local DESCS = {'rnnDesc', 'dropoutDesc', 'wDesc', 'xDescs', 'yDescs', 'hxDesc', 'hyDesc', 'cxDesc', 'cyDesc'}
-function RNN:__init(inputSize, hiddenSize, numLayers, batchFirst)
+function RNN:__init(inputSize, hiddenSize, numLayers, batchFirst, dropout)
parent.__init(self)
self.datatype = 'CUDNN_DATA_FLOAT'
@@ -17,7 +17,7 @@ function RNN:__init(inputSize, hiddenSize, numLayers, batchFirst)
self.numDirections = 1 -- set to 2 for bi-directional.
self.inputMode = 'CUDNN_LINEAR_INPUT'
self.mode = 'CUDNN_RNN_RELU'
- self.dropout = 0
+ self.dropout = dropout or 0
self.seed = 0x01234567
self.batchFirst = batchFirst or false -- Set to true for batch x time x inputdim.
@@ -46,7 +46,7 @@ function RNN:reset(stdv)
errcheck('cudnnGetRNNParamsSize',
cudnn.getHandle(),
self.rnnDesc[0],
- self.xDescs[0],
+ self.xDescs[0],
weightSize:data(),
self.datatype)
weightSize[1] = (weightSize[1] + 3) / 4 -- sizeof(float)
@@ -119,7 +119,7 @@ end
function RNN:resetRNNDescriptor()
if not self.rnnDesc then
self.rnnDesc = self:createRNNDescriptors(1)
- end
+ end
errcheck('cudnnSetRNNDescriptor',
self.rnnDesc[0],
self.hiddenSize,
@@ -279,7 +279,7 @@ function RNN:updateOutput(input)
local oSize = torch.LongStorage({self.seqLength, self.miniBatch, self.hiddenSize * self.numDirections})
local oStride = torch.LongStorage({self.miniBatch * self.hiddenSize * self.numDirections, self.hiddenSize * self.numDirections, 1})
self.output:resize(oSize, oStride)
- local y = self.output
+ local y = self.output
local w = self.weight
local hy = self:resizeHidden(self.hiddenOutput):zero()
local cy = self:resizeHidden(self.cellOutput):zero()
diff --git a/RNNReLU.lua b/RNNReLU.lua
index 3aa8ee9..fc262e2 100644
--- a/RNNReLU.lua
+++ b/RNNReLU.lua
@@ -1,7 +1,7 @@
local RNNReLU, parent = torch.class('cudnn.RNNReLU', 'cudnn.RNN')
-function RNNReLU:__init(inputSize, hiddenSize, numLayers, batchFirst)
- parent.__init(self,inputSize, hiddenSize, numLayers, batchFirst)
+function RNNReLU:__init(inputSize, hiddenSize, numLayers, batchFirst, dropout)
+ parent.__init(self,inputSize, hiddenSize, numLayers, batchFirst, dropout)
self.mode = 'CUDNN_RNN_RELU'
self:reset()
end
diff --git a/RNNTanh.lua b/RNNTanh.lua
index 98fa87c..3382a52 100644
--- a/RNNTanh.lua
+++ b/RNNTanh.lua
@@ -1,7 +1,7 @@
local RNNTanh, parent = torch.class('cudnn.RNNTanh', 'cudnn.RNN')
-function RNNTanh:__init(inputSize, hiddenSize, numLayers, batchFirst)
- parent.__init(self,inputSize, hiddenSize, numLayers, batchFirst)
+function RNNTanh:__init(inputSize, hiddenSize, numLayers, batchFirst, dropout)
+ parent.__init(self,inputSize, hiddenSize, numLayers, batchFirst, dropout)
self.mode = 'CUDNN_RNN_TANH'
self:reset()
end
diff --git a/cudnn-scm-1.rockspec b/cudnn-scm-1.rockspec
index bc36117..44c5f28 100644
--- a/cudnn-scm-1.rockspec
+++ b/cudnn-scm-1.rockspec
@@ -8,7 +8,7 @@ source = {
description = {
summary = "Torch7 FFI bindings for NVIDIA CuDNN kernels!",
detailed = [[
- All CuDNN modules exposed as nn.Module derivatives so
+ All CuDNN modules exposed as nn.Module derivatives so
that they can be used with torch's neural network package
]],
homepage = "https://github.com/soumith/cudnn.torch",