github.com/soumith/cudnn.torch.git
Author:    Joan Puigcerver <joapuipe@gmail.com>  2016-06-24 10:53:57 +0300
Committer: Joan Puigcerver <joapuipe@gmail.com>  2016-06-24 10:53:57 +0300
commit     27cc2ac4b28724ebd261f548836c7360c72a05a5 (patch)
tree       e9c7b95a5a6682c2b4a518578c6abb95028d59fb
parent     8fe112b6ea816bde1b7030eacac2d27a8d890513 (diff)
Dropout enabled, but it is not deterministic for bidirectional RNNs. See PR #198
 RNN.lua              | 10 +++++-----
 cudnn-scm-1.rockspec |  4 ++--
 2 files changed, 7 insertions(+), 7 deletions(-)
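The change in RNN.lua comments out the assert(self.dropout == 0, ...) guards so that a non-zero dropout value actually reaches the cuDNN RNN descriptors. A minimal usage sketch follows; it is not part of this commit, and it assumes cudnn.torch exposes a bidirectional LSTM as cudnn.BLSTM(inputSize, hiddenSize, numLayers) and that the module's dropout field is read when its descriptors are first built. Names and sizes are illustrative.

-- Minimal sketch (not part of this commit): enabling inter-layer dropout on a
-- bidirectional LSTM, assuming the constructor cudnn.BLSTM(inputSize, hiddenSize, numLayers).
require 'cudnn'

local seqLength, miniBatch = 10, 4
local inputSize, hiddenSize, numLayers = 32, 64, 2

local rnn = cudnn.BLSTM(inputSize, hiddenSize, numLayers)
rnn.dropout = 0.5   -- picked up when the dropout/RNN descriptors are created
rnn = rnn:cuda()

-- Input shape matches the asserts in updateOutput: seqLength x miniBatch x inputSize.
local input = torch.CudaTensor(seqLength, miniBatch, inputSize):uniform()
local output = rnn:forward(input)
-- Per the commit message, the dropout mask is not deterministic for
-- bidirectional RNNs, so repeated runs may not be reproducible.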
diff --git a/RNN.lua b/RNN.lua
index daf3b19..f22c9ef 100644
--- a/RNN.lua
+++ b/RNN.lua
@@ -44,7 +44,7 @@ function RNN:reset(stdv)
errcheck('cudnnGetRNNParamsSize',
cudnn.getHandle(),
self.rnnDesc[0],
- self.xDescs[0],
+ self.xDescs[0],
weightSize:data(),
self.datatype)
weightSize[1] = (weightSize[1] + 3) / 4 -- sizeof(float)
@@ -117,7 +117,7 @@ end
function RNN:resetRNNDescriptor()
if not self.rnnDesc then
self.rnnDesc = self:createRNNDescriptors(1)
- end
+ end
errcheck('cudnnSetRNNDescriptor',
self.rnnDesc[0],
self.hiddenSize,
@@ -236,7 +236,7 @@ function RNN:updateOutput(input)
input = input:transpose(1, 2)
end
assert(input:dim() == 3, 'input must have 3 dimensions: seqLength, miniBatch, inputSize')
- assert(self.dropout == 0, 'dropout currently not supported')
+ -- assert(self.dropout == 0, 'dropout currently not supported')
-- Decide which descriptors/tensors need to be updated.
local resetRNN = not self.dropoutDesc or not self.rnnDesc
local resetIO = not self.xDescs or not self.yDescs
@@ -364,7 +364,7 @@ function RNN:updateGradInput(input, gradOutput)
gradOutput = gradOutput:transpose(1, 2)
self.output = self.output:transpose(1, 2)
end
- assert(self.dropout == 0, 'dropout currently not supported')
+ -- assert(self.dropout == 0, 'dropout currently not supported')
assert(input:dim() == 3, 'input should have 3 dimensions: seqLength, miniBatch, inputSize')
assert(input:size(1) == self.seqLength, 'input has incorrect sequence length!')
assert(input:size(2) == self.miniBatch, 'input has incorrect minibatch size!')
@@ -448,7 +448,7 @@ function RNN:accGradParameters(input, gradOutput, scale)
end
scale = scale or 1
if scale == 0 then return end
- assert(self.dropout == 0, 'dropout currently not supported')
+ -- assert(self.dropout == 0, 'dropout currently not supported')
assert(input:dim() == 3, 'input should have 3 dimensions: seqLength, miniBatch, inputSize')
assert(input:size(1) == self.seqLength, 'input has incorrect sequence length!')
assert(input:size(2) == self.miniBatch, 'input has incorrect minibatch size!')
diff --git a/cudnn-scm-1.rockspec b/cudnn-scm-1.rockspec
index bc36117..463eb20 100644
--- a/cudnn-scm-1.rockspec
+++ b/cudnn-scm-1.rockspec
@@ -2,13 +2,13 @@ package = "cudnn"
version = "scm-1"
source = {
- url = "git://github.com/soumith/cudnn.torch.git",
+ url = "git://github.com/jpuigcerver/cudnn.torch.git",
}
description = {
summary = "Torch7 FFI bindings for NVIDIA CuDNN kernels!",
detailed = [[
- All CuDNN modules exposed as nn.Module derivatives so
+ All CuDNN modules exposed as nn.Module derivatives so
that they can be used with torch's neural network package
]],
homepage = "https://github.com/soumith/cudnn.torch",