Welcome to mirror list, hosted at ThFree Co, Russian Federation.

github.com/soumith/cudnn.torch.git - Unnamed repository; edit this file 'description' to name the repository.
summary refs log tree commit diff
diff options
context:
space:
mode:
authorBoris Fomitchev <bfomitchev@nvidia.com>2017-02-23 01:33:04 +0300
committerBoris Fomitchev <bfomitchev@nvidia.com>2017-02-23 01:33:04 +0300
commitf2a1e328cf18290df9ca4ea9609d843443f5e571 (patch)
tree7adebe3328b43bd0539feb329547fe1e6a10445f
parentf4ec352c55a29be06b14f01ca77087d0f9b0e1a4 (diff)
parent19bee86405a4c46458ccbf2d5b005be4e8a2672e (diff)
Fixed renaming issue
-rw-r--r--CMakeLists.txt1
-rw-r--r--README.md2
-rw-r--r--init.lua2
3 files changed, 3 insertions(+), 2 deletions(-)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index a7abd1a..efc52fc 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -19,6 +19,7 @@ IF(NOT CUDNN_FOUND)
FIND_PACKAGE(CUDNN 6 EXACT REQUIRED)
ENDIF()
+
FILE(GLOB luasrc *.lua)
SET(src "")
ADD_TORCH_PACKAGE(cudnn "${src}" "${luasrc}" "NVIDIA CuDNN Bindings")
diff --git a/README.md b/README.md
index 1271bbe..c356988 100644
--- a/README.md
+++ b/README.md
@@ -91,7 +91,7 @@ If you don't want to convert all modules you can pass a function as the third ar
It will be called at each step, with a module that is currently converted. It is meant to exclude
modules i.e. if it returns `true`, they will be left untouched, otherwise they will be subject to conversion.
-`Note that you cannot do backward pass when using cuDNN and when your model has batch normaliation layers and is in evaluate mode.`
+`Note that you cannot do backward pass when using cuDNN and when your model has batch normalization layers and is in evaluate mode.`
```lua
net = nn.Sequential()
diff --git a/init.lua b/init.lua
index 35575f2..13a77fd 100644
--- a/init.lua
+++ b/init.lua
@@ -219,7 +219,7 @@ function cudnn.setConvolutionDescriptor(data, desc)
-- make sure we have references to these tensors so gc doesn't clean them up
local padATensor = torch.IntTensor(data.padA)
local filterStrideATensor = torch.IntTensor(data.filterStrideA)
- local upscaleATensor = torch.IntTensor(data.upscaleA)
+ local upscaleATensor = torch.IntTensor(data.dilationA)
errcheck('cudnnSetConvolutionNdDescriptor', myDesc[0],
data.arrayLength,
padATensor:data(),