author    | Boris Fomitchev <bfomitchev@nvidia.com> | 2017-02-23 01:33:04 +0300
committer | Boris Fomitchev <bfomitchev@nvidia.com> | 2017-02-23 01:33:04 +0300
commit    | f2a1e328cf18290df9ca4ea9609d843443f5e571 (patch)
tree      | 7adebe3328b43bd0539feb329547fe1e6a10445f
parent    | f4ec352c55a29be06b14f01ca77087d0f9b0e1a4 (diff)
parent    | 19bee86405a4c46458ccbf2d5b005be4e8a2672e (diff)
Fixed renaming issue
-rw-r--r-- | CMakeLists.txt | 1
-rw-r--r-- | README.md      | 2
-rw-r--r-- | init.lua       | 2
3 files changed, 3 insertions, 2 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index a7abd1a..efc52fc 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -19,6 +19,7 @@
 IF(NOT CUDNN_FOUND)
   FIND_PACKAGE(CUDNN 6 EXACT REQUIRED)
 ENDIF()
 
+FILE(GLOB luasrc *.lua)
 SET(src "")
 ADD_TORCH_PACKAGE(cudnn "${src}" "${luasrc}" "NVIDIA CuDNN Bindings")

diff --git a/README.md b/README.md
--- a/README.md
+++ b/README.md
@@ -91,7 +91,7 @@ If you don't want to convert all modules you can pass a function as the third ar
 It will be called at each step, with a module that is currently converted.
 It is meant to exclude modules i.e. if it returns `true`, they will be left untouched, otherwise they will be subject to conversion.
 
-`Note that you cannot do backward pass when using cuDNN and when your model has batch normaliation layers and is in evaluate mode.`
+`Note that you cannot do backward pass when using cuDNN and when your model has batch normalization layers and is in evaluate mode.`
 
 ```lua
 net = nn.Sequential()

diff --git a/init.lua b/init.lua
--- a/init.lua
+++ b/init.lua
@@ -219,7 +219,7 @@ function cudnn.setConvolutionDescriptor(data, desc)
    -- make sure we have references to these tensors so gc doesn't clean them up
    local padATensor = torch.IntTensor(data.padA)
    local filterStrideATensor = torch.IntTensor(data.filterStrideA)
-   local upscaleATensor = torch.IntTensor(data.upscaleA)
+   local upscaleATensor = torch.IntTensor(data.dilationA)
    errcheck('cudnnSetConvolutionNdDescriptor', myDesc[0],
             data.arrayLength,
             padATensor:data(),
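The README hunk above documents `cudnn.convert`'s optional third argument, an exclusion callback, together with the caveat about batch-normalization layers in evaluate mode. A minimal sketch of how the two fit together, assuming the stock Torch `nn`/`cudnn` packages (the model and the `BatchNormalization` pattern match are illustrative, not part of this commit):

```lua
require 'cudnn'  -- pulls in nn/cunn as dependencies

-- A small model; the batch-normalization layer is the one we want to keep on nn.
local net = nn.Sequential()
net:add(nn.SpatialConvolution(3, 16, 3, 3))
net:add(nn.SpatialBatchNormalization(16))
net:add(nn.ReLU())
net:cuda()

-- The callback is invoked per module; returning true leaves that module
-- untouched, so its nn implementation (and with it the backward pass in
-- evaluate mode) is preserved.
net = cudnn.convert(net, cudnn, function(module)
   return torch.type(module):find('BatchNormalization') ~= nil
end)
```

Keeping batch normalization on `nn` this way sidesteps the backward-pass restriction the note describes.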
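The init.lua hunk reflects cuDNN 6's rename of the convolution "upscale" parameter to "dilation": callers of `cudnn.setConvolutionDescriptor` now populate a `dilationA` field in the descriptor-data table instead of `upscaleA`. A hypothetical call using only the fields visible in the hunk (the values and the pre-created `desc` handle are assumptions):

```lua
-- Hypothetical descriptor data for a 2D convolution; field names follow the hunk above.
local data = {
   arrayLength   = 2,        -- number of spatial dimensions
   padA          = {1, 1},   -- zero-padding per dimension
   filterStrideA = {1, 1},   -- stride per dimension
   dilationA     = {1, 1},   -- renamed from upscaleA in cuDNN 6
}
-- desc is assumed to be an already-created cudnnConvolutionDescriptor_t handle.
cudnn.setConvolutionDescriptor(data, desc)
```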