Welcome to mirror list, hosted at ThFree Co, Russian Federation.

github.com/soumith/cudnn.torch.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--README.md3
-rw-r--r--ffi.lua23
-rw-r--r--test/test.lua3
3 files changed, 17 insertions, 12 deletions
diff --git a/README.md b/README.md
index 321f4e6..94278b5 100644
--- a/README.md
+++ b/README.md
@@ -70,7 +70,7 @@ R4 Release Notes:
- Rather than resolving v3-v4 diffs, I have imported new cudnn.h with its entirety and converted comments and defines. This should be less error-prone.
- addTensor_v2 usage changed to the new AddTensor API.
-R4 TODO:
+R4 TODO:
per-activation BN code needs to be added (new .lua similar to SpatialBN.lua), as per Andrei:
I believe we have at least one thing missing - per-activation BN (Torch implementation in nn.BatchNormalization.lua).
What I believe we have now is an integration of implementation for nn.SpatialBatchNormalization.lua
@@ -79,4 +79,3 @@ This is very similar to SpatialBatchNormalization.lua but should use a different
For Spatial BN normalization is performed over N with 1CHW result and for per-activation it's done over NHW with 1C11 result.
Per-activation BN is only used after non-convolutional layers where spatially-invariant behavior is not expected.
-
diff --git a/ffi.lua b/ffi.lua
index 61ac9ce..e2e5ab2 100644
--- a/ffi.lua
+++ b/ffi.lua
@@ -567,7 +567,7 @@ cudnnStatus_t cudnnConvolutionBackwardFilter_v3(
const void *beta,
const cudnnFilterDescriptor_t dwDesc,
void *dw );
-
+
/*********************************************************/
/* helper function to provide the convolution algo that fit best the requirement */
typedef enum
@@ -937,7 +937,7 @@ cudnnStatus_t cudnnCreateLRNDescriptor(
typedef enum { CUDNN_LRN_MIN_N = 1, /* minimum allowed lrnN */
CUDNN_LRN_MAX_N = 16 } /* maximum allowed lrnN */
LRN_MinMaxFakeEnum;
-
+
/* define CUDNN_LRN_MIN_K 1e-5 -- minimum allowed lrnK */
/* define CUDNN_LRN_MIN_BETA 0.01 -- minimum allowed lrnBeta */
@@ -1228,7 +1228,7 @@ cudnnStatus_t cudnnGetConvolutionNdDescriptor_v2(
int strideA[],
int upscaleA[],
cudnnConvolutionMode_t *mode );
-
+
cudnnStatus_t cudnnAddTensor_v2(
cudnnHandle_t handle,
cudnnAddMode_t mode,
@@ -1238,7 +1238,7 @@ cudnnStatus_t cudnnAddTensor_v2(
const void *beta,
cudnnTensorDescriptor_t yDesc,
void *y );
-
+
cudnnStatus_t cudnnConvolutionBackwardFilter_v2(
cudnnHandle_t handle,
const void *alpha,
@@ -1250,7 +1250,7 @@ cudnnStatus_t cudnnConvolutionBackwardFilter_v2(
const void *beta,
const cudnnFilterDescriptor_t dxDesc,
void *dx );
-
+
cudnnStatus_t cudnnConvolutionBackwardData_v2(
cudnnHandle_t handle,
const void *alpha,
@@ -1264,12 +1264,19 @@ cudnnStatus_t cudnnConvolutionBackwardData_v2(
void *dx );
]]
-local ok,err = pcall(function() cudnn.C = ffi.load('libcudnn') end)
+local libnames = {'libcudnn.so.4', 'libcudnn.4.dylib'}
+
+local ok = false
+for i=1,#libnames do
+ ok = pcall(function () cudnn.C = ffi.load(libnames[i]) end)
+ if ok then break; end
+end
+
if not ok then
print(err)
- error([['libcudnn not found in library path.
+ error([['libcudnn (R4) not found in library path.
Please install CuDNN from https://developer.nvidia.com/cuDNN
-Then make sure all the files named as libcudnn.so* are placed in your library load path (for example /usr/local/lib , or manually add a path to LD_LIBRARY_PATH)
+Then make sure files named as libcudnn.so.4 or libcudnn.4.dylib are placed in your library load path (for example /usr/local/lib , or manually add a path to LD_LIBRARY_PATH)
]])
end
diff --git a/test/test.lua b/test/test.lua
index 9b85499..9d13dfd 100644
--- a/test/test.lua
+++ b/test/test.lua
@@ -731,6 +731,7 @@ function cudnntest.LogSoftMax_batch()
end
function cudnntest.SpatialLogSoftMax()
+<<<<<<< HEAD
-- batch
local numLabels = math.random(5,10)
local h = math.random(5,10)
@@ -824,8 +825,6 @@ function cudnntest.SpatialCrossEntropyCriterion()
end
-
-
function cudnntest.functional_bias2D()
local bs = math.random(1,32)
local from = math.random(1,32)