
github.com/torch/cutorch.git
author     Adam Lerer <alerer@fb.com>   2015-08-21 10:12:22 +0300
committer  Adam Lerer <alerer@fb.com>   2015-12-26 01:01:04 +0300
commit     df463695f0cd387736917e02abaafa63c00bbed3 (patch)
tree       ded8f3bd463e47a4a9c4b13f744e4010e6345703   /lib/THC/generic/THCTensor.c
parent     d3c6fa5e2648f902f497fe81d0fa30c62552e4e9 (diff)
Add generic CudaTensor types to cutorch
Diffstat (limited to 'lib/THC/generic/THCTensor.c')
-rw-r--r--  lib/THC/generic/THCTensor.c  293
1 file changed, 147 insertions(+), 146 deletions(-)
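
The conversion in the diff below follows the TH "generic file" pattern: the unconditional #includes are replaced by a THC_GENERIC_FILE guard, concrete names such as THCudaTensor_storage become THCTensor_(storage), and float becomes the placeholder type real, so the same source can be compiled once per element type. As a rough, self-contained illustration of the name pasting involved (the TH_CONCAT_* helpers and the real/CReal definitions follow the usual TH convention and are assumptions, not part of this diff):

/* sketch.c -- minimal sketch of the THCTensor_() macro expansion.
 * Names here are assumed for illustration, not taken from cutorch. */
#include <stdio.h>

#define TH_CONCAT_4_EXPAND(a, b, c, d) a##b##c##d
#define TH_CONCAT_4(a, b, c, d) TH_CONCAT_4_EXPAND(a, b, c, d)

/* A driver would define these once per element type before including
 * the generic file; here we hard-code the float/"Cuda" instantiation. */
#define real  float
#define CReal Cuda
#define THCTensor_(NAME) TH_CONCAT_4(TH, CReal, Tensor_, NAME)

/* Written once against `real` and THCTensor_()... */
static real THCTensor_(twice)(real x) { return x + x; }

int main(void) {
    /* ...and callable under the expanded concrete name. */
    printf("%f\n", (double)THCudaTensor_twice(2.5f));
    return 0;
}
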
diff --git a/lib/THC/generic/THCTensor.c b/lib/THC/generic/THCTensor.c
index fa325e3..6645c0b 100644
--- a/lib/THC/generic/THCTensor.c
+++ b/lib/THC/generic/THCTensor.c
@@ -1,51 +1,50 @@
-#include "THCGeneral.h"
-#include "THCTensor.h"
-#include "THCTensorCopy.h"
-#include "THAtomic.h"
+#ifndef THC_GENERIC_FILE
+#define THC_GENERIC_FILE "generic/THCTensor.c"
+#else
/**** access methods ****/
-THCudaStorage *THCudaTensor_storage(THCState *state, const THCudaTensor *self)
+THCStorage *THCTensor_(storage)(THCState *state, const THCTensor *self)
{
return self->storage;
}
-long THCudaTensor_storageOffset(THCState *state, const THCudaTensor *self)
+long THCTensor_(storageOffset)(THCState *state, const THCTensor *self)
{
return self->storageOffset;
}
-int THCudaTensor_nDimension(THCState *state, const THCudaTensor *self)
+int THCTensor_(nDimension)(THCState *state, const THCTensor *self)
{
return self->nDimension;
}
-long THCudaTensor_size(THCState *state, const THCudaTensor *self, int dim)
+long THCTensor_(size)(THCState *state, const THCTensor *self, int dim)
{
THArgCheck((dim >= 0) && (dim < self->nDimension), 2, "out of range");
return self->size[dim];
}
-long THCudaTensor_stride(THCState *state, const THCudaTensor *self, int dim)
+long THCTensor_(stride)(THCState *state, const THCTensor *self, int dim)
{
THArgCheck((dim >= 0) && (dim < self->nDimension), 2, "out of range");
return self->stride[dim];
}
-THLongStorage *THCudaTensor_newSizeOf(THCState *state, THCudaTensor *self)
+THLongStorage *THCTensor_(newSizeOf)(THCState *state, THCTensor *self)
{
THLongStorage *size = THLongStorage_newWithSize(self->nDimension);
THLongStorage_rawCopy(size, self->size);
return size;
}
-THLongStorage *THCudaTensor_newStrideOf(THCState *state, THCudaTensor *self)
+THLongStorage *THCTensor_(newStrideOf)(THCState *state, THCTensor *self)
{
THLongStorage *stride = THLongStorage_newWithSize(self->nDimension);
THLongStorage_rawCopy(stride, self->stride);
return stride;
}
-float *THCudaTensor_data(THCState *state, const THCudaTensor *self)
+real *THCTensor_(data)(THCState *state, const THCTensor *self)
{
if(self->storage)
return (self->storage->data+self->storageOffset);
@@ -53,36 +52,36 @@ float *THCudaTensor_data(THCState *state, const THCudaTensor *self)
return NULL;
}
-void THCudaTensor_setFlag(THCState *state, THCudaTensor *self, const char flag)
+void THCTensor_(setFlag)(THCState *state, THCTensor *self, const char flag)
{
self->flag |= flag;
}
-void THCudaTensor_clearFlag(THCState *state, THCudaTensor *self, const char flag)
+void THCTensor_(clearFlag)(THCState *state, THCTensor *self, const char flag)
{
self->flag &= ~flag;
}
/**** creation methods ****/
-static void THCudaTensor_rawInit(THCState *state, THCudaTensor *self);
-static void THCudaTensor_rawSet(THCState *state, THCudaTensor *self, THCudaStorage *storage, long storageOffset, int nDimension, long *size, long *stride);
+static void THCTensor_(rawInit)(THCState *state, THCTensor *self);
+static void THCTensor_(rawSet)(THCState *state, THCTensor *self, THCStorage *storage, long storageOffset, int nDimension, long *size, long *stride);
/* Empty init */
-THCudaTensor *THCudaTensor_new(THCState *state)
+THCTensor *THCTensor_(new)(THCState *state)
{
- THCudaTensor *self = (THCudaTensor*)THAlloc(sizeof(THCudaTensor));
- THCudaTensor_rawInit(state, self);
+ THCTensor *self = (THCTensor*)THAlloc(sizeof(THCTensor));
+ THCTensor_(rawInit)(state, self);
return self;
}
/* Pointer-copy init */
-THCudaTensor *THCudaTensor_newWithTensor(THCState *state, THCudaTensor *tensor)
+THCTensor *THCTensor_(newWithTensor)(THCState *state, THCTensor *tensor)
{
- THCudaTensor *self = (THCudaTensor*)THAlloc(sizeof(THCudaTensor));
- THCudaTensor_rawInit(state, self);
- THCudaTensor_rawSet(state,
+ THCTensor *self = (THCTensor*)THAlloc(sizeof(THCTensor));
+ THCTensor_(rawInit)(state, self);
+ THCTensor_(rawSet)(state,
self,
tensor->storage,
tensor->storageOffset,
@@ -93,14 +92,14 @@ THCudaTensor *THCudaTensor_newWithTensor(THCState *state, THCudaTensor *tensor)
}
/* Storage init */
-THCudaTensor *THCudaTensor_newWithStorage(THCState *state, THCudaStorage *storage, long storageOffset, THLongStorage *size, THLongStorage *stride)
+THCTensor *THCTensor_(newWithStorage)(THCState *state, THCStorage *storage, long storageOffset, THLongStorage *size, THLongStorage *stride)
{
- THCudaTensor *self = (THCudaTensor*)THAlloc(sizeof(THCudaTensor));
+ THCTensor *self = (THCTensor*)THAlloc(sizeof(THCTensor));
if(size && stride)
THArgCheck(size->size == stride->size, 4, "inconsistent size");
- THCudaTensor_rawInit(state, self);
- THCudaTensor_rawSet(state,
+ THCTensor_(rawInit)(state, self);
+ THCTensor_(rawSet)(state,
self,
storage,
storageOffset,
@@ -110,28 +109,28 @@ THCudaTensor *THCudaTensor_newWithStorage(THCState *state, THCudaStorage *storag
return self;
}
-THCudaTensor *THCudaTensor_newWithStorage1d(THCState *state, THCudaStorage *storage, long storageOffset,
+THCTensor *THCTensor_(newWithStorage1d)(THCState *state, THCStorage *storage, long storageOffset,
long size0, long stride0)
{
- return THCudaTensor_newWithStorage4d(state, storage, storageOffset, size0, stride0, -1, -1, -1, -1, -1, -1);
+ return THCTensor_(newWithStorage4d)(state, storage, storageOffset, size0, stride0, -1, -1, -1, -1, -1, -1);
}
-THCudaTensor *THCudaTensor_newWithStorage2d(THCState *state, THCudaStorage *storage, long storageOffset,
+THCTensor *THCTensor_(newWithStorage2d)(THCState *state, THCStorage *storage, long storageOffset,
long size0, long stride0,
long size1, long stride1)
{
- return THCudaTensor_newWithStorage4d(state, storage, storageOffset, size0, stride0, size1, stride1, -1, -1, -1, -1);
+ return THCTensor_(newWithStorage4d)(state, storage, storageOffset, size0, stride0, size1, stride1, -1, -1, -1, -1);
}
-THCudaTensor *THCudaTensor_newWithStorage3d(THCState *state, THCudaStorage *storage, long storageOffset,
+THCTensor *THCTensor_(newWithStorage3d)(THCState *state, THCStorage *storage, long storageOffset,
long size0, long stride0,
long size1, long stride1,
long size2, long stride2)
{
- return THCudaTensor_newWithStorage4d(state, storage, storageOffset, size0, stride0, size1, stride1, size2, stride2, -1, -1);
+ return THCTensor_(newWithStorage4d)(state, storage, storageOffset, size0, stride0, size1, stride1, size2, stride2, -1, -1);
}
-THCudaTensor *THCudaTensor_newWithStorage4d(THCState *state, THCudaStorage *storage, long storageOffset,
+THCTensor *THCTensor_(newWithStorage4d)(THCState *state, THCStorage *storage, long storageOffset,
long size0, long stride0,
long size1, long stride1,
long size2, long stride2,
@@ -140,102 +139,102 @@ THCudaTensor *THCudaTensor_newWithStorage4d(THCState *state, THCudaStorage *stor
long size[4] = {size0, size1, size2, size3};
long stride[4] = {stride0, stride1, stride2, stride3};
- THCudaTensor *self = (THCudaTensor*)THAlloc(sizeof(THCudaTensor));
- THCudaTensor_rawInit(state, self);
- THCudaTensor_rawSet(state, self, storage, storageOffset, 4, size, stride);
+ THCTensor *self = (THCTensor*)THAlloc(sizeof(THCTensor));
+ THCTensor_(rawInit)(state, self);
+ THCTensor_(rawSet)(state, self, storage, storageOffset, 4, size, stride);
return self;
}
-THCudaTensor *THCudaTensor_newWithSize(THCState *state, THLongStorage *size, THLongStorage *stride)
+THCTensor *THCTensor_(newWithSize)(THCState *state, THLongStorage *size, THLongStorage *stride)
{
- return THCudaTensor_newWithStorage(state, NULL, 0, size, stride);
+ return THCTensor_(newWithStorage)(state, NULL, 0, size, stride);
}
-THCudaTensor *THCudaTensor_newWithSize1d(THCState *state, long size0)
+THCTensor *THCTensor_(newWithSize1d)(THCState *state, long size0)
{
- return THCudaTensor_newWithSize4d(state, size0, -1, -1, -1);
+ return THCTensor_(newWithSize4d)(state, size0, -1, -1, -1);
}
-THCudaTensor *THCudaTensor_newWithSize2d(THCState *state, long size0, long size1)
+THCTensor *THCTensor_(newWithSize2d)(THCState *state, long size0, long size1)
{
- return THCudaTensor_newWithSize4d(state, size0, size1, -1, -1);
+ return THCTensor_(newWithSize4d)(state, size0, size1, -1, -1);
}
-THCudaTensor *THCudaTensor_newWithSize3d(THCState *state, long size0, long size1, long size2)
+THCTensor *THCTensor_(newWithSize3d)(THCState *state, long size0, long size1, long size2)
{
- return THCudaTensor_newWithSize4d(state, size0, size1, size2, -1);
+ return THCTensor_(newWithSize4d)(state, size0, size1, size2, -1);
}
-THCudaTensor *THCudaTensor_newWithSize4d(THCState *state, long size0, long size1, long size2, long size3)
+THCTensor *THCTensor_(newWithSize4d)(THCState *state, long size0, long size1, long size2, long size3)
{
long size[4] = {size0, size1, size2, size3};
- THCudaTensor *self = (THCudaTensor*)THAlloc(sizeof(THCudaTensor));
- THCudaTensor_rawInit(state, self);
- THCudaTensor_rawResize(state, self, 4, size, NULL);
+ THCTensor *self = (THCTensor*)THAlloc(sizeof(THCTensor));
+ THCTensor_(rawInit)(state, self);
+ THCTensor_(rawResize)(state, self, 4, size, NULL);
return self;
}
-THCudaTensor *THCudaTensor_newClone(THCState *state, THCudaTensor *self)
+THCTensor *THCTensor_(newClone)(THCState *state, THCTensor *self)
{
- THCudaTensor *tensor = THCudaTensor_new(state);
- THCudaTensor_resizeAs(state, tensor, self);
- THCudaTensor_copy(state, tensor, self);
+ THCTensor *tensor = THCTensor_(new)(state);
+ THCTensor_(resizeAs)(state, tensor, self);
+ THCTensor_(copy)(state, tensor, self);
return tensor;
}
-THCudaTensor *THCudaTensor_newContiguous(THCState *state, THCudaTensor *self)
+THCTensor *THCTensor_(newContiguous)(THCState *state, THCTensor *self)
{
- if(!THCudaTensor_isContiguous(state, self))
- return THCudaTensor_newClone(state, self);
+ if(!THCTensor_(isContiguous)(state, self))
+ return THCTensor_(newClone)(state, self);
else
{
- THCudaTensor_retain(state, self);
+ THCTensor_(retain)(state, self);
return self;
}
}
-THCudaTensor *THCudaTensor_newSelect(THCState *state, THCudaTensor *tensor, int dimension_, long sliceIndex_)
+THCTensor *THCTensor_(newSelect)(THCState *state, THCTensor *tensor, int dimension_, long sliceIndex_)
{
- THCudaTensor *self = THCudaTensor_newWithTensor(state, tensor);
- THCudaTensor_select(state, self, NULL, dimension_, sliceIndex_);
+ THCTensor *self = THCTensor_(newWithTensor)(state, tensor);
+ THCTensor_(select)(state, self, NULL, dimension_, sliceIndex_);
return self;
}
-THCudaTensor *THCudaTensor_newNarrow(THCState *state, THCudaTensor *tensor, int dimension_, long firstIndex_, long size_)
+THCTensor *THCTensor_(newNarrow)(THCState *state, THCTensor *tensor, int dimension_, long firstIndex_, long size_)
{
- THCudaTensor *self = THCudaTensor_newWithTensor(state, tensor);
- THCudaTensor_narrow(state, self, NULL, dimension_, firstIndex_, size_);
+ THCTensor *self = THCTensor_(newWithTensor)(state, tensor);
+ THCTensor_(narrow)(state, self, NULL, dimension_, firstIndex_, size_);
return self;
}
-THCudaTensor *THCudaTensor_newTranspose(THCState *state, THCudaTensor *tensor, int dimension1_, int dimension2_)
+THCTensor *THCTensor_(newTranspose)(THCState *state, THCTensor *tensor, int dimension1_, int dimension2_)
{
- THCudaTensor *self = THCudaTensor_newWithTensor(state, tensor);
- THCudaTensor_transpose(state, self, NULL, dimension1_, dimension2_);
+ THCTensor *self = THCTensor_(newWithTensor)(state, tensor);
+ THCTensor_(transpose)(state, self, NULL, dimension1_, dimension2_);
return self;
}
-THCudaTensor *THCudaTensor_newUnfold(THCState *state, THCudaTensor *tensor, int dimension_, long size_, long step_)
+THCTensor *THCTensor_(newUnfold)(THCState *state, THCTensor *tensor, int dimension_, long size_, long step_)
{
- THCudaTensor *self = THCudaTensor_newWithTensor(state, tensor);
- THCudaTensor_unfold(state, self, NULL, dimension_, size_, step_);
+ THCTensor *self = THCTensor_(newWithTensor)(state, tensor);
+ THCTensor_(unfold)(state, self, NULL, dimension_, size_, step_);
return self;
}
/* Resize */
-void THCudaTensor_resize(THCState *state, THCudaTensor *self, THLongStorage *size, THLongStorage *stride)
+void THCTensor_(resize)(THCState *state, THCTensor *self, THLongStorage *size, THLongStorage *stride)
{
THArgCheck(size != NULL, 2, "invalid size");
if(stride)
THArgCheck(stride->size == size->size, 3, "invalid stride");
- THCudaTensor_rawResize(state, self, size->size, size->data, (stride ? stride->data : NULL));
+ THCTensor_(rawResize)(state, self, size->size, size->data, (stride ? stride->data : NULL));
}
-void THCudaTensor_resizeAs(THCState *state, THCudaTensor *self, THCudaTensor *src)
+void THCTensor_(resizeAs)(THCState *state, THCTensor *self, THCTensor *src)
{
int isSame = 0;
int d;
@@ -253,42 +252,42 @@ void THCudaTensor_resizeAs(THCState *state, THCudaTensor *self, THCudaTensor *sr
}
if(!isSame)
- THCudaTensor_rawResize(state, self, src->nDimension, src->size, NULL);
+ THCTensor_(rawResize)(state, self, src->nDimension, src->size, NULL);
}
-void THCudaTensor_resize1d(THCState *state, THCudaTensor *tensor, long size0)
+void THCTensor_(resize1d)(THCState *state, THCTensor *tensor, long size0)
{
- THCudaTensor_resize4d(state, tensor, size0, -1, -1, -1);
+ THCTensor_(resize4d)(state, tensor, size0, -1, -1, -1);
}
-void THCudaTensor_resize2d(THCState *state, THCudaTensor *tensor, long size0, long size1)
+void THCTensor_(resize2d)(THCState *state, THCTensor *tensor, long size0, long size1)
{
- THCudaTensor_resize4d(state, tensor, size0, size1, -1, -1);
+ THCTensor_(resize4d)(state, tensor, size0, size1, -1, -1);
}
-void THCudaTensor_resize3d(THCState *state, THCudaTensor *tensor, long size0, long size1, long size2)
+void THCTensor_(resize3d)(THCState *state, THCTensor *tensor, long size0, long size1, long size2)
{
- THCudaTensor_resize4d(state, tensor, size0, size1, size2, -1);
+ THCTensor_(resize4d)(state, tensor, size0, size1, size2, -1);
}
-void THCudaTensor_resize4d(THCState *state, THCudaTensor *self, long size0, long size1, long size2, long size3)
+void THCTensor_(resize4d)(THCState *state, THCTensor *self, long size0, long size1, long size2, long size3)
{
long size[4] = {size0, size1, size2, size3};
- THCudaTensor_rawResize(state, self, 4, size, NULL);
+ THCTensor_(rawResize)(state, self, 4, size, NULL);
}
-void THCudaTensor_resize5d(THCState *state, THCudaTensor *self, long size0, long size1, long size2, long size3, long size4)
+void THCTensor_(resize5d)(THCState *state, THCTensor *self, long size0, long size1, long size2, long size3, long size4)
{
long size[5] = {size0, size1, size2, size3, size4};
- THCudaTensor_rawResize(state, self, 5, size, NULL);
+ THCTensor_(rawResize)(state, self, 5, size, NULL);
}
-void THCudaTensor_set(THCState *state, THCudaTensor *self, THCudaTensor *src)
+void THCTensor_(set)(THCState *state, THCTensor *self, THCTensor *src)
{
if(self != src)
- THCudaTensor_rawSet(state,
+ THCTensor_(rawSet)(state,
self,
src->storage,
src->storageOffset,
@@ -297,12 +296,12 @@ void THCudaTensor_set(THCState *state, THCudaTensor *self, THCudaTensor *src)
src->stride);
}
-void THCudaTensor_setStorage(THCState *state, THCudaTensor *self, THCudaStorage *storage_, long storageOffset_, THLongStorage *size_, THLongStorage *stride_)
+void THCTensor_(setStorage)(THCState *state, THCTensor *self, THCStorage *storage_, long storageOffset_, THLongStorage *size_, THLongStorage *stride_)
{
if(size_ && stride_)
THArgCheck(size_->size == stride_->size, 5, "inconsistent size/stride sizes");
- THCudaTensor_rawSet(state,
+ THCTensor_(rawSet)(state,
self,
storage_,
storageOffset_,
@@ -311,40 +310,40 @@ void THCudaTensor_setStorage(THCState *state, THCudaTensor *self, THCudaStorage
(stride_ ? stride_->data : NULL));
}
-void THCudaTensor_setStorage1d(THCState *state, THCudaTensor *self, THCudaStorage *storage_, long storageOffset_,
+void THCTensor_(setStorage1d)(THCState *state, THCTensor *self, THCStorage *storage_, long storageOffset_,
long size0_, long stride0_)
{
- THCudaTensor_setStorage4d(state, self, storage_, storageOffset_,
+ THCTensor_(setStorage4d)(state, self, storage_, storageOffset_,
size0_, stride0_,
-1, -1,
-1, -1,
-1, -1);
}
-void THCudaTensor_setStorage2d(THCState *state, THCudaTensor *self, THCudaStorage *storage_, long storageOffset_,
+void THCTensor_(setStorage2d)(THCState *state, THCTensor *self, THCStorage *storage_, long storageOffset_,
long size0_, long stride0_,
long size1_, long stride1_)
{
- THCudaTensor_setStorage4d(state, self, storage_, storageOffset_,
+ THCTensor_(setStorage4d)(state, self, storage_, storageOffset_,
size0_, stride0_,
size1_, stride1_,
-1, -1,
-1, -1);
}
-void THCudaTensor_setStorage3d(THCState *state, THCudaTensor *self, THCudaStorage *storage_, long storageOffset_,
+void THCTensor_(setStorage3d)(THCState *state, THCTensor *self, THCStorage *storage_, long storageOffset_,
long size0_, long stride0_,
long size1_, long stride1_,
long size2_, long stride2_)
{
- THCudaTensor_setStorage4d(state, self, storage_, storageOffset_,
+ THCTensor_(setStorage4d)(state, self, storage_, storageOffset_,
size0_, stride0_,
size1_, stride1_,
size2_, stride2_,
-1, -1);
}
-void THCudaTensor_setStorage4d(THCState *state, THCudaTensor *self, THCudaStorage *storage_, long storageOffset_,
+void THCTensor_(setStorage4d)(THCState *state, THCTensor *self, THCStorage *storage_, long storageOffset_,
long size0_, long stride0_,
long size1_, long stride1_,
long size2_, long stride2_,
@@ -354,11 +353,11 @@ void THCudaTensor_setStorage4d(THCState *state, THCudaTensor *self, THCudaStorag
long size[4] = {size0_, size1_, size2_, size3_};
long stride[4] = {stride0_, stride1_, stride2_, stride3_};
- THCudaTensor_rawSet(state, self, storage_, storageOffset_, 4, size, stride);
+ THCTensor_(rawSet)(state, self, storage_, storageOffset_, 4, size, stride);
}
-void THCudaTensor_narrow(THCState *state, THCudaTensor *self, THCudaTensor *src, int dimension, long firstIndex, long size)
+void THCTensor_(narrow)(THCState *state, THCTensor *self, THCTensor *src, int dimension, long firstIndex, long size)
{
if(!src)
src = self;
@@ -367,7 +366,7 @@ void THCudaTensor_narrow(THCState *state, THCudaTensor *self, THCudaTensor *src,
THArgCheck( (firstIndex >= 0) && (firstIndex < src->size[dimension]), 4, "out of range");
THArgCheck( (size > 0) && (firstIndex+size <= src->size[dimension]), 5, "out of range");
- THCudaTensor_set(state, self, src);
+ THCTensor_(set)(state, self, src);
if(firstIndex > 0)
self->storageOffset += firstIndex*self->stride[dimension];
@@ -375,7 +374,7 @@ void THCudaTensor_narrow(THCState *state, THCudaTensor *self, THCudaTensor *src,
self->size[dimension] = size;
}
-void THCudaTensor_select(THCState *state, THCudaTensor *self, THCudaTensor *src, int dimension, long sliceIndex)
+void THCTensor_(select)(THCState *state, THCTensor *self, THCTensor *src, int dimension, long sliceIndex)
{
int d;
@@ -386,8 +385,8 @@ void THCudaTensor_select(THCState *state, THCudaTensor *self, THCudaTensor *src,
THArgCheck((dimension >= 0) && (dimension < src->nDimension), 3, "out of range");
THArgCheck((sliceIndex >= 0) && (sliceIndex < src->size[dimension]), 4, "out of range");
- THCudaTensor_set(state, self, src);
- THCudaTensor_narrow(state, self, NULL, dimension, sliceIndex, 1);
+ THCTensor_(set)(state, self, src);
+ THCTensor_(narrow)(state, self, NULL, dimension, sliceIndex, 1);
for(d = dimension; d < self->nDimension-1; d++)
{
self->size[d] = self->size[d+1];
@@ -396,7 +395,7 @@ void THCudaTensor_select(THCState *state, THCudaTensor *self, THCudaTensor *src,
self->nDimension--;
}
-void THCudaTensor_transpose(THCState *state, THCudaTensor *self, THCudaTensor *src, int dimension1, int dimension2)
+void THCTensor_(transpose)(THCState *state, THCTensor *self, THCTensor *src, int dimension1, int dimension2)
{
long z;
@@ -406,7 +405,7 @@ void THCudaTensor_transpose(THCState *state, THCudaTensor *self, THCudaTensor *s
THArgCheck( (dimension1 >= 0) && (dimension1 < src->nDimension), 1, "out of range");
THArgCheck( (dimension2 >= 0) && (dimension2 < src->nDimension), 2, "out of range");
- THCudaTensor_set(state, self, src);
+ THCTensor_(set)(state, self, src);
if(dimension1 == dimension2)
return;
@@ -419,7 +418,7 @@ void THCudaTensor_transpose(THCState *state, THCudaTensor *self, THCudaTensor *s
self->size[dimension2] = z;
}
-void THCudaTensor_unfold(THCState *state, THCudaTensor *self, THCudaTensor *src, int dimension, long size, long step)
+void THCTensor_(unfold)(THCState *state, THCTensor *self, THCTensor *src, int dimension, long size, long step)
{
long *newSize;
long *newStride;
@@ -433,7 +432,7 @@ void THCudaTensor_unfold(THCState *state, THCudaTensor *self, THCudaTensor *src,
THArgCheck(size <= src->size[dimension], 3, "out of range");
THArgCheck(step > 0, 4, "invalid step");
- THCudaTensor_set(state, self, src);
+ THCTensor_(set)(state, self, src);
newSize = (long*)THAlloc(sizeof(long)*(self->nDimension+1));
newStride = (long*)THAlloc(sizeof(long)*(self->nDimension+1));
@@ -463,7 +462,7 @@ void THCudaTensor_unfold(THCState *state, THCudaTensor *self, THCudaTensor *src,
}
/* we have to handle the case where the result is a number */
-void THCudaTensor_squeeze(THCState *state, THCudaTensor *self, THCudaTensor *src)
+void THCTensor_(squeeze)(THCState *state, THCTensor *self, THCTensor *src)
{
int ndim = 0;
int d;
@@ -471,7 +470,7 @@ void THCudaTensor_squeeze(THCState *state, THCudaTensor *self, THCudaTensor *src
if(!src)
src = self;
- THCudaTensor_set(state, self, src);
+ THCTensor_(set)(state, self, src);
for(d = 0; d < src->nDimension; d++)
{
@@ -496,7 +495,7 @@ void THCudaTensor_squeeze(THCState *state, THCudaTensor *self, THCudaTensor *src
self->nDimension = ndim;
}
-void THCudaTensor_squeeze1d(THCState *state, THCudaTensor *self, THCudaTensor *src, int dimension)
+void THCTensor_(squeeze1d)(THCState *state, THCTensor *self, THCTensor *src, int dimension)
{
int d;
@@ -505,7 +504,7 @@ void THCudaTensor_squeeze1d(THCState *state, THCudaTensor *self, THCudaTensor *s
THArgCheck(dimension < src->nDimension, 3, "dimension out of range");
- THCudaTensor_set(state, self, src);
+ THCTensor_(set)(state, self, src);
if(src->size[dimension] == 1 && src->nDimension > 1)
{
@@ -518,7 +517,7 @@ void THCudaTensor_squeeze1d(THCState *state, THCudaTensor *self, THCudaTensor *s
}
}
-int THCudaTensor_isContiguous(THCState *state, const THCudaTensor *self)
+int THCTensor_(isContiguous)(THCState *state, const THCTensor *self)
{
long z = 1;
int d;
@@ -535,7 +534,7 @@ int THCudaTensor_isContiguous(THCState *state, const THCudaTensor *self)
return 1;
}
-int THCudaTensor_isSize(THCState *state, const THCudaTensor *self, const THLongStorage *dims)
+int THCTensor_(isSize)(THCState *state, const THCTensor *self, const THLongStorage *dims)
{
int d;
if (self->nDimension != dims->size)
@@ -549,7 +548,7 @@ int THCudaTensor_isSize(THCState *state, const THCudaTensor *self, const THLongS
return 1;
}
-int THCudaTensor_isSetTo(THCState *state, const THCudaTensor *self, const THCudaTensor *src)
+int THCTensor_(isSetTo)(THCState *state, const THCTensor *self, const THCTensor *src)
{
if (self->storage == src->storage &&
self->storageOffset == src->storageOffset &&
@@ -566,7 +565,7 @@ int THCudaTensor_isSetTo(THCState *state, const THCudaTensor *self, const THCuda
return 0;
}
-int THCudaTensor_isSameSizeAs(THCState *state, const THCudaTensor *self, const THCudaTensor* src)
+int THCTensor_(isSameSizeAs)(THCState *state, const THCTensor *self, const THCTensor* src)
{
int d;
if (self->nDimension != src->nDimension)
@@ -579,7 +578,7 @@ int THCudaTensor_isSameSizeAs(THCState *state, const THCudaTensor *self, const T
return 1;
}
-long THCudaTensor_nElement(THCState *state, const THCudaTensor *self)
+long THCTensor_(nElement)(THCState *state, const THCTensor *self)
{
if(self->nDimension == 0)
return 0;
@@ -593,13 +592,13 @@ long THCudaTensor_nElement(THCState *state, const THCudaTensor *self)
}
}
-void THCudaTensor_retain(THCState *state, THCudaTensor *self)
+void THCTensor_(retain)(THCState *state, THCTensor *self)
{
if(self->flag & TH_TENSOR_REFCOUNTED)
THAtomicIncrementRef(&self->refcount);
}
-void THCudaTensor_free(THCState *state, THCudaTensor *self)
+void THCTensor_(free)(THCState *state, THCTensor *self)
{
if(!self)
return;
@@ -611,23 +610,23 @@ void THCudaTensor_free(THCState *state, THCudaTensor *self)
THFree(self->size);
THFree(self->stride);
if(self->storage)
- THCudaStorage_free(state, self->storage);
+ THCStorage_(free)(state, self->storage);
THFree(self);
}
}
}
-void THCudaTensor_freeCopyTo(THCState *state, THCudaTensor *self, THCudaTensor *dst)
+void THCTensor_(freeCopyTo)(THCState *state, THCTensor *self, THCTensor *dst)
{
if(self != dst)
- THCudaTensor_copy(state, dst, self);
+ THCTensor_(copy)(state, dst, self);
- THCudaTensor_free(state, self);
+ THCTensor_(free)(state, self);
}
/*******************************************************************************/
-static void THCudaTensor_rawInit(THCState *state, THCudaTensor *self)
+static void THCTensor_(rawInit)(THCState *state, THCTensor *self)
{
self->refcount = 1;
self->storage = NULL;
@@ -638,18 +637,18 @@ static void THCudaTensor_rawInit(THCState *state, THCudaTensor *self)
self->flag = TH_TENSOR_REFCOUNTED;
}
-static void THCudaTensor_rawSet(THCState *state, THCudaTensor *self, THCudaStorage *storage, long storageOffset, int nDimension, long *size, long *stride)
+static void THCTensor_(rawSet)(THCState *state, THCTensor *self, THCStorage *storage, long storageOffset, int nDimension, long *size, long *stride)
{
/* storage */
if(self->storage != storage)
{
if(self->storage)
- THCudaStorage_free(state, self->storage);
+ THCStorage_(free)(state, self->storage);
if(storage)
{
self->storage = storage;
- THCudaStorage_retain(state, self->storage);
+ THCStorage_(retain)(state, self->storage);
}
else
self->storage = NULL;
@@ -661,10 +660,10 @@ static void THCudaTensor_rawSet(THCState *state, THCudaTensor *self, THCudaStora
self->storageOffset = storageOffset;
/* size and stride */
- THCudaTensor_rawResize(state, self, nDimension, size, stride);
+ THCTensor_(rawResize)(state, self, nDimension, size, stride);
}
-void THCudaTensor_rawResize(THCState *state, THCudaTensor *self, int nDimension, long *size, long *stride)
+void THCTensor_(rawResize)(THCState *state, THCTensor *self, int nDimension, long *size, long *stride)
{
int d;
int nDimension_;
@@ -722,72 +721,72 @@ void THCudaTensor_rawResize(THCState *state, THCudaTensor *self, int nDimension,
if(totalSize+self->storageOffset > 0)
{
if(!self->storage)
- self->storage = THCudaStorage_new(state);
+ self->storage = THCStorage_(new)(state);
if(totalSize+self->storageOffset > self->storage->size)
- THCudaStorage_resize(state, self->storage, totalSize+self->storageOffset);
+ THCStorage_(resize)(state, self->storage, totalSize+self->storageOffset);
}
}
else
self->nDimension = 0;
}
-void THCudaTensor_set1d(THCState *state, THCudaTensor *tensor, long x0, float value)
+void THCTensor_(set1d)(THCState *state, THCTensor *tensor, long x0, real value)
{
THArgCheck(tensor->nDimension == 1, 1, "tensor must have one dimension");
THArgCheck( (x0 >= 0) && (x0 < tensor->size[0]), 2, "out of range");
- THCudaStorage_set(state, tensor->storage, tensor->storageOffset+x0*tensor->stride[0], value);
+ THCStorage_(set)(state, tensor->storage, tensor->storageOffset+x0*tensor->stride[0], value);
}
-float THCudaTensor_get1d(THCState *state, const THCudaTensor *tensor, long x0)
+real THCTensor_(get1d)(THCState *state, const THCTensor *tensor, long x0)
{
THArgCheck(tensor->nDimension == 1, 1, "tensor must have one dimension");
THArgCheck( (x0 >= 0) && (x0 < tensor->size[0]), 2, "out of range");
- return THCudaStorage_get(state, tensor->storage, tensor->storageOffset+x0*tensor->stride[0]);
+ return THCStorage_(get)(state, tensor->storage, tensor->storageOffset+x0*tensor->stride[0]);
}
-void THCudaTensor_set2d(THCState *state, THCudaTensor *tensor, long x0, long x1, float value)
+void THCTensor_(set2d)(THCState *state, THCTensor *tensor, long x0, long x1, real value)
{
THArgCheck(tensor->nDimension == 2, 1, "tensor must have two dimensions");
THArgCheck((x0 >= 0) && (x0 < tensor->size[0]) && (x1 >= 0) && (x1 < tensor->size[1]), 2, "out of range");
- THCudaStorage_set(state, tensor->storage, tensor->storageOffset+x0*tensor->stride[0]+x1*tensor->stride[1], value);
+ THCStorage_(set)(state, tensor->storage, tensor->storageOffset+x0*tensor->stride[0]+x1*tensor->stride[1], value);
}
-float THCudaTensor_get2d(THCState *state, const THCudaTensor *tensor, long x0, long x1)
+real THCTensor_(get2d)(THCState *state, const THCTensor *tensor, long x0, long x1)
{
THArgCheck(tensor->nDimension == 2, 1, "tensor must have two dimensions");
THArgCheck((x0 >= 0) && (x0 < tensor->size[0]) && (x1 >= 0) && (x1 < tensor->size[1]), 2, "out of range");
- return THCudaStorage_get(state, tensor->storage, tensor->storageOffset+x0*tensor->stride[0]+x1*tensor->stride[1]);
+ return THCStorage_(get)(state, tensor->storage, tensor->storageOffset+x0*tensor->stride[0]+x1*tensor->stride[1]);
}
-void THCudaTensor_set3d(THCState *state, THCudaTensor *tensor, long x0, long x1, long x2, float value)
+void THCTensor_(set3d)(THCState *state, THCTensor *tensor, long x0, long x1, long x2, real value)
{
THArgCheck(tensor->nDimension == 3, 1, "tensor must have three dimensions");
THArgCheck( (x0 >= 0) && (x0 < tensor->size[0]) && (x1 >= 0) && (x1 < tensor->size[1]) && (x2 >= 0) && (x2 < tensor->size[2]), 2, "out of range");
- THCudaStorage_set(state, tensor->storage, tensor->storageOffset+x0*tensor->stride[0]+x1*tensor->stride[1]+x2*tensor->stride[2], value);
+ THCStorage_(set)(state, tensor->storage, tensor->storageOffset+x0*tensor->stride[0]+x1*tensor->stride[1]+x2*tensor->stride[2], value);
}
-float THCudaTensor_get3d(THCState *state, const THCudaTensor *tensor, long x0, long x1, long x2)
+real THCTensor_(get3d)(THCState *state, const THCTensor *tensor, long x0, long x1, long x2)
{
THArgCheck(tensor->nDimension == 3, 1, "tensor must have three dimensions");
THArgCheck( (x0 >= 0) && (x0 < tensor->size[0]) && (x1 >= 0) && (x1 < tensor->size[1]) && (x2 >= 0) && (x2 < tensor->size[2]), 2, "out of range");
- return THCudaStorage_get(state, tensor->storage, tensor->storageOffset+x0*tensor->stride[0]+x1*tensor->stride[1]+x2*tensor->stride[2]);
+ return THCStorage_(get)(state, tensor->storage, tensor->storageOffset+x0*tensor->stride[0]+x1*tensor->stride[1]+x2*tensor->stride[2]);
}
-void THCudaTensor_set4d(THCState *state, THCudaTensor *tensor, long x0, long x1, long x2, long x3, float value)
+void THCTensor_(set4d)(THCState *state, THCTensor *tensor, long x0, long x1, long x2, long x3, real value)
{
THArgCheck(tensor->nDimension == 4, 1, "tensor must have four dimensions");
THArgCheck((x0 >= 0) && (x0 < tensor->size[0]) && (x1 >= 0) && (x1 < tensor->size[1]) && (x2 >= 0) && (x2 < tensor->size[2]) && (x3 >= 0) && (x3 < tensor->size[3]), 2, "out of range");
- THCudaStorage_set(state, tensor->storage, tensor->storageOffset+x0*tensor->stride[0]+x1*tensor->stride[1]+x2*tensor->stride[2]+x3*tensor->stride[3], value);
+ THCStorage_(set)(state, tensor->storage, tensor->storageOffset+x0*tensor->stride[0]+x1*tensor->stride[1]+x2*tensor->stride[2]+x3*tensor->stride[3], value);
}
-float THCudaTensor_get4d(THCState *state, const THCudaTensor *tensor, long x0, long x1, long x2, long x3)
+real THCTensor_(get4d)(THCState *state, const THCTensor *tensor, long x0, long x1, long x2, long x3)
{
THArgCheck(tensor->nDimension == 4, 1, "tensor must have four dimensions");
THArgCheck((x0 >= 0) && (x0 < tensor->size[0]) && (x1 >= 0) && (x1 < tensor->size[1]) && (x2 >= 0) && (x2 < tensor->size[2]) && (x3 >= 0) && (x3 < tensor->size[3]), 2, "out of range");
- return THCudaStorage_get(state, tensor->storage, tensor->storageOffset+x0*tensor->stride[0]+x1*tensor->stride[1]+x2*tensor->stride[2]+x3*tensor->stride[3]);
+ return THCStorage_(get)(state, tensor->storage, tensor->storageOffset+x0*tensor->stride[0]+x1*tensor->stride[1]+x2*tensor->stride[2]+x3*tensor->stride[3]);
}
-int THCudaTensor_checkGPU(THCState *state, unsigned int nTensors, ...)
+int THCTensor_(checkGPU)(THCState *state, unsigned int nTensors, ...)
{
#ifdef DISABLE_CHECK_GPU
return 1; // Disable GPU checks.
@@ -798,11 +797,11 @@ int THCudaTensor_checkGPU(THCState *state, unsigned int nTensors, ...)
va_start(args, nTensors);
int valid = 1;
for (unsigned int i = 0; i < nTensors; i++) {
- THCudaTensor* tensor = va_arg(args, THCudaTensor*);
+ THCTensor* tensor = va_arg(args, THCTensor*);
if (tensor == NULL) {
continue;
}
- int tensorDev = THCudaTensor_getDevice(state, tensor);
+ int tensorDev = THCTensor_(getDevice)(state, tensor);
if (tensorDev != -1 && tensorDev != curDev) {
valid = 0;
break;
@@ -812,3 +811,5 @@ int THCudaTensor_checkGPU(THCState *state, unsigned int nTensors, ...)
return valid;
#endif
}
+
+#endif
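
The #endif added at the end (paired with the #ifndef THC_GENERIC_FILE / #else guard added at the top) means this file is no longer compiled on its own: a driver translation unit is expected to define THC_GENERIC_FILE and include the file once per element type, with real, THCTensor, and the THCTensor_() name macro set appropriately each time. A self-contained, hypothetical demo of that multiple-inclusion structure (none of the names below come from cutorch):

/* generic_demo.c -- illustrative only; demonstrates the guard structure
 * by including this same file once per element type. */
#ifndef THC_GENERIC_FILE

#include <stdio.h>

#define CONCAT_EXPAND(a, b) a##b
#define CONCAT(a, b) CONCAT_EXPAND(a, b)
#define Fn(NAME) CONCAT(Real, NAME)   /* e.g. FloatSum */

#define THC_GENERIC_FILE "generic_demo.c"

#define real float
#define Real Float
#include THC_GENERIC_FILE             /* emits FloatSum() */
#undef real
#undef Real

#define real double
#define Real Double
#include THC_GENERIC_FILE             /* emits DoubleSum() */
#undef real
#undef Real

int main(void) {
    printf("%f %f\n", (double)FloatSum(1.5f, 2.5f), DoubleSum(1.5, 2.5));
    return 0;
}

#else  /* this branch is seen only via the includes above */

static real Fn(Sum)(real a, real b) { return a + b; }

#endif
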