
github.com/torch/cutorch.git
author     Jeff Johnson <jhj@fb.com>              2016-06-11 03:37:24 +0300
committer  Soumith Chintala <soumith@gmail.com>   2016-07-29 06:55:26 +0300
commit     20001aca5b7e49c414011938a2d79637924f2888 (patch)
tree       0785a0721c3b190fc51eab4f92bc969aa09d8dc8 /TensorOperator.c
parent     630d9ebc1bd2b057dcc1030a4429baf266d9f22f (diff)

reduce and BLAS work
Diffstat (limited to 'TensorOperator.c')
-rw-r--r--  TensorOperator.c  182
1 file changed, 7 insertions(+), 175 deletions(-)
diff --git a/TensorOperator.c b/TensorOperator.c
index bbd33b5..ae7c2b3 100644
--- a/TensorOperator.c
+++ b/TensorOperator.c
@@ -2,180 +2,12 @@
#include "luaT.h"
#include "THC.h"
-static int cutorch_CudaTensorOperator___add__(lua_State *L)
-{
- THCudaTensor *tensor1 = luaT_toudata(L, 1, "torch.CudaTensor");
- THCudaTensor *tensor2 = luaT_toudata(L, 2, "torch.CudaTensor");
- THCudaTensor *r;
- THCState *state = cutorch_getstate(L);
- THAssert(THCudaTensor_checkGPU(state, 2, tensor1, tensor2));
+#include "THCTensorMath.h"
- if(!tensor1 && !tensor2)
- luaL_error(L, "expecting two Tensors or one Tensor and one number");
- else
- {
- r = THCudaTensor_new(state);
- luaT_pushudata(L, r, "torch.CudaTensor");
+#define cutorch_TensorOperator_(NAME) TH_CONCAT_4(cutorch_,CReal,TensorOperator_,NAME)
+#define torch_Tensor_(NAME) TH_CONCAT_4(torch_,CReal,Tensor_,NAME)
+#define torch_Tensor TH_CONCAT_STRING_3(torch.,CReal,Tensor)
+#define cutorch_Tensor_(NAME) TH_CONCAT_4(cutorch_,CReal,Tensor_,NAME)
- if(!tensor1 && tensor2)
- {
- THCudaTensor_resizeAs(state, r, tensor2);
- THCudaTensor_copy(state, r, tensor2);
- THCudaTensor_add(state, r, r, luaL_checknumber(L, 1));
- }
- else if(tensor1 && !tensor2)
- {
- THCudaTensor_resizeAs(state, r, tensor1);
- THCudaTensor_copy(state, r, tensor1);
- THCudaTensor_add(state, r, r, luaL_checknumber(L, 2));
- }
- else
- {
- THCudaTensor_resizeAs(state, r, tensor1);
- THCudaTensor_copy(state, r, tensor1);
- THCudaTensor_cadd(state, r, r, 1, tensor2);
- }
- }
- return 1;
-}
-
-static int cutorch_CudaTensorOperator___sub__(lua_State *L)
-{
- THCudaTensor *tensor1 = luaT_toudata(L, 1, "torch.CudaTensor");
- THCudaTensor *tensor2 = luaT_toudata(L, 2, "torch.CudaTensor");
- THCudaTensor *r;
- THCState *state = cutorch_getstate(L);
- THAssert(THCudaTensor_checkGPU(state, 2, tensor1, tensor2));
-
- if(!tensor1 && !tensor2)
- luaL_error(L, "expecting two Tensors or one Tensor and one number");
- else
- {
- r = THCudaTensor_new(state);
- luaT_pushudata(L, r, "torch.CudaTensor");
-
- if(!tensor1 && tensor2)
- {
- THCudaTensor_resizeAs(state, r, tensor2);
- THCudaTensor_fill(state, r, luaL_checknumber(L, 1));
- THCudaTensor_cadd(state, r, r, -1, tensor2);
- }
- else if(tensor1 && !tensor2)
- {
- THCudaTensor_resizeAs(state, r, tensor1);
- THCudaTensor_copy(state, r, tensor1);
- THCudaTensor_add(state, r, r, -luaL_checknumber(L, 2));
- }
- else
- {
- THCudaTensor_resizeAs(state, r, tensor1);
- THCudaTensor_copy(state, r, tensor1);
- THCudaTensor_cadd(state, r, r, -1, tensor2);
- }
- }
- return 1;
-}
-
-static int cutorch_CudaTensorOperator___unm__(lua_State *L)
-{
- THCudaTensor *tensor = luaT_checkudata(L, 1, "torch.CudaTensor");
- THCudaTensor *r;
- THCState *state = cutorch_getstate(L);
- THAssert(THCudaTensor_checkGPU(state, 1, tensor));
-
- r = THCudaTensor_new(state);
- luaT_pushudata(L, r, "torch.CudaTensor");
- THCudaTensor_resizeAs(state, r, tensor);
- THCudaTensor_copy(state, r, tensor);
- THCudaTensor_mul(state, r, r, -1);
-
- return 1;
-}
-
-static int cutorch_CudaTensorOperator___mul__(lua_State *L)
-{
- THCudaTensor *tensor1 = luaT_toudata(L, 1, "torch.CudaTensor");
- THCudaTensor *tensor2 = luaT_toudata(L, 2, "torch.CudaTensor");
- THCudaTensor *r;
- THCState *state = cutorch_getstate(L);
- THAssert(THCudaTensor_checkGPU(state, 2, tensor1, tensor2));
-
- if(!tensor1 && !tensor2)
- luaL_error(L, "expecting two Tensors or one Tensor and one number");
- else
- {
- r = THCudaTensor_new(state);
- luaT_pushudata(L, r, "torch.CudaTensor");
-
- if(!tensor1 && tensor2)
- {
- THCudaTensor_resizeAs(state, r, tensor2);
- THCudaTensor_copy(state, r, tensor2);
- THCudaTensor_mul(state, r, r, luaL_checknumber(L, 1));
- }
- else if(tensor1 && !tensor2)
- {
- THCudaTensor_resizeAs(state, r, tensor1);
- THCudaTensor_copy(state, r, tensor1);
- THCudaTensor_mul(state, r, r, luaL_checknumber(L, 2));
- }
- else
- {
- int dimt = tensor1->nDimension;
- int dims = tensor2->nDimension;
-
- if(dimt == 1 && dims == 1)
- lua_pushnumber(L, THCudaTensor_dot(state, tensor1, tensor2)); /* ok, we wasted r, but who cares */
- else if(dimt == 2 && dims == 1)
- {
- THCudaTensor_resize1d(state, r, tensor1->size[0]);
- THCudaTensor_zero(state, r);
- THCudaTensor_addmv(state, r, 1, r, 1, tensor1, tensor2);
- }
- else if(dimt == 2 && dims == 2)
- {
- THCudaTensor_resize2d(state, r, tensor1->size[0], tensor2->size[1]);
- THCudaTensor_zero(state, r);
- THCudaTensor_addmm(state, r, 1, r, 1, tensor1, tensor2);
- }
- else
- luaL_error(L, "multiplication between %dD and %dD tensors not yet supported", tensor1->nDimension, tensor2->nDimension);
- }
- }
- return 1;
-}
-
-static int cutorch_CudaTensorOperator___div__(lua_State *L)
-{
- THCudaTensor *tensor = luaT_checkudata(L, 1, "torch.CudaTensor");
- THCudaTensor *r;
- THCState *state = cutorch_getstate(L);
- THAssert(THCudaTensor_checkGPU(state, 1, tensor));
-
- luaL_argcheck(L, lua_isnumber(L,2), 2, "number expected");
-
- r = THCudaTensor_new(state);
- luaT_pushudata(L, r, "torch.CudaTensor");
-
- THCudaTensor_resizeAs(state, r, tensor);
- THCudaTensor_copy(state, r, tensor);
- THCudaTensor_mul(state, r, r, 1/lua_tonumber(L, 2));
-
- return 1;
-}
-
-static const struct luaL_Reg cutorch_CudaTensorOperator__ [] = {
- {"__add__", cutorch_CudaTensorOperator___add__},
- {"__sub__", cutorch_CudaTensorOperator___sub__},
- {"__unm__", cutorch_CudaTensorOperator___unm__},
- {"__mul__", cutorch_CudaTensorOperator___mul__},
- {"__div__", cutorch_CudaTensorOperator___div__},
- {NULL, NULL}
-};
-
-void cutorch_CudaTensorOperator_init(lua_State *L)
-{
- luaT_pushmetatable(L, "torch.CudaTensor");
- luaL_setfuncs(L, cutorch_CudaTensorOperator__, 0);
- lua_pop(L, 1);
-}
+#include "generic/TensorOperator.c"
+#include "THCGenerateAllTypes.h"