github.com/torch/cutorch.git
author     Boris Fomitchev <borisfom@users.noreply.github.com>   2016-11-24 01:38:57 +0300
committer  Soumith Chintala <soumith@gmail.com>                  2016-11-24 01:38:57 +0300
commit     f5932241e86087821a4c61dbde2c39a03d7c9883 (patch)
tree       e36533a796dc81b1c3cf75a7bc04ea41006891c0 /test
parent     2d75d411560df62f4ac291143f6b0f2e15378031 (diff)
Implemented cudaMemGetInfo for caching allocator (#600)
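
Note (not part of the patch): the memory query exercised by this test is cutorch.getMemoryUsage(), which returns free and total device memory in bytes. The sketch below assumes the caching allocator is enabled (e.g. via the THC_CACHING_ALLOCATOR environment variable) so that the free figure reported comes through the new cudaMemGetInfo path rather than the raw CUDA query.

    require 'cutorch'

    local Gig = 1024*1024*1024

    -- Free/total device memory in bytes for the current device.
    local free, total = cutorch.getMemoryUsage()
    print(string.format('free %.3f GiB / total %.3f GiB', free/Gig, total/Gig))

    -- Allocate and release a tensor; with the caching allocator active, the
    -- freed block may stay cached rather than being returned to the driver,
    -- which is what the reported free figure has to account for.
    local t = torch.CudaTensor(100000000):zero()
    t = nil
    collectgarbage()
    print(cutorch.getMemoryUsage())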
Diffstat (limited to 'test')
-rw-r--r--  test/test_shutdown.lua | 61
1 file changed, 56 insertions, 5 deletions
diff --git a/test/test_shutdown.lua b/test/test_shutdown.lua
index e78a51e..750df06 100644
--- a/test/test_shutdown.lua
+++ b/test/test_shutdown.lua
@@ -1,13 +1,64 @@
local Threads = require 'threads'
require 'cutorch'
-print ("cutorch.hasHalf is ", cutorch.hasHalf)
+local function test_cudaEvent()
+   cutorch.reserveStreams(2)
+   cutorch.setStream(1)
+
+   local t1 = torch.CudaTensor(10000000):zero()
+   local t2 = torch.CudaTensor(1):zero()
+
+   local t1View = t1:narrow(1, 10000000, 1)
+   t1:fill(1)
+
+   -- Event is created (and recorded on the current stream) here
+   local event = cutorch.Event()
+
+   cutorch.setStream(2)
+
+   -- without this wait, the copy below could read t1View before the fill on stream 1 completes
+   event:waitOn()
+   t2:copy(t1View)
+   -- revert to default stream
+   cutorch.setStream(0)
+end
+
+local Gig = 1024*1024*1024
+
+local function test_getMemInfo()
+   local sz = Gig*0.1
+   local t1 = torch.CudaTensor(sz):zero()
+   print('Memory usage after 1st allocation [free memory], [total memory]')
+   local free, total = cutorch.getMemoryUsage()
+   print(free/Gig, total/Gig)
+   local t2 = torch.CudaTensor(sz*1.3):zero()
+   print('Memory usage after 2nd allocation [free memory], [total memory]')
+   local free, total = cutorch.getMemoryUsage()
+   print(free/Gig, total/Gig)
+   t1 = nil
+   collectgarbage()
+   print('Memory usage after 1st deallocation [free memory], [total memory]')
+   local free, total = cutorch.getMemoryUsage()
+   print(free/Gig, total/Gig)
+   t2 = nil
+   collectgarbage()
+   print('Memory usage after 2nd deallocation [free memory], [total memory]')
+   free, total = cutorch.getMemoryUsage()
+   print(free/Gig, total/Gig)
+end
+
+print ("cutorch.hasHalf is ", cutorch.hasHalf)
print('Memory usage before initialization of threads [free memory], [total memory]')
-print(cutorch.getMemoryUsage())
-threads = Threads(100, function() require 'cutorch' end)
+local free, total = cutorch.getMemoryUsage()
+print(free/Gig, total/Gig)
+threads = Threads(20, function() require 'cutorch'; test_getMemInfo(); test_cudaEvent(); end)
print('Memory usage after initialization of threads [free memory], [total memory]')
-print(cutorch.getMemoryUsage())
+free, total = cutorch.getMemoryUsage()
+print(free/Gig, total/Gig)
threads:terminate()
+collectgarbage()
print('Memory usage after termination of threads [free memory], [total memory]')
-print(cutorch.getMemoryUsage())
+free, total = cutorch.getMemoryUsage()
+print(free/Gig, total/Gig)
+
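
Usage note (an assumption, not stated in the patch): with a standard Torch install providing the 'threads' and 'cutorch' packages, the updated test can be run directly with the Torch interpreter as th test/test_shutdown.lua, optionally with the caching allocator enabled through THC_CACHING_ALLOCATOR=1. Each of the 20 worker threads prints free/total memory (in GiB) around its own allocations and runs the event-synchronization check, while the main thread prints the same figures before thread initialization, after initialization, and after termination.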