Skip to content
This repository has been archived by the owner on Aug 3, 2021. It is now read-only.

Commit

Permalink
Bugfix, test extended
Browse files Browse the repository at this point in the history
  • Loading branch information
borisfom committed Nov 15, 2016
1 parent 9aef731 commit 9a6ba41
Show file tree
Hide file tree
Showing 3 changed files with 35 additions and 9 deletions.
2 changes: 1 addition & 1 deletion lib/THC/THCCachingAllocator.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -212,7 +212,7 @@ struct THCCachingAllocator
auto it = blocks.lower_bound(&search_key);
for (;it != blocks.end() && *it && (*it)->device == dev_id; ++it) {
size_t blocksize = (*it)->size;
total += blocksize;
*total += blocksize;
if (blocksize > *largest)
*largest = blocksize;
}
Expand Down
2 changes: 1 addition & 1 deletion lib/THC/THCGeneral.c
Original file line number Diff line number Diff line change
Expand Up @@ -732,7 +732,7 @@ cudaError_t THCudaMemGetInfo(THCState *state, size_t* freeBytes, size_t* totalB

if (allocator->cacheInfo != NULL)
allocator->cacheInfo(allocator->state, device, &cachedBytes, &largestBlock);

/* Adjust resulting free bytes number. largestBlock unused for now */
*freeBytes += cachedBytes;
return cudaSuccess;
Expand Down
40 changes: 33 additions & 7 deletions test/test_shutdown.lua
Original file line number Diff line number Diff line change
Expand Up @@ -10,8 +10,6 @@ local function test_cudaEvent()

local t1View = t1:narrow(1, 10000000, 1)
t1:fill(1)
print('Memory usage after some allocations [free memory], [total memory]')
print(cutorch.getMemoryUsage())

-- Event is created here
local event = cutorch.Event()
Expand All @@ -26,13 +24,41 @@ local function test_cudaEvent()
cutorch.setStream(0)
end

-- Report half-precision support, then define the unit used for all
-- memory figures below (bytes per gibibyte).
print("cutorch.hasHalf is ", cutorch.hasHalf)
local Gig = 2 ^ 30

--- Exercise cutorch.getMemoryUsage() across an allocate/free cycle.
-- Allocates two large CUDA tensors (~0.1 Gig and ~0.13 Gig elements),
-- then releases them one at a time, printing free/total device memory
-- (in Gig) after each step so cache behavior can be inspected by eye.
-- Fix: cutorch.getMemoryUsage() returns (freeMemory, totalMemory);
-- the previous code bound the results as (total, free), so every
-- printed pair was swapped relative to its label.
local function test_getMemInfo()
   -- Print one labeled free/total snapshot of device memory.
   local function report(label)
      print(label .. ' [free memory], [total memory]')
      local free, total = cutorch.getMemoryUsage()
      print(free / Gig, total / Gig)
   end

   local sz = Gig * 0.1
   local t1 = torch.CudaTensor(sz):zero()
   report('Memory usage after 1st allocation')
   local t2 = torch.CudaTensor(sz * 1.3):zero()
   report('Memory usage after 2nd allocation')
   t1 = nil
   collectgarbage()
   report('Memory usage after 1st deallocation')
   t2 = nil
   collectgarbage()
   report('Memory usage after 2nd deallocation')
end

print("cutorch.hasHalf is ", cutorch.hasHalf)

-- Fix: cutorch.getMemoryUsage() returns (freeMemory, totalMemory); the
-- previous code bound the results as (total, free), printing the values
-- swapped relative to the '[free memory], [total memory]' labels.
print('Memory usage before initialization of threads [free memory], [total memory]')
local free, total = cutorch.getMemoryUsage()
print(free / Gig, total / Gig)

-- Spawn a worker pool; each thread runs the memory-info and CUDA-event
-- tests on its own cutorch instance. Kept global, as before, so it is
-- reachable for the terminate() below.
threads = Threads(20, function() require 'cutorch'; test_getMemInfo(); test_cudaEvent(); end)
print('Memory usage after initialization of threads [free memory], [total memory]')
free, total = cutorch.getMemoryUsage()
print(free / Gig, total / Gig)

-- Shut the pool down and collect, then confirm device memory recovers.
threads:terminate()
collectgarbage()
print('Memory usage after termination of threads [free memory], [total memory]')
free, total = cutorch.getMemoryUsage()
print(free / Gig, total / Gig)

0 comments on commit 9a6ba41

Please sign in to comment.