Skip to content

Commit

Permalink
make GC counters atomic
Browse files Browse the repository at this point in the history
fixes #31923
  • Loading branch information
JeffBezanson committed Jun 1, 2019
1 parent 123ff48 commit 5923179
Showing 1 changed file with 19 additions and 19 deletions.
38 changes: 19 additions & 19 deletions src/gc.c
Original file line number Diff line number Diff line change
Expand Up @@ -881,7 +881,7 @@ JL_DLLEXPORT jl_value_t *jl_gc_big_alloc(jl_ptls_t ptls, size_t sz)
#else
gc_num.allocd += allocsz;
#endif
gc_num.bigalloc++;
jl_atomic_fetch_add(&gc_num.bigalloc, 1);
#ifdef MEMDEBUG
memset(v, 0xee, allocsz);
#endif
Expand Down Expand Up @@ -973,7 +973,7 @@ void jl_gc_track_malloced_array(jl_ptls_t ptls, jl_array_t *a) JL_NOTSAFEPOINT

// Credit `sz` bytes of externally-allocated memory to the GC's allocation
// counter so collection heuristics account for it.
// The update is atomic because this can be called concurrently from
// multiple threads (see commit "make GC counters atomic", fixes #31923);
// the scraped diff left the old non-atomic `gc_num.allocd += sz;` line in
// place alongside the new one, which would double-count — only the atomic
// form belongs in the post-image.
void jl_gc_count_allocd(size_t sz) JL_NOTSAFEPOINT
{
    jl_atomic_fetch_add(&gc_num.allocd, sz);
}

void jl_gc_reset_alloc_count(void) JL_NOTSAFEPOINT
Expand Down Expand Up @@ -1098,8 +1098,8 @@ JL_DLLEXPORT jl_value_t *jl_gc_pool_alloc(jl_ptls_t ptls, int pool_offset,
#ifdef MEMDEBUG
return jl_gc_big_alloc(ptls, osize);
#endif
// FIXME - need JL_ATOMIC_FETCH_AND_ADD here
if (__unlikely((gc_num.allocd += osize) >= 0) || gc_debug_check_pool()) {
jl_atomic_fetch_add(&gc_num.allocd, osize);
if (__unlikely(gc_num.allocd >= 0) || gc_debug_check_pool()) {
//gc_num.allocd -= osize;
jl_gc_collect(0);
//gc_num.allocd += osize;
Expand Down Expand Up @@ -2999,8 +2999,8 @@ JL_DLLEXPORT void *jl_gc_counted_malloc(size_t sz)
{
jl_ptls_t ptls = jl_get_ptls_states();
maybe_collect(ptls);
gc_num.allocd += sz;
gc_num.malloc++;
jl_atomic_fetch_add(&gc_num.allocd, sz);
jl_atomic_fetch_add(&gc_num.malloc, 1);
void *b = malloc(sz);
if (b == NULL)
jl_throw(jl_memory_exception);
Expand All @@ -3011,8 +3011,8 @@ JL_DLLEXPORT void *jl_gc_counted_calloc(size_t nm, size_t sz)
{
jl_ptls_t ptls = jl_get_ptls_states();
maybe_collect(ptls);
gc_num.allocd += nm*sz;
gc_num.malloc++;
jl_atomic_fetch_add(&gc_num.allocd, nm*sz);
jl_atomic_fetch_add(&gc_num.malloc, 1);
void *b = calloc(nm, sz);
if (b == NULL)
jl_throw(jl_memory_exception);
Expand All @@ -3022,8 +3022,8 @@ JL_DLLEXPORT void *jl_gc_counted_calloc(size_t nm, size_t sz)
// Free `p` and record the release of `sz` bytes in the GC accounting:
// bumps the freed-bytes counter and the free-call counter.
// Counter updates are atomic because frees can race across threads
// (see commit "make GC counters atomic", fixes #31923); the scraped diff
// retained the old non-atomic `gc_num.freed += sz;` line next to the new
// atomic one, which would double-count freed bytes — only the atomic
// updates belong in the post-image.
JL_DLLEXPORT void jl_gc_counted_free_with_size(void *p, size_t sz)
{
    free(p);
    jl_atomic_fetch_add(&gc_num.freed, sz);
    jl_atomic_fetch_add(&gc_num.freecall, 1);
}

// older name for jl_gc_counted_free_with_size
Expand All @@ -3037,10 +3037,10 @@ JL_DLLEXPORT void *jl_gc_counted_realloc_with_old_size(void *p, size_t old, size
jl_ptls_t ptls = jl_get_ptls_states();
maybe_collect(ptls);
if (sz < old)
gc_num.freed += (old - sz);
jl_atomic_fetch_add(&gc_num.freed, (old - sz));
else
gc_num.allocd += (sz - old);
gc_num.realloc++;
jl_atomic_fetch_add(&gc_num.allocd, (sz - old));
jl_atomic_fetch_add(&gc_num.realloc, 1);
void *b = realloc(p, sz);
if (b == NULL)
jl_throw(jl_memory_exception);
Expand Down Expand Up @@ -3100,8 +3100,8 @@ JL_DLLEXPORT void *jl_gc_managed_malloc(size_t sz)
size_t allocsz = LLT_ALIGN(sz, JL_CACHE_BYTE_ALIGNMENT);
if (allocsz < sz) // overflow in adding offs, size was "negative"
jl_throw(jl_memory_exception);
gc_num.allocd += allocsz;
gc_num.malloc++;
jl_atomic_fetch_add(&gc_num.allocd, allocsz);
jl_atomic_fetch_add(&gc_num.malloc, 1);
void *b = malloc_cache_align(allocsz);
if (b == NULL)
jl_throw(jl_memory_exception);
Expand All @@ -3120,13 +3120,13 @@ static void *gc_managed_realloc_(jl_ptls_t ptls, void *d, size_t sz, size_t olds

if (jl_astaggedvalue(owner)->bits.gc == GC_OLD_MARKED) {
ptls->gc_cache.perm_scanned_bytes += allocsz - oldsz;
live_bytes += allocsz - oldsz;
jl_atomic_fetch_add(&live_bytes, allocsz - oldsz);
}
else if (allocsz < oldsz)
gc_num.freed += (oldsz - allocsz);
jl_atomic_fetch_add(&gc_num.freed, (oldsz - allocsz));
else
gc_num.allocd += (allocsz - oldsz);
gc_num.realloc++;
jl_atomic_fetch_add(&gc_num.allocd, (allocsz - oldsz));
jl_atomic_fetch_add(&gc_num.realloc, 1);

void *b;
if (isaligned)
Expand Down

0 comments on commit 5923179

Please sign in to comment.