From 5923179519e49597ecb64797bfe13e04ca607dc6 Mon Sep 17 00:00:00 2001
From: Jeff Bezanson
Date: Sat, 1 Jun 2019 13:51:50 -0400
Subject: [PATCH] make GC counters atomic

fixes #31923
---
 src/gc.c | 38 +++++++++++++++++++-------------------
 1 file changed, 19 insertions(+), 19 deletions(-)

diff --git a/src/gc.c b/src/gc.c
index 0fe4b1fdc460a..c5b3402e77763 100644
--- a/src/gc.c
+++ b/src/gc.c
@@ -881,7 +881,7 @@ JL_DLLEXPORT jl_value_t *jl_gc_big_alloc(jl_ptls_t ptls, size_t sz)
 #else
     gc_num.allocd += allocsz;
 #endif
-    gc_num.bigalloc++;
+    jl_atomic_fetch_add(&gc_num.bigalloc, 1);
 #ifdef MEMDEBUG
     memset(v, 0xee, allocsz);
 #endif
@@ -973,7 +973,7 @@ void jl_gc_track_malloced_array(jl_ptls_t ptls, jl_array_t *a) JL_NOTSAFEPOINT
 
 void jl_gc_count_allocd(size_t sz) JL_NOTSAFEPOINT
 {
-    gc_num.allocd += sz;
+    jl_atomic_fetch_add(&gc_num.allocd, sz);
 }
 
 void jl_gc_reset_alloc_count(void) JL_NOTSAFEPOINT
@@ -1098,8 +1098,8 @@ JL_DLLEXPORT jl_value_t *jl_gc_pool_alloc(jl_ptls_t ptls, int pool_offset,
 #ifdef MEMDEBUG
     return jl_gc_big_alloc(ptls, osize);
 #endif
-    // FIXME - need JL_ATOMIC_FETCH_AND_ADD here
-    if (__unlikely((gc_num.allocd += osize) >= 0) || gc_debug_check_pool()) {
+    jl_atomic_fetch_add(&gc_num.allocd, osize);
+    if (__unlikely(gc_num.allocd >= 0) || gc_debug_check_pool()) {
         //gc_num.allocd -= osize;
         jl_gc_collect(0);
         //gc_num.allocd += osize;
@@ -2999,8 +2999,8 @@ JL_DLLEXPORT void *jl_gc_counted_malloc(size_t sz)
 {
     jl_ptls_t ptls = jl_get_ptls_states();
     maybe_collect(ptls);
-    gc_num.allocd += sz;
-    gc_num.malloc++;
+    jl_atomic_fetch_add(&gc_num.allocd, sz);
+    jl_atomic_fetch_add(&gc_num.malloc, 1);
     void *b = malloc(sz);
     if (b == NULL)
         jl_throw(jl_memory_exception);
@@ -3011,8 +3011,8 @@ JL_DLLEXPORT void *jl_gc_counted_calloc(size_t nm, size_t sz)
 {
     jl_ptls_t ptls = jl_get_ptls_states();
     maybe_collect(ptls);
-    gc_num.allocd += nm*sz;
-    gc_num.malloc++;
+    jl_atomic_fetch_add(&gc_num.allocd, nm*sz);
+    jl_atomic_fetch_add(&gc_num.malloc, 1);
     void *b = calloc(nm, sz);
     if (b == NULL)
         jl_throw(jl_memory_exception);
@@ -3022,8 +3022,8 @@ JL_DLLEXPORT void *jl_gc_counted_calloc(size_t nm, size_t sz)
 JL_DLLEXPORT void jl_gc_counted_free_with_size(void *p, size_t sz)
 {
     free(p);
-    gc_num.freed += sz;
-    gc_num.freecall++;
+    jl_atomic_fetch_add(&gc_num.freed, sz);
+    jl_atomic_fetch_add(&gc_num.freecall, 1);
 }
 
 // older name for jl_gc_counted_free_with_size
@@ -3037,10 +3037,10 @@ JL_DLLEXPORT void *jl_gc_counted_realloc_with_old_size(void *p, size_t old, size
     jl_ptls_t ptls = jl_get_ptls_states();
     maybe_collect(ptls);
     if (sz < old)
-        gc_num.freed += (old - sz);
+        jl_atomic_fetch_add(&gc_num.freed, (old - sz));
     else
-        gc_num.allocd += (sz - old);
-    gc_num.realloc++;
+        jl_atomic_fetch_add(&gc_num.allocd, (sz - old));
+    jl_atomic_fetch_add(&gc_num.realloc, 1);
     void *b = realloc(p, sz);
     if (b == NULL)
         jl_throw(jl_memory_exception);
@@ -3100,8 +3100,8 @@ JL_DLLEXPORT void *jl_gc_managed_malloc(size_t sz)
     size_t allocsz = LLT_ALIGN(sz, JL_CACHE_BYTE_ALIGNMENT);
     if (allocsz < sz) // overflow in adding offs, size was "negative"
         jl_throw(jl_memory_exception);
-    gc_num.allocd += allocsz;
-    gc_num.malloc++;
+    jl_atomic_fetch_add(&gc_num.allocd, allocsz);
+    jl_atomic_fetch_add(&gc_num.malloc, 1);
     void *b = malloc_cache_align(allocsz);
     if (b == NULL)
         jl_throw(jl_memory_exception);
@@ -3120,13 +3120,13 @@ static void *gc_managed_realloc_(jl_ptls_t ptls, void *d, size_t sz, size_t olds
 
     if (jl_astaggedvalue(owner)->bits.gc == GC_OLD_MARKED) {
         ptls->gc_cache.perm_scanned_bytes += allocsz - oldsz;
-        live_bytes += allocsz - oldsz;
+        jl_atomic_fetch_add(&live_bytes, allocsz - oldsz);
     }
     else if (allocsz < oldsz)
-        gc_num.freed += (oldsz - allocsz);
+        jl_atomic_fetch_add(&gc_num.freed, (oldsz - allocsz));
     else
-        gc_num.allocd += (allocsz - oldsz);
-    gc_num.realloc++;
+        jl_atomic_fetch_add(&gc_num.allocd, (allocsz - oldsz));
+    jl_atomic_fetch_add(&gc_num.realloc, 1);
 
     void *b;
     if (isaligned)
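
A minimal standalone sketch of the race these changes address, using a hypothetical
shared counter rather than Julia's actual gc_num machinery, and C11 stdatomic in
place of the jl_atomic_* wrappers. Two threads bump the same counter: the plain
`+=` version loses increments, while the atomic fetch-add does not.

/* Build with: cc -std=c11 -pthread race.c
 * Illustrative only; counter names and thread count are made up. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define ITERS 1000000

static long plain_counter = 0;          /* racy, like gc_num.malloc++ */
static atomic_long atomic_counter = 0;  /* safe, like jl_atomic_fetch_add */

static void *bump(void *arg)
{
    (void)arg;
    for (int i = 0; i < ITERS; i++) {
        plain_counter += 1;   /* non-atomic read-modify-write: updates can be lost */
        atomic_fetch_add_explicit(&atomic_counter, 1, memory_order_relaxed);
    }
    return NULL;
}

int main(void)
{
    pthread_t t1, t2;
    pthread_create(&t1, NULL, bump, NULL);
    pthread_create(&t2, NULL, bump, NULL);
    pthread_join(t1, NULL);
    pthread_join(t2, NULL);
    /* plain_counter typically ends up below 2*ITERS; atomic_counter is exact */
    printf("plain:  %ld (expected %d)\n", plain_counter, 2 * ITERS);
    printf("atomic: %ld (expected %d)\n", (long)atomic_load(&atomic_counter), 2 * ITERS);
    return 0;
}

One design note: when the updated value feeds a branch, as in the jl_gc_pool_alloc
hunk's `allocd >= 0` test, the return value of the fetch-add (old value plus the
addend) is the atomic way to obtain the post-update count; re-reading the counter
afterwards can observe increments from other threads.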