From cf1f1e8dca4c7942f7c988040b3c7bd8c6e86213 Mon Sep 17 00:00:00 2001
From: Valentin Churavy
Date: Fri, 19 May 2017 22:26:51 +0900
Subject: [PATCH] remove jl_alignment in favour of datatype_alignment

---
 base/atomics.jl      | 17 +++++++----------
 src/julia_internal.h |  2 --
 src/threading.c      |  9 ---------
 3 files changed, 7 insertions(+), 21 deletions(-)

diff --git a/base/atomics.jl b/base/atomics.jl
index 8ab283db82a16..505f1a73a26c2 100644
--- a/base/atomics.jl
+++ b/base/atomics.jl
@@ -2,7 +2,7 @@
 
 using Core.Intrinsics: llvmcall
 
-import Base: setindex!, getindex, unsafe_convert
+import Base: setindex!, getindex, unsafe_convert, datatype_alignment
 import Base.Sys: ARCH, WORD_SIZE
 
 export
@@ -321,9 +321,6 @@ inttype(::Type{Float16}) = Int16
 inttype(::Type{Float32}) = Int32
 inttype(::Type{Float64}) = Int64
 
-
-alignment(::Type{T}) where {T} = ccall(:jl_alignment, Cint, (Any,), T)
-
 # All atomic operations have acquire and/or release semantics, depending on
 # whether the load or store values. Most of the time, this is what one wants
 # anyway, and it's only moderately expensive on most hardware.
@@ -335,31 +332,31 @@ for typ in atomictypes
     if VersionNumber(Base.libllvm_version) >= v"3.8"
         @eval getindex(x::Atomic{$typ}) =
             llvmcall($"""
-                     %rv = load atomic $rt %0 acquire, align $(alignment(typ))
+                     %rv = load atomic $rt %0 acquire, align $(datatype_alignment(typ))
                      ret $lt %rv
                      """, $typ, Tuple{Ptr{$typ}}, unsafe_convert(Ptr{$typ}, x))
         @eval setindex!(x::Atomic{$typ}, v::$typ) =
             llvmcall($"""
-                     store atomic $lt %1, $lt* %0 release, align $(alignment(typ))
+                     store atomic $lt %1, $lt* %0 release, align $(datatype_alignment(typ))
                      ret void
                      """, Void, Tuple{Ptr{$typ},$typ}, unsafe_convert(Ptr{$typ}, x), v)
     else
         if typ <: Integer
             @eval getindex(x::Atomic{$typ}) =
                 llvmcall($"""
-                         %rv = load atomic $rt %0 acquire, align $(alignment(typ))
+                         %rv = load atomic $rt %0 acquire, align $(datatype_alignment(typ))
                          ret $lt %rv
                          """, $typ, Tuple{Ptr{$typ}}, unsafe_convert(Ptr{$typ}, x))
             @eval setindex!(x::Atomic{$typ}, v::$typ) =
                 llvmcall($"""
-                         store atomic $lt %1, $lt* %0 release, align $(alignment(typ))
+                         store atomic $lt %1, $lt* %0 release, align $(datatype_alignment(typ))
                          ret void
                          """, Void, Tuple{Ptr{$typ},$typ}, unsafe_convert(Ptr{$typ}, x), v)
         else
             @eval getindex(x::Atomic{$typ}) =
                 llvmcall($"""
                          %iptr = bitcast $lt* %0 to $ilt*
-                         %irv = load atomic $irt %iptr acquire, align $(alignment(typ))
+                         %irv = load atomic $irt %iptr acquire, align $(datatype_alignment(typ))
                          %rv = bitcast $ilt %irv to $lt
                          ret $lt %rv
                          """, $typ, Tuple{Ptr{$typ}}, unsafe_convert(Ptr{$typ}, x))
@@ -367,7 +364,7 @@ for typ in atomictypes
                 llvmcall($"""
                          %iptr = bitcast $lt* %0 to $ilt*
                          %ival = bitcast $lt %1 to $ilt
-                         store atomic $ilt %ival, $ilt* %iptr release, align $(alignment(typ))
+                         store atomic $ilt %ival, $ilt* %iptr release, align $(datatype_alignment(typ))
                          ret void
                          """, Void, Tuple{Ptr{$typ},$typ}, unsafe_convert(Ptr{$typ}, x), v)
         end
diff --git a/src/julia_internal.h b/src/julia_internal.h
index b3d20297de8de..63f855bc55c81 100644
--- a/src/julia_internal.h
+++ b/src/julia_internal.h
@@ -132,8 +132,6 @@ extern jl_mutex_t gc_perm_lock;
 void *jl_gc_perm_alloc_nolock(size_t sz, int zero);
 void *jl_gc_perm_alloc(size_t sz, int zero);
 
-JL_DLLEXPORT int jl_alignment(void* ty);
-
 #define JL_SMALL_BYTE_ALIGNMENT 16
 #define JL_CACHE_BYTE_ALIGNMENT 64
 #define GC_MAX_SZCLASS (2032-sizeof(void*))
diff --git a/src/threading.c b/src/threading.c
index 6e651de81601b..ba9f8a2c33307 100644
--- a/src/threading.c
+++ b/src/threading.c
@@ -832,15 +832,6 @@ void jl_init_threading(void)
 void jl_start_threads(void) { }
 
 #endif // !JULIA_ENABLE_THREADING
-
-// Make gc alignment available for threading
-// see threads.jl alignment
-JL_DLLEXPORT int jl_alignment(void* ty)
-{
-    assert(jl_is_datatype(ty));
-    return jl_datatype_align(ty);
-}
-
 #ifdef __cplusplus
 }
 #endif
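---
Note (illustration, not part of the patch): the removed Julia helper
fetched the alignment through a ccall into the C runtime, while
Base.datatype_alignment computes the same value from the DataType
layout in Julia itself, which is why the C-side jl_alignment export can
be dropped. A minimal sketch of the equivalence, assuming a pre-patch
Julia 0.6-era build where the jl_alignment C symbol is still exported;
old_alignment is a hypothetical name used only for this comparison:

    import Base: datatype_alignment

    # Pre-patch spelling: alignment via a ccall into libjulia
    # (works only while jl_alignment is still exported, i.e. before
    # this patch is applied).
    old_alignment(::Type{T}) where {T} = ccall(:jl_alignment, Cint, (Any,), T)

    # The two should agree for the primitive types used by Threads.Atomic.
    for T in (Int8, Int32, Int64, Float32, Float64)
        @assert datatype_alignment(T) == old_alignment(T)
    end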