diff --git a/src/julia_internal.h b/src/julia_internal.h
index af463a86fc5f4..796d402ee470e 100644
--- a/src/julia_internal.h
+++ b/src/julia_internal.h
@@ -1019,37 +1019,12 @@ void jl_register_fptrs(uint64_t sysimage_base, const struct _jl_sysimg_fptrs_t *
 
 extern arraylist_t partial_inst;
 
-STATIC_INLINE uint64_t jl_load_unaligned_i64(const void *ptr) JL_NOTSAFEPOINT
-{
-    uint64_t val;
-    memcpy(&val, ptr, 8);
-    return val;
-}
-STATIC_INLINE uint32_t jl_load_unaligned_i32(const void *ptr) JL_NOTSAFEPOINT
-{
-    uint32_t val;
-    memcpy(&val, ptr, 4);
-    return val;
-}
-STATIC_INLINE uint16_t jl_load_unaligned_i16(const void *ptr) JL_NOTSAFEPOINT
-{
-    uint16_t val;
-    memcpy(&val, ptr, 2);
-    return val;
-}
-
-STATIC_INLINE void jl_store_unaligned_i64(void *ptr, uint64_t val) JL_NOTSAFEPOINT
-{
-    memcpy(ptr, &val, 8);
-}
-STATIC_INLINE void jl_store_unaligned_i32(void *ptr, uint32_t val) JL_NOTSAFEPOINT
-{
-    memcpy(ptr, &val, 4);
-}
-STATIC_INLINE void jl_store_unaligned_i16(void *ptr, uint16_t val) JL_NOTSAFEPOINT
-{
-    memcpy(ptr, &val, 2);
-}
+STATIC_INLINE uint64_t jl_load_unaligned_i64(const void *ptr) JL_NOTSAFEPOINT;
+STATIC_INLINE uint32_t jl_load_unaligned_i32(const void *ptr) JL_NOTSAFEPOINT;
+STATIC_INLINE uint16_t jl_load_unaligned_i16(const void *ptr) JL_NOTSAFEPOINT;
+STATIC_INLINE void jl_store_unaligned_i64(void *ptr, uint64_t val) JL_NOTSAFEPOINT;
+STATIC_INLINE void jl_store_unaligned_i32(void *ptr, uint32_t val) JL_NOTSAFEPOINT;
+STATIC_INLINE void jl_store_unaligned_i16(void *ptr, uint16_t val) JL_NOTSAFEPOINT;
 
 #if jl_has_builtin(__builtin_assume_aligned) || defined(_COMPILER_GCC_)
 #define jl_assume_aligned(ptr, align) __builtin_assume_aligned(ptr, align)
diff --git a/src/support/MurmurHash3.c b/src/support/MurmurHash3.c
index a89ac1ee9af36..48b63e6d0d0fc 100644
--- a/src/support/MurmurHash3.c
+++ b/src/support/MurmurHash3.c
@@ -48,20 +48,6 @@ static inline uint64_t rotl64 ( uint64_t x, int8_t r )
 
 #endif // !defined(_MSC_VER)
 
-//-----------------------------------------------------------------------------
-// Block read - if your platform needs to do endian-swapping or can only
-// handle aligned reads, do the conversion here
-
-FORCE_INLINE uint32_t getblock32 ( const uint32_t * p, int i )
-{
-  return p[i];
-}
-
-FORCE_INLINE uint64_t getblock64 ( const uint64_t * p, int i )
-{
-  return p[i];
-}
-
 //-----------------------------------------------------------------------------
 // Finalization mix - force all bits of a hash block to avalanche
 
@@ -109,7 +95,7 @@ void MurmurHash3_x86_32 ( const void * key, int len,
 
   for(int i = -nblocks; i; i++)
   {
-    uint32_t k1 = getblock32(blocks,i);
+    uint32_t k1 = jl_load_unaligned_i32(blocks + i);
 
     k1 *= c1;
     k1 = ROTL32(k1,15);
@@ -170,10 +156,10 @@ void MurmurHash3_x86_128 ( const void * key, const int len,
 
   for(int i = -nblocks; i; i++)
  {
-    uint32_t k1 = getblock32(blocks,i*4+0);
-    uint32_t k2 = getblock32(blocks,i*4+1);
-    uint32_t k3 = getblock32(blocks,i*4+2);
-    uint32_t k4 = getblock32(blocks,i*4+3);
+    uint32_t k1 = jl_load_unaligned_i32(blocks + i*4 + 0);
+    uint32_t k2 = jl_load_unaligned_i32(blocks + i*4 + 1);
+    uint32_t k3 = jl_load_unaligned_i32(blocks + i*4 + 2);
+    uint32_t k4 = jl_load_unaligned_i32(blocks + i*4 + 3);
 
     k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
 
@@ -274,8 +260,8 @@ void MurmurHash3_x64_128 ( const void * key, const int len,
 
   for(int i = 0; i < nblocks; i++)
   {
-    uint64_t k1 = getblock64(blocks,i*2+0);
-    uint64_t k2 = getblock64(blocks,i*2+1);
+    uint64_t k1 = jl_load_unaligned_i64(blocks + i*2 + 0);
+    uint64_t k2 = jl_load_unaligned_i64(blocks + i*2 + 1);
 
     k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1;
 
diff --git a/src/support/dtypes.h b/src/support/dtypes.h
index bd75ea3a9913c..73080ea8f418d 100644
--- a/src/support/dtypes.h
+++ b/src/support/dtypes.h
@@ -6,6 +6,7 @@
 #include <stddef.h>
 #include <stddef.h> // double include of stddef.h fixes #3421
 #include <limits.h>
+#include <string.h> // memcpy
 #if defined(_COMPILER_INTEL_)
 #include <mathimf.h>
 #else
@@ -220,4 +221,37 @@ typedef enum { T_INT8, T_UINT8, T_INT16, T_UINT16, T_INT32, T_UINT32,
 #define JL_UNUSED
 #endif
 
+STATIC_INLINE uint64_t jl_load_unaligned_i64(const void *ptr)
+{
+    uint64_t val;
+    memcpy(&val, ptr, 8);
+    return val;
+}
+STATIC_INLINE uint32_t jl_load_unaligned_i32(const void *ptr)
+{
+    uint32_t val;
+    memcpy(&val, ptr, 4);
+    return val;
+}
+STATIC_INLINE uint16_t jl_load_unaligned_i16(const void *ptr)
+{
+    uint16_t val;
+    memcpy(&val, ptr, 2);
+    return val;
+}
+
+STATIC_INLINE void jl_store_unaligned_i64(void *ptr, uint64_t val)
+{
+    memcpy(ptr, &val, 8);
+}
+STATIC_INLINE void jl_store_unaligned_i32(void *ptr, uint32_t val)
+{
+    memcpy(ptr, &val, 4);
+}
+STATIC_INLINE void jl_store_unaligned_i16(void *ptr, uint16_t val)
+{
+    memcpy(ptr, &val, 2);
+}
+
+
 #endif /* DTYPES_H */
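A note on the technique used above: a memcpy with a compile-time-constant size is the portable way to express an unaligned load or store. An optimizing compiler typically folds it into a single load instruction on x86 and into byte loads or unaligned-capable instructions on strict-alignment targets, whereas dereferencing a misaligned uint32_t * (which is what getblock32 amounted to once its callers stopped guaranteeing alignment) is undefined behavior and can fault on some ARM, SPARC, or MIPS hardware. Below is a minimal standalone sketch of the same pattern; the helper and variable names are illustrative, not taken from the patch:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Same technique as jl_load_unaligned_i32 in the patch: a constant-size
     * memcpy that the compiler folds into one unaligned-capable load. */
    static inline uint32_t load_unaligned_i32(const void *ptr)
    {
        uint32_t val;
        memcpy(&val, ptr, 4);
        return val;
    }

    int main(void)
    {
        /* buf + 1 is not 4-byte aligned, so *(const uint32_t *)(buf + 1)
         * would be undefined behavior; the memcpy-based load is well defined. */
        uint8_t buf[8] = {0x00, 0x78, 0x56, 0x34, 0x12, 0x00, 0x00, 0x00};
        printf("0x%08" PRIx32 "\n", load_unaligned_i32(buf + 1));
        /* prints 0x12345678 on a little-endian machine */
        return 0;
    }

Placing the definitions in src/support/dtypes.h, with only the JL_NOTSAFEPOINT-annotated declarations left in src/julia_internal.h, lets support code such as MurmurHash3.c use the helpers without including the runtime-internal header.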