
Commit

Implement memory_order_consume operations as acquire, move support include to <atomic>
wmaxey committed Oct 14, 2020
1 parent c7eb389 commit 264a9f2
Showing 3 changed files with 21 additions and 10 deletions.
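Note: the substance of the commit is that the MSVC atomic shim now routes memory_order_consume through the acquire path (consume's dependency ordering is impractical to implement, and mainstream compilers already strengthen it to acquire; C++17 even discourages its use), and the shim's include moves into <atomic> itself. A minimal, self-contained sketch of that promotion idea, not code from the patch:

    #include <atomic>
    #include <cassert>

    template <class T>
    T load_with_order(const std::atomic<T>& a, std::memory_order order) {
        // The commit's rule: a consume request is carried out as acquire.
        if (order == std::memory_order_consume)
            order = std::memory_order_acquire;
        return a.load(order);
    }

    int main() {
        std::atomic<int> x{42};
        assert(load_with_order(x, std::memory_order_consume) == 42);
    }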
4 changes: 2 additions & 2 deletions include/cuda/std/detail/__atomic
@@ -44,7 +44,6 @@
#define __ATOMIC_THREAD 10
#endif //__ATOMIC_BLOCK


_LIBCUDACXX_BEGIN_NAMESPACE_CUDA

namespace detail {
@@ -102,7 +101,8 @@ namespace detail {

_LIBCUDACXX_END_NAMESPACE_CUDA

#ifdef _MSC_VER
#if defined(_LIBCUDACXX_COMPILER_MSVC)
// Inject atomic intrinsics built from MSVC compiler intrinsics
#include "libcxx/include/support/win32/atomic_msvc.h"
#endif

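This first file also swaps the raw _MSC_VER test for the library's own _LIBCUDACXX_COMPILER_MSVC macro. Assuming the usual libcu++ convention (the macro is derived once from the vendor macro in a central config header), the overall pattern looks like this sketch; the derivation shown here is an assumption, not a line from the patch:

    #if defined(_MSC_VER) && !defined(__clang__)  // assumption: how the config header derives it
    #  define _LIBCUDACXX_COMPILER_MSVC
    #endif

    #if defined(_LIBCUDACXX_COMPILER_MSVC)
    // MSVC lacks the GCC/Clang __atomic_* builtins, so a shim built from
    // MSVC compiler intrinsics is injected instead.
    #  include "libcxx/include/support/win32/atomic_msvc.h"
    #endif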
5 changes: 5 additions & 0 deletions libcxx/include/atomic
@@ -555,6 +555,11 @@ void atomic_signal_fence(memory_order m) noexcept;
#include <type_traits>
#include <version>
#include <__pragma_push>

#if defined(_LIBCUDACXX_COMPILER_MSVC)
#include "support/win32/atomic_msvc.h"
#endif

#endif //__cuda_std__

#if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
22 changes: 14 additions & 8 deletions libcxx/include/support/win32/atomic_msvc.h
@@ -66,7 +66,7 @@ void __atomic_load_relaxed(const volatile _Type *__ptr, _Type *__ret) {
template<class _Type>
void __atomic_load(const volatile _Type *__ptr, _Type *__ret, int __memorder) {
switch (__memorder) {
case __ATOMIC_SEQ_CST: _Memory_barrier();
case __ATOMIC_SEQ_CST: _Memory_barrier(); _LIBCUDACXX_FALLTHROUGH();
case __ATOMIC_CONSUME:
case __ATOMIC_ACQUIRE: __atomic_load_relaxed(__ptr, __ret); _Compiler_or_memory_barrier(); break;
case __ATOMIC_RELAXED: __atomic_load_relaxed(__ptr, __ret); break;
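Two things happen in this load hunk: the previously implicit seq_cst fallthrough is annotated with _LIBCUDACXX_FALLTHROUGH() (presumably expanding to [[fallthrough]] or a compiler-specific equivalent), and __ATOMIC_CONSUME now shares the acquire path. A self-contained sketch of the same dispatch, with stand-in names for the barriers:

    #include <cassert>

    enum { RELAXED, CONSUME, ACQUIRE, SEQ_CST };
    inline void full_fence() {}     // stand-in for _Memory_barrier()
    inline void one_way_fence() {}  // stand-in for _Compiler_or_memory_barrier()

    template <class T>
    void sketch_load(const volatile T* p, T* out, int order) {
        switch (order) {
        case SEQ_CST: full_fence(); [[fallthrough]];  // full fence first, then load as acquire
        case CONSUME:                                 // consume is performed as acquire
        case ACQUIRE: *out = *p; one_way_fence(); break;
        case RELAXED: *out = *p; break;
        default: assert(0);
        }
    }

    int main() {
        int v = 5, r = 0;
        sketch_load(&v, &r, CONSUME);
        assert(r == 5);
    }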
@@ -98,7 +98,7 @@ template<class _Type>
void __atomic_store(volatile _Type *__ptr, _Type *__val, int __memorder) {
switch (__memorder) {
case __ATOMIC_RELEASE: _Compiler_or_memory_barrier(); __atomic_store_relaxed(__ptr, __val); break;
case __ATOMIC_SEQ_CST: _Memory_barrier();
case __ATOMIC_SEQ_CST: _Memory_barrier(); _LIBCUDACXX_FALLTHROUGH();
case __ATOMIC_RELAXED: __atomic_store_relaxed(__ptr, __val); break;
default: assert(0);
}
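Stores have no consume or acquire cases (neither is a valid order for a plain atomic store), so this hunk only annotates the seq_cst fallthrough. As a usage-level illustration of why promoting consume to acquire is safe for the release/consume pairing these shims serve, a small runnable example (not from the patch):

    #include <atomic>
    #include <cassert>
    #include <thread>

    std::atomic<int*> ptr{nullptr};
    int data = 0;

    int main() {
        std::thread producer([] {
            data = 7;                                  // plain write, published below
            ptr.store(&data, std::memory_order_release);
        });
        std::thread consumer([] {
            int* p = nullptr;
            // With this commit, the consume load executes as an acquire load.
            while (!(p = ptr.load(std::memory_order_consume))) {}
            // Holds under acquire; true consume would also guarantee it via
            // the data dependency through p.
            assert(*p == 7);
        });
        producer.join();
        consumer.join();
    }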
@@ -149,7 +149,8 @@ bool __atomic_compare_exchange(_Type volatile *__ptr, _Type *__expected, const _
bool success = false;
switch (detail::__stronger_order_cuda(__success_memorder, __failure_memorder)) {
case __ATOMIC_RELEASE: _Compiler_or_memory_barrier(); success = __atomic_compare_exchange_relaxed(__ptr, __expected, __desired); break;
case __ATOMIC_ACQ_REL: _Compiler_or_memory_barrier();
case __ATOMIC_ACQ_REL: _Compiler_or_memory_barrier(); _LIBCUDACXX_FALLTHROUGH();
case __ATOMIC_CONSUME:
case __ATOMIC_ACQUIRE: success = __atomic_compare_exchange_relaxed(__ptr, __expected, __desired); _Compiler_or_memory_barrier(); break;
case __ATOMIC_SEQ_CST: _Memory_barrier(); success = __atomic_compare_exchange_relaxed(__ptr, __expected, __desired); _Compiler_or_memory_barrier(); break;
case __ATOMIC_RELAXED: success = __atomic_compare_exchange_relaxed(__ptr, __expected, __desired); break;
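Compare-exchange first collapses the success and failure orders through detail::__stronger_order_cuda (declared in the first file's detail namespace), so one barrier scheme covers both outcomes; __ATOMIC_CONSUME then joins the acquire path. A sketch of what a "stronger of two orders" helper plausibly computes; these are assumed semantics, not the actual implementation:

    #include <cassert>

    enum { RELAXED, CONSUME, ACQUIRE, RELEASE, ACQ_REL, SEQ_CST };

    // Plausible rules: seq_cst dominates; mixing an acquire-side order with a
    // release-side order yields acq_rel; consume counts as acquire-side,
    // consistent with this commit.
    int stronger_order(int a, int b) {
        if (a == SEQ_CST || b == SEQ_CST) return SEQ_CST;
        bool acq = a == CONSUME || a == ACQUIRE || a == ACQ_REL ||
                   b == CONSUME || b == ACQUIRE || b == ACQ_REL;
        bool rel = a == RELEASE || a == ACQ_REL || b == RELEASE || b == ACQ_REL;
        if (acq && rel) return ACQ_REL;
        if (rel) return RELEASE;
        if (acq) return ACQUIRE;
        return RELAXED;
    }

    int main() {
        assert(stronger_order(RELEASE, CONSUME) == ACQ_REL);
        assert(stronger_order(CONSUME, RELAXED) == ACQUIRE);
    }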
@@ -182,7 +183,8 @@ template<class _Type>
void __atomic_exchange(_Type volatile *__ptr, const _Type *__val, _Type *__ret, int __memorder) {
switch (__memorder) {
case __ATOMIC_RELEASE: _Compiler_or_memory_barrier(); __atomic_exchange_relaxed(__ptr, __val, __ret);break;
case __ATOMIC_ACQ_REL: _Compiler_or_memory_barrier();
case __ATOMIC_ACQ_REL: _Compiler_or_memory_barrier(); _LIBCUDACXX_FALLTHROUGH();
case __ATOMIC_CONSUME:
case __ATOMIC_ACQUIRE: __atomic_exchange_relaxed(__ptr, __val, __ret); _Compiler_or_memory_barrier(); break;
case __ATOMIC_SEQ_CST: _Memory_barrier(); __atomic_exchange_relaxed(__ptr, __val, __ret); _Compiler_or_memory_barrier(); break;
case __ATOMIC_RELAXED: __atomic_exchange_relaxed(__ptr, __val, __ret); break;
@@ -217,7 +219,8 @@ _Type __atomic_fetch_add(_Type volatile *__ptr, _Delta __val, int __memorder) {

switch (__memorder) {
case __ATOMIC_RELEASE: _Compiler_or_memory_barrier(); __atomic_fetch_add_relaxed(__ptr, &__val, __dest);break;
case __ATOMIC_ACQ_REL: _Compiler_or_memory_barrier();
case __ATOMIC_ACQ_REL: _Compiler_or_memory_barrier(); _LIBCUDACXX_FALLTHROUGH();
case __ATOMIC_CONSUME:
case __ATOMIC_ACQUIRE: __atomic_fetch_add_relaxed(__ptr, &__val, __dest); _Compiler_or_memory_barrier(); break;
case __ATOMIC_SEQ_CST: _Memory_barrier(); __atomic_fetch_add_relaxed(__ptr, &__val, __dest); _Compiler_or_memory_barrier(); break;
case __ATOMIC_RELAXED: __atomic_fetch_add_relaxed(__ptr, &__val, __dest); break;
@@ -258,7 +261,8 @@ _Type __atomic_fetch_and(_Type volatile *__ptr, _Delta __val, int __memorder) {

switch (__memorder) {
case __ATOMIC_RELEASE: _Compiler_or_memory_barrier(); __atomic_fetch_and_relaxed(__ptr, &__val, __dest);break;
case __ATOMIC_ACQ_REL: _Compiler_or_memory_barrier();
case __ATOMIC_ACQ_REL: _Compiler_or_memory_barrier(); _LIBCUDACXX_FALLTHROUGH();
case __ATOMIC_CONSUME:
case __ATOMIC_ACQUIRE: __atomic_fetch_and_relaxed(__ptr, &__val, __dest); _Compiler_or_memory_barrier(); break;
case __ATOMIC_SEQ_CST: _Memory_barrier(); __atomic_fetch_and_relaxed(__ptr, &__val, __dest); _Compiler_or_memory_barrier(); break;
case __ATOMIC_RELAXED: __atomic_fetch_and_relaxed(__ptr, &__val, __dest); break;
@@ -294,7 +298,8 @@ _Type __atomic_fetch_xor(_Type volatile *__ptr, _Delta __val, int __memorder) {

switch (__memorder) {
case __ATOMIC_RELEASE: _Compiler_or_memory_barrier(); __atomic_fetch_xor_relaxed(__ptr, &__val, __dest);break;
case __ATOMIC_ACQ_REL: _Compiler_or_memory_barrier();
case __ATOMIC_ACQ_REL: _Compiler_or_memory_barrier(); _LIBCUDACXX_FALLTHROUGH();
case __ATOMIC_CONSUME:
case __ATOMIC_ACQUIRE: __atomic_fetch_xor_relaxed(__ptr, &__val, __dest); _Compiler_or_memory_barrier(); break;
case __ATOMIC_SEQ_CST: _Memory_barrier(); __atomic_fetch_xor_relaxed(__ptr, &__val, __dest); _Compiler_or_memory_barrier(); break;
case __ATOMIC_RELAXED: __atomic_fetch_xor_relaxed(__ptr, &__val, __dest); break;
@@ -330,7 +335,8 @@ _Type __atomic_fetch_or(_Type volatile *__ptr, _Delta __val, int __memorder) {

switch (__memorder) {
case __ATOMIC_RELEASE: _Compiler_or_memory_barrier(); __atomic_fetch_or_relaxed(__ptr, &__val, __dest);break;
case __ATOMIC_ACQ_REL: _Compiler_or_memory_barrier();
case __ATOMIC_ACQ_REL: _Compiler_or_memory_barrier(); _LIBCUDACXX_FALLTHROUGH();
case __ATOMIC_CONSUME:
case __ATOMIC_ACQUIRE: __atomic_fetch_or_relaxed(__ptr, &__val, __dest); _Compiler_or_memory_barrier(); break;
case __ATOMIC_SEQ_CST: _Memory_barrier(); __atomic_fetch_or_relaxed(__ptr, &__val, __dest); _Compiler_or_memory_barrier(); break;
case __ATOMIC_RELAXED: __atomic_fetch_or_relaxed(__ptr, &__val, __dest); break;
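The fetch_add, fetch_and, fetch_xor, and fetch_or hunks above apply the same two-line change to an identical barrier scheme; only the relaxed primitive differs. A generic, self-contained sketch of that shared pattern, again with stand-in fence names:

    #include <cassert>

    enum { RELAXED, CONSUME, ACQUIRE, RELEASE, ACQ_REL, SEQ_CST };
    inline void full_fence() {}     // stand-in for _Memory_barrier()
    inline void one_way_fence() {}  // stand-in for _Compiler_or_memory_barrier()

    // Shared shape of the fetch_<op> shims: a relaxed RMW bracketed by fences
    // chosen from the requested order.
    template <class RelaxedOp>
    void rmw_with_order(int order, RelaxedOp op) {
        switch (order) {
        case RELEASE: one_way_fence(); op(); break;
        case ACQ_REL: one_way_fence(); [[fallthrough]];  // fence before *and* after
        case CONSUME:                                    // consume handled as acquire
        case ACQUIRE: op(); one_way_fence(); break;
        case SEQ_CST: full_fence(); op(); one_way_fence(); break;
        case RELAXED: op(); break;
        default: assert(0);
        }
    }

    int main() {
        int x = 0;
        rmw_with_order(ACQ_REL, [&] { x |= 1; });
        assert(x == 1);
    }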
