diff --git a/java/src/main/native/src/RmmJni.cpp b/java/src/main/native/src/RmmJni.cpp
index 0f424761bfe..8c733018fa7 100644
--- a/java/src/main/native/src/RmmJni.cpp
+++ b/java/src/main/native/src/RmmJni.cpp
@@ -778,17 +778,11 @@ JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Rmm_newCudaAsyncMemoryResource(
   try {
     cudf::jni::auto_set_device(env);
-    // When we are using fabric, we need to set the memory access to be
-    // read_write, in order for peer GPUs to have access to this memory.
-    // Otherwise, choose default parameters (optional set to nullopt).
-    auto [handle_type, prot_flag] =
-      fabric
-        ? std::pair{std::optional{
-            rmm::mr::cuda_async_memory_resource::allocation_handle_type::fabric},
-                    std::optional{rmm::mr::cuda_async_memory_resource::access_flags::read_write}}
-        : std::pair{std::nullopt, std::nullopt};
-
-    auto ret = new rmm::mr::cuda_async_memory_resource(init, release, handle_type, prot_flag);
+    auto handle_type =
+      fabric ? std::optional{rmm::mr::cuda_async_memory_resource::allocation_handle_type::fabric}
+             : std::nullopt;
+
+    auto ret = new rmm::mr::cuda_async_memory_resource(init, release, handle_type);
     return reinterpret_cast<jlong>(ret);
   }