Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Set RAPIDS_NO_INITIALIZE at the top of CUDAWorker/LocalCUDACluster #379

Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
2 changes: 0 additions & 2 deletions dask_cuda/benchmarks/utils.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
import argparse
import os

from dask.distributed import SSHCluster

Expand Down Expand Up @@ -187,7 +186,6 @@ def get_scheduler_workers(dask_scheduler=None):
def setup_memory_pool(pool_size=None, disable_pool=False):
import cupy

os.environ["RAPIDS_NO_INITIALIZE"] = "True"
import rmm

rmm.reinitialize(
Expand Down
5 changes: 4 additions & 1 deletion dask_cuda/cuda_worker.py
Original file line number Diff line number Diff line change
Expand Up @@ -72,6 +72,10 @@ def __init__(
net_devices=None,
**kwargs,
):
# Required by RAPIDS libraries (e.g., cuDF) to ensure no context
# initialization happens before we can set CUDA_VISIBLE_DEVICES
os.environ["RAPIDS_NO_INITIALIZE"] = "True"

enable_proctitle_on_current()
enable_proctitle_on_children()

Expand Down Expand Up @@ -133,7 +137,6 @@ def del_pid_file():

if rmm_pool_size is not None or rmm_managed_memory:
try:
os.environ["RAPIDS_NO_INITIALIZE"] = "True"
import rmm # noqa F401
except ImportError:
raise ValueError(
Expand Down
3 changes: 0 additions & 3 deletions dask_cuda/initialize.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,6 @@
about Dask configuration.
"""
import logging
import os

import click
import numba.cuda
Expand All @@ -47,7 +46,6 @@ def initialize(
):
if create_cuda_context:
try:
os.environ["RAPIDS_NO_INITIALIZE"] = "True"
numba.cuda.current_context()
except Exception:
logger.error("Unable to start CUDA Context", exc_info=True)
Expand Down Expand Up @@ -107,7 +105,6 @@ def dask_setup(
):
if create_cuda_context:
try:
os.environ["RAPIDS_NO_INITIALIZE"] = "True"
numba.cuda.current_context()
except Exception:
logger.error("Unable to start CUDA Context", exc_info=True)
5 changes: 4 additions & 1 deletion dask_cuda/local_cuda_cluster.py
Original file line number Diff line number Diff line change
Expand Up @@ -150,6 +150,10 @@ def __init__(
rmm_managed_memory=False,
**kwargs,
):
# Required by RAPIDS libraries (e.g., cuDF) to ensure no context
# initialization happens before we can set CUDA_VISIBLE_DEVICES
os.environ["RAPIDS_NO_INITIALIZE"] = "True"

if CUDA_VISIBLE_DEVICES is None:
CUDA_VISIBLE_DEVICES = cuda_visible_devices(0)
if isinstance(CUDA_VISIBLE_DEVICES, str):
Expand All @@ -166,7 +170,6 @@ def __init__(
self.rmm_managed_memory = rmm_managed_memory
if rmm_pool_size is not None or rmm_managed_memory:
try:
os.environ["RAPIDS_NO_INITIALIZE"] = "True"
import rmm # noqa F401
except ImportError:
raise ValueError(
Expand Down
1 change: 0 additions & 1 deletion dask_cuda/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,6 @@ def __init__(self, nbytes, managed_memory):

def setup(self, worker=None):
if self.nbytes is not None or self.managed_memory is True:
os.environ["RAPIDS_NO_INITIALIZE"] = "True"
import rmm

pool_allocator = False if self.nbytes is None else True
Expand Down