Reproducer
```python
from parla import Parla, spawn, TaskSpace
from parla.common.globals import get_current_context
from parla.cython.device_manager import cpu, gpu
from parla import parray
import argparse
import cupy as cp
import numpy as np
import crosspy as xp
import math
import time

parser = argparse.ArgumentParser()
parser.add_argument("-dev_config", type=str, default="devices_sample.YAML")
parser.add_argument("-num_partitions", type=int, default=2)
parser.add_argument("-ngpus", type=int, default=4)
parser.add_argument("-m", type=int, default=5)
args = parser.parse_args()

# Wrap each cupy block in a Parla PArray
wrapper = parray.asarray


def spawn_task(xA, xB):
    T = TaskSpace("T")
    # One GPU placement slot per block of the CrossPy array
    placement = tuple([gpu for i in range(len(xA.values()))])

    @spawn(T[0], placement=[placement], inout=[xA, xB])
    def task():
        context = get_current_context()
        print("Task is running on: ", context)
        # Redistribute xA into xB according to the global index map
        xp.alltoallv(xA, np.arange(args.m * args.ngpus), xB)
        print("Result: ", xB)
        print("Status: ")
        for arr in xB.block_view():
            print(arr.print_overview())


def main():
    # Global size: args.m elements per device across args.ngpus devices
    global_size = args.m * args.ngpus
    global_array = np.arange(global_size, dtype=np.int32)
    np.random.shuffle(global_array)

    with Parla():
        # Initialize a CrossPy array from one cupy block per GPU
        cupy_list = []
        cupy_list_B = []
        for i in range(args.ngpus):
            with cp.cuda.Device(i) as device:
                random_array = cp.random.randint(0, 100, size=args.m)
                random_array = random_array.astype(cp.int32)
                cupy_list.append(random_array)
                cupy_list_B.append(cp.zeros_like(random_array))
                device.synchronize()
        xA = xp.array(cupy_list, dim=0, wrapper=wrapper)
        xB = xp.array(cupy_list_B, dim=0, wrapper=wrapper)
        # Variant without the PArray wrapper:
        #xA = xp.array(cupy_list, dim=0)
        #xB = xp.array(cupy_list_B, dim=0)
        spawn_task(xA, xB)


if __name__ == "__main__":
    main()
```
Error
```
Task is running on:  TaskEnvironment([GPUEnvironment(CUDA:0), GPUEnvironment(CUDA:1), GPUEnvironment(CUDA:2), GPUEnvironment(CUDA:3)])
Exception in Task Task(T_0): unhashable type: 'ndarray'
Traceback (most recent call last):
  File "tasks.pyx", line 409, in parla.cython.tasks.Task.run
  File "tasks.pyx", line 561, in parla.cython.tasks.ComputeTask._execute_task
  File "scheduler.pyx", line 511, in parla.cython.scheduler._task_callback
  File "/work2/06081/wlruys/frontera/parla-experimental/example/quicksort/example_2.py", line 34, in task
    xp.alltoallv(xA, np.arange(args.m*args.ngpus), xB)
  File "/work2/06081/wlruys/frontera/mambaforge/envs/parla/lib/python3.10/site-packages/crosspy/mpi/collective.py", line 28, in alltoallv
    buf = sendbuf._original_data[i][gather_indices_local]
  File "/work2/06081/wlruys/frontera/mambaforge/envs/parla/lib/python3.10/site-packages/parla/common/parray/core.py", line 328, in __getitem__
    return PArray(ret, parent=self, slices=slices)
  File "/work2/06081/wlruys/frontera/mambaforge/envs/parla/lib/python3.10/site-packages/parla/common/parray/core.py", line 71, in __init__
    self._slices_hash = self._array.get_slices_hash(slices)
  File "/work2/06081/wlruys/frontera/mambaforge/envs/parla/lib/python3.10/site-packages/parla/common/parray/memory.py", line 498, in get_slices_hash
    hash_value = hash_value * prime + ctypes.c_size_t(hash(global_slices)).value
TypeError: unhashable type: 'ndarray'
```
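For context, the root cause is visible in the last frame: `get_slices_hash` calls `hash()` on the index expression, and numpy arrays are unhashable by design. A minimal sketch of the failure, independent of Parla and CrossPy (the tuple/bytes conversions below are only illustrative, not the library's API):

```python
import numpy as np

idx = np.arange(4)

# ndarray defines elementwise __eq__, so Python makes it unhashable:
try:
    hash(idx)
except TypeError as e:
    print(e)  # unhashable type: 'ndarray'

# Immutable snapshots of the same indices hash fine, which is one way
# a slice hash could accommodate array-typed indices:
print(hash(tuple(idx.tolist())))
print(hash(idx.tobytes()))
```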
@bozhiyou is there a plan for this support? I think we need it for a cohesive presentation.
We can't really use CrossPy with PArrays if there is no way to move data between them.
I could support that if you need it. But it would be done by simply converting the numpy/cupy array to a list, so use a list where you can to avoid the unnecessary conversion.
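For reference, a minimal sketch of that conversion at the failing line in `crosspy/mpi/collective.py`, assuming (per the comment above) that PArray's slice hashing already copes with plain Python lists. The helper name is hypothetical, not crosspy API:

```python
def as_hashable_index(indices):
    """Convert a numpy/cupy index array to a plain Python list so that
    PArray.__getitem__ can hash it (hypothetical helper, not crosspy API)."""
    return indices.tolist() if hasattr(indices, "tolist") else indices

# At the failing line in collective.py this would become, e.g.:
#   buf = sendbuf._original_data[i][as_hashable_index(gather_indices_local)]
```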
bozhiyou changed the title from "all2allv does not support underlying PArray type" to "PArray does not support array type indices" on Apr 14, 2023.
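A longer-term fix, per the new title, would live in PArray's slice hashing rather than in each caller. A hedged sketch of the idea, reconstructed from the formula at memory.py line 498 in the traceback (the function name, `prime` value, and ndarray special case are assumptions, not the real code):

```python
import ctypes
import numpy as np

def fold_index_into_hash(hash_value, global_slices, prime=31):
    # ndarray indices are unhashable; hash an immutable snapshot of
    # their contents instead, so array-typed indices get a stable key
    # (sketch only; prime and dispatch are illustrative assumptions).
    if isinstance(global_slices, np.ndarray):
        component = hash(global_slices.tobytes())
    else:
        component = hash(global_slices)
    return hash_value * prime + ctypes.c_size_t(component).value
```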