diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index b917dfd3aa..d17e14bd92 100755 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -4,9 +4,6 @@ cpp/ @rapidsai/kvikio-cpp-codeowners #python code owners python/ @rapidsai/kvikio-python-codeowners -#legate code owners -legate/ @rapidsai/kvikio-cpp-codeowners @rapidsai/kvikio-python-codeowners - #cmake code owners **/CMakeLists.txt @rapidsai/kvikio-cmake-codeowners **/cmake/ @rapidsai/kvikio-cmake-codeowners diff --git a/.github/labeler.yml b/.github/labeler.yml index 7907548a1a..aaea0aecbe 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -7,9 +7,6 @@ KvikIO (Python): libkvikio: - 'cpp/**' -legate: - - 'legate/**' - CMake: - '**/CMakeLists.txt' - '**/cmake/**' diff --git a/.gitignore b/.gitignore index 1b23a132dc..1101e3ae49 100644 --- a/.gitignore +++ b/.gitignore @@ -1,9 +1,6 @@ *.*~ build python/_skbuild -legate/_skbuild -legate/legate_kvikio/install_info.py -legate/record.txt python/record.txt *.so dask-worker-space diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 2e1ad6b7f4..d98544c57b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,4 +1,4 @@ -# Copyright (c) 2019-2022, NVIDIA CORPORATION. +# Copyright (c) 2019-2024, NVIDIA CORPORATION. repos: - repo: https://github.com/pre-commit/pre-commit-hooks @@ -13,22 +13,18 @@ repos: args: ["--config-root=python/", "--resolve-all-configs"] files: python/.* types_or: [python, cython, pyi] - - id: isort - args: ["--config-root=legate/", "--resolve-all-configs"] - files: legate/.* - types_or: [python, cython, pyi] - repo: https://github.com/psf/black rev: 23.3.0 hooks: - id: black - files: (python|legate)/.* + files: python/.* args: ["--config", "python/pyproject.toml"] - repo: https://github.com/PyCQA/flake8 rev: 6.0.0 hooks: - id: flake8 args: ["--config=.flake8"] - files: (python|legate)/.*$ + files: python/.*$ types: [file] types_or: [python, cython] additional_dependencies: ["flake8-force"] diff --git a/build.sh b/build.sh index e094d2d4fc..769600b10f 100755 --- a/build.sh +++ b/build.sh @@ -18,12 +18,11 @@ ARGS=$* # script, and that this script resides in the repo dir! 
REPODIR=$(cd $(dirname $0); pwd) -VALIDARGS="clean libkvikio kvikio legate -v -g -n -s --ptds -h" -HELP="$0 [clean] [libkvikio] [kvikio] [legate] [-v] [-g] [-n] [-s] [--ptds] [--cmake-args=\"\"] [-h] +VALIDARGS="clean libkvikio kvikio -v -g -n -s --ptds -h" +HELP="$0 [clean] [libkvikio] [kvikio] [-v] [-g] [-n] [-s] [--ptds] [--cmake-args=\"\"] [-h] clean - remove all existing build artifacts and configuration (start over) libkvikio - build and install the libkvikio C++ code kvikio - build and install the kvikio Python package - legate - build and install the legate-kvikio Python package -v - verbose build mode -g - build for debug -n - no install step @@ -33,8 +32,7 @@ HELP="$0 [clean] [libkvikio] [kvikio] [legate] [-v] [-g] [-n] [-s] [--ptds] [--c " LIBKVIKIO_BUILD_DIR=${LIBKVIKIO_BUILD_DIR:=${REPODIR}/cpp/build} KVIKIO_BUILD_DIR="${REPODIR}/python/build ${REPODIR}/python/_skbuild" -LEGATE_BUILD_DIR="${REPODIR}/legate/build ${REPODIR}/legate/_skbuild" -BUILD_DIRS="${LIBKVIKIO_BUILD_DIR} ${KVIKIO_BUILD_DIR} ${LEGATE_BUILD_DIR}" +BUILD_DIRS="${LIBKVIKIO_BUILD_DIR} ${KVIKIO_BUILD_DIR}" # Set defaults for vars modified by flags to this script VERBOSE_FLAG="" @@ -131,7 +129,6 @@ if hasArg clean; then rmdir "${bd}" || true fi done - rm -f ${REPODIR}/legate/legate_kvikio/install_info.py fi ################################################################################ diff --git a/ci/release/update-version.sh b/ci/release/update-version.sh index 0e8a476590..1dc6966bd9 100755 --- a/ci/release/update-version.sh +++ b/ci/release/update-version.sh @@ -41,11 +41,9 @@ sed_runner 's/set(kvikio_version.*)/set(kvikio_version '${NEXT_FULL_TAG}')/g' py # Python __init__.py updates sed_runner "s/__version__ = .*/__version__ = \"${NEXT_FULL_TAG}\"/g" python/kvikio/__init__.py -sed_runner "s/__version__ = .*/__version__ = \"${NEXT_FULL_TAG}\"/g" legate/legate_kvikio/__init__.py # Python pyproject.toml updates sed_runner "s/^version = .*/version = \"${NEXT_FULL_TAG}\"/g" python/pyproject.toml -sed_runner "s/^version = .*/version = \"${NEXT_FULL_TAG}\"/g" legate/pyproject.toml # rapids-cmake version sed_runner 's/'"branch-.*\/RAPIDS.cmake"'/'"branch-${NEXT_SHORT_TAG}\/RAPIDS.cmake"'/g' cpp/cmake/fetch_rapids.cmake diff --git a/conda/environments/all_cuda-118_arch-x86_64.yaml b/conda/environments/all_cuda-118_arch-x86_64.yaml index fd500cc5be..a6cf11b3cb 100644 --- a/conda/environments/all_cuda-118_arch-x86_64.yaml +++ b/conda/environments/all_cuda-118_arch-x86_64.yaml @@ -15,7 +15,6 @@ dependencies: - cxx-compiler - cython>=3.0.0 - dask>=2022.05.2 -- distributed>=2022.05.2 - doxygen=1.9.1 - gcc_linux-64=11.* - libcufile-dev=1.4.0.31 diff --git a/conda/environments/all_cuda-122_arch-x86_64.yaml b/conda/environments/all_cuda-122_arch-x86_64.yaml index 72e8f5c6f0..bdda38edbb 100644 --- a/conda/environments/all_cuda-122_arch-x86_64.yaml +++ b/conda/environments/all_cuda-122_arch-x86_64.yaml @@ -15,7 +15,6 @@ dependencies: - cxx-compiler - cython>=3.0.0 - dask>=2022.05.2 -- distributed>=2022.05.2 - doxygen=1.9.1 - gcc_linux-64=11.* - libcufile-dev diff --git a/dependencies.yaml b/dependencies.yaml index ad0228f26c..1013d86517 100644 --- a/dependencies.yaml +++ b/dependencies.yaml @@ -7,7 +7,6 @@ files: arch: [x86_64] includes: - build - - build_legate_wheel - checks - cuda - cuda_version @@ -16,7 +15,6 @@ files: - py_version - run - test_python - - test_python_legate test_cpp: output: none includes: @@ -61,31 +59,6 @@ files: key: test includes: - test_python - legate_py_build: - output: pyproject - pyproject_dir: 
legate - extras: - table: build-system - includes: - - build - - build_legate_wheel - legate_py_run: - output: pyproject - pyproject_dir: legate - extras: - table: project - includes: - - depends_on_cupy - - run - legate_py_optional_test: - output: pyproject - pyproject_dir: legate - extras: - table: project.optional-dependencies - key: test - includes: - - test_python - - test_python_legate channels: - rapidsai - rapidsai-nightly @@ -136,12 +109,6 @@ dependencies: cuda: "12.*" packages: - cuda-nvcc - build_legate_wheel: - common: - - output_types: [requirements, pyproject] - packages: - - setuptools - - wheel checks: common: - output_types: [conda, requirements] diff --git a/legate/CMakeLists.txt b/legate/CMakeLists.txt deleted file mode 100644 index a25a79177f..0000000000 --- a/legate/CMakeLists.txt +++ /dev/null @@ -1,40 +0,0 @@ -# ============================================================================= -# Copyright (c) 2023, NVIDIA CORPORATION. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except -# in compliance with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software distributed under the License -# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express -# or implied. See the License for the specific language governing permissions and limitations under -# the License. -# ============================================================================= - -cmake_minimum_required(VERSION 3.26.4 FATAL_ERROR) - -project( - LegateKvikIO - VERSION 23.04.00 - LANGUAGES C CXX -) - -# This is for convenience only when doing editable builds to avoid setting the flag -if(NOT LegateKvikIO_ROOT) - set(LegateKvikIO_ROOT ${CMAKE_SOURCE_DIR}/build) -endif() - -set(BUILD_SHARED_LIBS ON) -set(CMAKE_EXPORT_COMPILE_COMMANDS ON) - -# We always want to build our bindings, so we trick the legate wrappers to never search for an -# installed version which is currently deduced by seeing if it is invoked via scikit-build. See -# -find_package(legate_core REQUIRED) -set(SKBUILD OFF) -legate_add_cpp_subdirectory(cpp TARGET legate_kvikio EXPORT legate_kvikio-export) -legate_default_python_install(legate_kvikio EXPORT legate_kvikio-export) - -# Generates `install_info.py` -legate_add_cffi(${CMAKE_SOURCE_DIR}/cpp/task_opcodes.hpp TARGET legate_kvikio) diff --git a/legate/benchmarks/hdf5_read.py b/legate/benchmarks/hdf5_read.py deleted file mode 100644 index b11114cad9..0000000000 --- a/legate/benchmarks/hdf5_read.py +++ /dev/null @@ -1,188 +0,0 @@ -# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. -# See file LICENSE for terms. 
- -import argparse -import contextlib -import pathlib -import tempfile -from time import perf_counter as clock -from typing import ContextManager - -import h5py -import numpy as np - -DATASET = "dataset-hdf5-read" - - -def try_open_hdf5_array(filepath, shape, chunks, dtype): - try: - with h5py.File(filepath, "r") as f: - a = f[DATASET] - if a.shape == shape and a.chunks == chunks and a.dtype == dtype: - return a - except BaseException: - pass - return None - - -def create_hdf5_array(filepath: pathlib.Path, shape, chunks, dtype=np.float64) -> None: - ret = try_open_hdf5_array(filepath, shape, chunks, dtype) - if ret is None: - filepath.unlink(missing_ok=True) - # Write array using h5py - with h5py.File(filepath, "w") as f: - f.create_dataset(DATASET, chunks=chunks, data=np.random.random(shape)) - print(f"HDF5 '{filepath}': shape: {shape}, " f"chunks: {chunks}, dtype: {dtype}") - - -@contextlib.contextmanager -def dask_h5py(args): - import cupy - import h5py - from dask import array as da - from dask_cuda import LocalCUDACluster - from distributed import Client - - def f(): - t0 = clock() - with h5py.File(args.dir / "A", "r") as af: - with h5py.File(args.dir / "B", "r") as bf: - a = da.from_array(af[DATASET], chunks=af[DATASET].chunks) - b = da.from_array(bf[DATASET], chunks=bf[DATASET].chunks) - a = a.map_blocks(cupy.asarray) - b = b.map_blocks(cupy.asarray) - c = args.op(da, a, b) - int(c.sum().compute()) - t1 = clock() - return t1 - t0 - - with LocalCUDACluster(n_workers=args.n_workers) as cluster: - with Client(cluster): - yield f - - -@contextlib.contextmanager -def run_legate(args): - import cunumeric as num - - from legate.core import get_legate_runtime - from legate_kvikio.kerchunk import hdf5_read - - def f(): - get_legate_runtime().issue_execution_fence(block=True) - t0 = clock() - a = hdf5_read(args.dir / "A", dataset_name=DATASET) - b = hdf5_read(args.dir / "B", dataset_name=DATASET) - c = args.op(num, a, b) - int(c.sum()) - t1 = clock() - return t1 - t0 - - yield f - - -API = { - "dask-h5py": dask_h5py, - "legate": run_legate, -} - -OP = {"add": lambda xp, a, b: a + b, "matmul": lambda xp, a, b: xp.matmul(a, b)} - - -def main(args): - create_hdf5_array(args.dir / "A", chunks=(args.c, args.c), shape=(args.m, args.m)) - create_hdf5_array(args.dir / "B", chunks=(args.c, args.c), shape=(args.m, args.m)) - - timings = [] - with API[args.api](args) as f: - for _ in range(args.n_warmup_runs): - elapsed = f() - print("elapsed[warmup]: ", elapsed) - for i in range(args.nruns): - elapsed = f() - print(f"elapsed[run #{i}]: ", elapsed) - timings.append(elapsed) - print(f"elapsed mean: {np.mean(timings):.5}s (std: {np.std(timings):.5}s)") - - -if __name__ == "__main__": - - def parse_directory(x): - if x is None: - return x - else: - p = pathlib.Path(x) - if not p.is_dir(): - raise argparse.ArgumentTypeError("Must be a directory") - return p - - parser = argparse.ArgumentParser(description="Matrix operation on two Zarr files") - parser.add_argument( - "-m", - default=100, - type=int, - help="Dimension of the two square input matrices (MxM) (default: %(default)s).", - ) - parser.add_argument( - "-c", - default=None, - type=int, - help="Dimension of the square chunks (CxC) (default: M//10).", - ) - parser.add_argument( - "-d", - "--dir", - metavar="PATH", - default=None, - type=parse_directory, - help="Path to the directory to r/w from (default: tempfile.TemporaryDirectory)", - ) - parser.add_argument( - "--nruns", - metavar="RUNS", - default=1, - type=int, - help="Number of runs (default: 
%(default)s).", - ) - parser.add_argument( - "--api", - metavar="API", - default=tuple(API.keys())[0], - choices=tuple(API.keys()), - help="API to use {%(choices)s}", - ) - parser.add_argument( - "--n-workers", - default=1, - type=int, - help="Number of workers (default: %(default)s).", - ) - parser.add_argument( - "--op", - metavar="OP", - default=tuple(OP.keys())[0], - choices=tuple(OP.keys()), - help="Operation to run {%(choices)s}", - ) - parser.add_argument( - "--n-warmup-runs", - default=1, - type=int, - help="Number of warmup runs (default: %(default)s).", - ) - - args = parser.parse_args() - args.op = OP[args.op] # Parse the operation argument - if args.c is None: - args.c = args.m // 10 - - # Create a temporary directory if user didn't specify a directory - temp_dir: tempfile.TemporaryDirectory | ContextManager - if args.dir is None: - temp_dir = tempfile.TemporaryDirectory() - args.dir = pathlib.Path(temp_dir.name) - else: - temp_dir = contextlib.nullcontext() - - with temp_dir: - main(args) diff --git a/legate/benchmarks/single-node-io.py b/legate/benchmarks/single-node-io.py deleted file mode 100644 index 3c651f1b82..0000000000 --- a/legate/benchmarks/single-node-io.py +++ /dev/null @@ -1,207 +0,0 @@ -# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. -# See file LICENSE for terms. - -import argparse -import contextlib -import os -import os.path -import pathlib -import statistics -import tempfile -from time import perf_counter as clock -from typing import ContextManager, Union - -import cunumeric as num -from dask.utils import format_bytes, parse_bytes - -import kvikio -import kvikio.defaults -import legate.core -from legate_kvikio import CuFile - -runtime = legate.core.get_legate_runtime() - - -def run_cufile(args): - """Single file and array""" - file_path = args.dir / "kvikio-single-file" - - src = num.arange(args.nbytes, dtype="uint8") - dst = num.empty_like(src) - runtime.issue_execution_fence(block=True) - - # Write - f = CuFile(file_path, flags="w") - t0 = clock() - f.write(src) - f.close() - runtime.issue_execution_fence(block=True) - write_time = clock() - t0 - - # Read - f = CuFile(file_path, flags="r") - t0 = clock() - f.read(dst) - f.close() - runtime.issue_execution_fence(block=True) - read_time = clock() - t0 - assert (src == dst).all() - - return read_time, write_time - - -API = { - "cufile": run_cufile, -} - - -def main(args): - props = kvikio.DriverProperties() - try: - import pynvml.smi - - nvsmi = pynvml.smi.nvidia_smi.getInstance() - except ImportError: - gpu_name = "Unknown (install pynvml)" - mem_total = gpu_name - bar1_total = gpu_name - else: - info = nvsmi.DeviceQuery()["gpu"][0] - gpu_name = f"{info['product_name']} (dev #0)" - mem_total = format_bytes( - parse_bytes( - str(info["fb_memory_usage"]["total"]) + info["fb_memory_usage"]["unit"] - ) - ) - bar1_total = format_bytes( - parse_bytes( - str(info["bar1_memory_usage"]["total"]) - + info["bar1_memory_usage"]["unit"] - ) - ) - gds_version = "N/A (Compatibility Mode)" - if props.is_gds_available: - gds_version = f"v{props.major_version}.{props.minor_version}" - gds_config_json_path = os.path.realpath( - os.getenv("CUFILE_ENV_PATH_JSON", "/etc/cufile.json") - ) - - print("Roundtrip benchmark") - print("----------------------------------") - if kvikio.defaults.compat_mode(): - print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!") - print(" WARNING - KvikIO compat mode ") - print(" libcufile.so not used ") - print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!") - elif not props.is_gds_available: - 
print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!") - print(" WARNING - cuFile compat mode ") - print(" GDS not enabled ") - print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!") - print(f"GPU | {gpu_name}") - print(f"GPU Memory Total | {mem_total}") - print(f"BAR1 Memory Total | {bar1_total}") - print(f"GDS driver | {gds_version}") - print(f"GDS config.json | {gds_config_json_path}") - print("----------------------------------") - print(f"nbytes | {args.nbytes} bytes ({format_bytes(args.nbytes)})") - print(f"4K aligned | {args.nbytes % 4096 == 0}") - print(f"directory | {args.dir}") - print(f"nthreads | {args.nthreads}") - print(f"nruns | {args.nruns}") - print(f"#CPUs | {runtime.num_cpus}") - print(f"#GPUs | {runtime.num_gpus}") - print("==================================") - - # Run each benchmark using the requested APIs - for api in args.api: - rs = [] - ws = [] - for _ in range(args.nruns): - read, write = API[api](args) - rs.append(args.nbytes / read) - ws.append(args.nbytes / write) - - def pprint_api_res(name, samples): - mean = statistics.mean(samples) if len(samples) > 1 else samples[0] - ret = f"{api} {name}".ljust(18) - ret += f"| {format_bytes(mean).rjust(10)}/s".ljust(14) - if len(samples) > 1: - stdev = statistics.stdev(samples) / mean * 100 - ret += " ± %5.2f %%" % stdev - ret += " (" - for sample in samples: - ret += f"{format_bytes(sample)}/s, " - ret = ret[:-2] + ")" # Replace trailing comma - return ret - - print(pprint_api_res("read", rs)) - print(pprint_api_res("write", ws)) - - -if __name__ == "__main__": - - def parse_directory(x): - if x is None: - return x - else: - p = pathlib.Path(x) - if not p.is_dir(): - raise argparse.ArgumentTypeError("Must be a directory") - return p - - parser = argparse.ArgumentParser(description="Roundtrip benchmark") - parser.add_argument( - "-n", - "--nbytes", - metavar="BYTES", - default="10 MiB", - type=parse_bytes, - help="Message size, which must be a multiple of 8 (default: %(default)s).", - ) - parser.add_argument( - "-d", - "--dir", - metavar="PATH", - default=None, - type=parse_directory, - help="Path to the directory to r/w from (default: tempfile.TemporaryDirectory)", - ) - parser.add_argument( - "--nruns", - metavar="RUNS", - default=1, - type=int, - help="Number of runs per API (default: %(default)s).", - ) - parser.add_argument( - "-t", - "--nthreads", - metavar="THREADS", - default=1, - type=int, - help="Number of threads to use (default: %(default)s).", - ) - parser.add_argument( - "--api", - metavar="API", - default=("cufile",), - nargs="+", - choices=tuple(API.keys()) + ("all",), - help="List of APIs to use {%(choices)s}", - ) - - args = parser.parse_args() - if "all" in args.api: - args.api = tuple(API.keys()) - - # Create a temporary directory if user didn't specify a directory - temp_dir: Union[tempfile.TemporaryDirectory, ContextManager] - if args.dir is None: - temp_dir = tempfile.TemporaryDirectory() - args.dir = pathlib.Path(temp_dir.name) - else: - temp_dir = contextlib.nullcontext() - - with temp_dir: - main(args) diff --git a/legate/benchmarks/zarr_read.py b/legate/benchmarks/zarr_read.py deleted file mode 100644 index cde58de34a..0000000000 --- a/legate/benchmarks/zarr_read.py +++ /dev/null @@ -1,196 +0,0 @@ -# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. -# See file LICENSE for terms. 
- -import argparse -import contextlib -import functools -import pathlib -import tempfile -from time import perf_counter as clock -from typing import ContextManager - -import numpy as np -import zarr -from zarr.errors import ArrayNotFoundError - -from kvikio.zarr import GDSStore - - -def try_open_zarr_array(dirpath, shape, chunks, dtype): - try: - a = zarr.open_array(dirpath, mode="r") - chunks = chunks or a.chunks - if a.shape == shape and a.chunks == chunks and a.dtype == dtype: - return a - except ArrayNotFoundError: - pass - return None - - -def create_zarr_array(dirpath, shape, chunks=None, dtype=np.float64) -> None: - ret = try_open_zarr_array(dirpath, shape, chunks, dtype) - if ret is None: - ret = zarr.open_array( - dirpath, - shape=shape, - dtype=dtype, - mode="w", - chunks=chunks, - compressor=None, - ) - ret[:] = np.random.random(shape) - - print( - f"Zarr '{ret.store.path}': shape: {ret.shape}, " - f"chunks: {ret.chunks}, dtype: {ret.dtype}" - ) - - -@contextlib.contextmanager -def run_dask(args, *, use_cupy): - from dask import array as da - from dask_cuda import LocalCUDACluster - from distributed import Client - - def f(): - t0 = clock() - if use_cupy: - import cupy - - az = zarr.open_array(GDSStore(args.dir / "A"), meta_array=cupy.empty(())) - bz = zarr.open_array(GDSStore(args.dir / "B"), meta_array=cupy.empty(())) - else: - az = args.dir / "A" - bz = args.dir / "B" - - a = da.from_zarr(az) - b = da.from_zarr(bz) - c = args.op(da, a, b) - - int(c.sum().compute()) - t1 = clock() - return t1 - t0 - - with LocalCUDACluster(n_workers=args.n_workers) as cluster: - with Client(cluster): - yield f - - -@contextlib.contextmanager -def run_legate(args): - import cunumeric as num - - from legate.core import get_legate_runtime - from legate_kvikio.zarr import read_array - - def f(): - get_legate_runtime().issue_execution_fence(block=True) - t0 = clock() - a = read_array(args.dir / "A") - b = read_array(args.dir / "B") - c = args.op(num, a, b) - int(c.sum()) - t1 = clock() - return t1 - t0 - - yield f - - -API = { - "dask-cpu": functools.partial(run_dask, use_cupy=False), - "dask-gpu": functools.partial(run_dask, use_cupy=True), - "legate": run_legate, -} - -OP = {"add": lambda xp, a, b: a + b, "matmul": lambda xp, a, b: xp.matmul(a, b)} - - -def main(args): - create_zarr_array(args.dir / "A", shape=(args.m, args.m)) - create_zarr_array(args.dir / "B", shape=(args.m, args.m)) - - timings = [] - with API[args.api](args) as f: - for _ in range(args.n_warmup_runs): - elapsed = f() - print("elapsed[warmup]: ", elapsed) - for i in range(args.nruns): - elapsed = f() - print(f"elapsed[run #{i}]: ", elapsed) - timings.append(elapsed) - print(f"elapsed mean: {np.mean(timings):.5}s (std: {np.std(timings):.5}s)") - - -if __name__ == "__main__": - - def parse_directory(x): - if x is None: - return x - else: - p = pathlib.Path(x) - if not p.is_dir(): - raise argparse.ArgumentTypeError("Must be a directory") - return p - - parser = argparse.ArgumentParser(description="Matrix operation on two Zarr files") - parser.add_argument( - "-m", - default=100, - type=int, - help="Dimension of the two squired input matrix (MxM) (default: %(default)s).", - ) - parser.add_argument( - "-d", - "--dir", - metavar="PATH", - default=None, - type=parse_directory, - help="Path to the directory to r/w from (default: tempfile.TemporaryDirectory)", - ) - parser.add_argument( - "--nruns", - metavar="RUNS", - default=1, - type=int, - help="Number of runs (default: %(default)s).", - ) - parser.add_argument( - "--api", - 
metavar="API", - default=tuple(API.keys())[0], - choices=tuple(API.keys()), - help="API to use {%(choices)s}", - ) - parser.add_argument( - "--n-workers", - default=1, - type=int, - help="Number of workers (default: %(default)s).", - ) - parser.add_argument( - "--op", - metavar="OP", - default=tuple(OP.keys())[0], - choices=tuple(OP.keys()), - help="Operation to run {%(choices)s}", - ) - parser.add_argument( - "--n-warmup-runs", - default=1, - type=int, - help="Number of warmup runs (default: %(default)s).", - ) - - args = parser.parse_args() - args.op = OP[args.op] # Parse the operation argument - - # Create a temporary directory if user didn't specify a directory - temp_dir: tempfile.TemporaryDirectory | ContextManager - if args.dir is None: - temp_dir = tempfile.TemporaryDirectory() - args.dir = pathlib.Path(temp_dir.name) - else: - temp_dir = contextlib.nullcontext() - - with temp_dir: - main(args) diff --git a/legate/cpp/CMakeLists.txt b/legate/cpp/CMakeLists.txt deleted file mode 100644 index 37dc99ae55..0000000000 --- a/legate/cpp/CMakeLists.txt +++ /dev/null @@ -1,28 +0,0 @@ -# ============================================================================= -# Copyright (c) 2023, NVIDIA CORPORATION. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except -# in compliance with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software distributed under the License -# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express -# or implied. See the License for the specific language governing permissions and limitations under -# the License. -# ============================================================================= - -set(CMAKE_EXPORT_COMPILE_COMMANDS ON) - -find_package(CUDAToolkit REQUIRED) - -file(GLOB SOURCES "*.cpp" "*.hpp") -add_library(legate_kvikio ${SOURCES}) - -target_include_directories( - legate_kvikio - PRIVATE $ ${CMAKE_SOURCE_DIR}/../cpp/include - INTERFACE $ -) - -target_link_libraries(legate_kvikio PRIVATE legate::core CUDA::cudart) diff --git a/legate/cpp/legate_kvikio.cpp b/legate/cpp/legate_kvikio.cpp deleted file mode 100644 index 4899fdece4..0000000000 --- a/legate/cpp/legate_kvikio.cpp +++ /dev/null @@ -1,146 +0,0 @@ -/* - * Copyright (c) 2023, NVIDIA CORPORATION. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include -#include -#include -#include -#include -#include - -#include "legate_mapping.hpp" -#include "task_opcodes.hpp" - -#include - -namespace legate_kvikio { - -/** - * @brief Functor converting Legate type code to size - */ -struct elem_size_fn { - template - size_t operator()() - { - return sizeof(legate::legate_type_of); - } -}; - -/** - * @brief Get the size of a Legate type code - * - * @param code Legate type code - * @return The number of bytes - */ -size_t sizeof_legate_type_code(legate::Type::Code code) -{ - return legate::type_dispatch(code, elem_size_fn{}); -} - -/** - * @brief Get store argument from task context - * - * @tparam IsOutputArgument Whether it is an output or an input argument - * @param context Legate task context. - * @param i The argument index - * @return The i'th argument store argument - */ -template -legate::Store& get_store_arg(legate::TaskContext& context, int i) -{ - if constexpr (IsOutputArgument) { return context.outputs()[i]; } - return context.inputs()[i]; -} - -/** - * @brief Read or write Legate store to or from disk using KvikIO - * - * @tparam IsReadOperation Whether the operation is a read or a write operation - * @param context Legate task context. - */ -template -void read_write_store(legate::TaskContext& context) -{ - std::string path = context.scalars()[0].value(); - legate::Store& store = get_store_arg(context, 0); - auto shape = store.shape<1>(); - - size_t itemsize = sizeof_legate_type_code(store.code()); - if (shape.volume() == 0) { return; } - size_t nbytes = shape.volume() * itemsize; - size_t offset = shape.lo.x * itemsize; // Offset in bytes - std::array strides{}; - - // We know that the accessor is contiguous because we set `policy.exact = true` - // in `Mapper::store_mappings()`. - // TODO: support of non-contigues stores - if constexpr (IsReadOperation) { - kvikio::FileHandle f(path, "r"); - auto* data = store.write_accessor().ptr(shape, strides.data()); - assert(strides[0] == itemsize); - f.pread(data, nbytes, offset).get(); - } else { - kvikio::FileHandle f(path, "r+"); - const auto* data = store.read_accessor().ptr(shape, strides.data()); - assert(strides[0] == itemsize); - f.pwrite(data, nbytes, offset).get(); - } -} - -/** - * @brief Write a Legate store to disk using KvikIO - * Task signature: - * - scalars: - * - path: std::string - * - inputs: - * - buffer: 1d store (any dtype) - * NB: the file must exist before running this task because in order to support - * access from multiple processes, this task opens the file in "r+" mode. 
- */ -class WriteTask : public Task { - public: - static void cpu_variant(legate::TaskContext& context) { read_write_store(context); } - - static void gpu_variant(legate::TaskContext& context) { read_write_store(context); } -}; - -/** - * @brief Read a Legate store from disk using KvikIO - * Task signature: - * - scalars: - * - path: std::string - * - outputs: - * - buffer: 1d store (any dtype) - */ -class ReadTask : public Task { - public: - static void cpu_variant(legate::TaskContext& context) { read_write_store(context); } - - static void gpu_variant(legate::TaskContext& context) { read_write_store(context); } -}; - -} // namespace legate_kvikio - -namespace // unnamed -{ - -static void __attribute__((constructor)) register_tasks(void) -{ - legate_kvikio::WriteTask::register_variants(); - legate_kvikio::ReadTask::register_variants(); -} - -} // namespace diff --git a/legate/cpp/legate_mapping.cpp b/legate/cpp/legate_mapping.cpp deleted file mode 100644 index 75ed18e128..0000000000 --- a/legate/cpp/legate_mapping.cpp +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Copyright (c) 2023, NVIDIA CORPORATION. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include - -#include "core/mapping/mapping.h" -#include "legate_mapping.hpp" -#include "task_opcodes.hpp" - -namespace legate_kvikio { - -class Mapper : public legate::mapping::Mapper { - public: - Mapper() {} - - Mapper(const Mapper& rhs) = delete; - Mapper& operator=(const Mapper& rhs) = delete; - - // Legate mapping functions - - void set_machine(const legate::mapping::MachineQueryInterface* machine) override - { - machine_ = machine; - } - - legate::mapping::TaskTarget task_target( - const legate::mapping::Task& task, - const std::vector& options) override - { - return *options.begin(); // Choose first priority - } - - std::vector store_mappings( - const legate::mapping::Task& task, - const std::vector& options) override - { - using legate::mapping::StoreMapping; - std::vector mappings; - const auto& inputs = task.inputs(); - const auto& outputs = task.outputs(); - for (const auto& input : inputs) { - mappings.push_back(StoreMapping::default_mapping(input, options.front())); - mappings.back().policy.exact = true; - } - for (const auto& output : outputs) { - mappings.push_back(StoreMapping::default_mapping(output, options.front())); - mappings.back().policy.exact = true; - } - return std::move(mappings); - } - - legate::Scalar tunable_value(legate::TunableID tunable_id) override { return 0; } - - private: - const legate::mapping::MachineQueryInterface* machine_; -}; - -static const char* const library_name = "legate_kvikio"; - -Legion::Logger log_legate_kvikio(library_name); - -/*static*/ legate::TaskRegistrar& Registry::get_registrar() -{ - static legate::TaskRegistrar registrar; - return registrar; -} - -void registration_callback() -{ - legate::ResourceConfig config = {.max_tasks = OP_NUM_TASK_IDS}; - auto context = legate::Runtime::get_runtime()->create_library( - library_name, config, std::make_unique()); - 
Registry::get_registrar().register_all_tasks(context); -} - -} // namespace legate_kvikio - -extern "C" { - -void legate_kvikio_perform_registration(void) -{ - // Tell the runtime about our registration callback so we hook it - // in before the runtime starts and make it global so that we know - // that this call back is invoked everywhere across all nodes - legate::Core::perform_registration(); -} -} diff --git a/legate/cpp/legate_mapping.hpp b/legate/cpp/legate_mapping.hpp deleted file mode 100644 index 2158cb0955..0000000000 --- a/legate/cpp/legate_mapping.hpp +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright (c) 2023, NVIDIA CORPORATION. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once - -#include "legate.h" - -namespace legate_kvikio { - -struct Registry { - static legate::TaskRegistrar& get_registrar(); -}; - -template -struct Task : public legate::LegateTask { - using Registrar = Registry; - static constexpr int TASK_ID = ID; -}; - -} // namespace legate_kvikio diff --git a/legate/cpp/task_opcodes.hpp b/legate/cpp/task_opcodes.hpp deleted file mode 100644 index 9920a8c7ac..0000000000 --- a/legate/cpp/task_opcodes.hpp +++ /dev/null @@ -1,25 +0,0 @@ - -/* - * Copyright (c) 2023, NVIDIA CORPORATION. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -enum TaskOpCode { - OP_WRITE, - OP_READ, - OP_TILE_WRITE, - OP_TILE_READ, - OP_TILE_READ_BY_OFFSETS, - OP_NUM_TASK_IDS, // must be last -}; diff --git a/legate/cpp/tile_io.cpp b/legate/cpp/tile_io.cpp deleted file mode 100644 index 3ef29704ae..0000000000 --- a/legate/cpp/tile_io.cpp +++ /dev/null @@ -1,269 +0,0 @@ -/* - * Copyright (c) 2023, NVIDIA CORPORATION. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "legate_mapping.hpp" -#include "task_opcodes.hpp" - -#include - -namespace { - -/** - * @brief Get the tile coordinate based on a task index - * - * @param task_index Task index - * @param tile_start The start tile coordinate - * @return Tile coordinate - */ -legate::DomainPoint get_tile_coord(legate::DomainPoint task_index, - legate::Span& tile_start) -{ - for (uint32_t i = 0; i < task_index.dim; ++i) { - task_index[i] += tile_start[i]; - } - return task_index; -} - -/** - * @brief Get the file path of a tile - * - * @param dirpath The path to the root directory of the Zarr file - * @param tile_coord The coordinate of the tile - * @param delimiter The delimiter - * @return Path to the file representing the requested tile - */ -std::filesystem::path get_file_path(const std::string& dirpath, - const legate::DomainPoint& tile_coord, - const std::string& delimiter = ".") -{ - std::stringstream ss; - for (int32_t idx = 0; idx < tile_coord.dim; ++idx) { - if (idx != 0) { ss << delimiter; } - ss << tile_coord[idx]; - } - return std::filesystem::path(dirpath) / ss.str(); -} - -/** - * @brief Functor for tiling read or write Legate store to or from disk using KvikIO - * - * @tparam IsReadOperation Whether the operation is a read or a write operation - * @param context The Legate task context - * @param store The Legate store to read or write - */ -template -struct tile_read_write_fn { - template - void operator()(legate::TaskContext& context, legate::Store& store) - { - using DTYPE = legate::legate_type_of; - const auto task_index = context.get_task_index(); - const std::string path = context.scalars().at(0).value(); - legate::Span tile_shape = context.scalars().at(1).values(); - legate::Span tile_start = context.scalars().at(2).values(); - const auto tile_coord = get_tile_coord(task_index, tile_start); - const auto filepath = get_file_path(path, tile_coord); - - auto shape = store.shape(); - auto shape_volume = shape.volume(); - if (shape_volume == 0) { return; } - size_t nbytes = shape_volume * sizeof(DTYPE); - - // We know that the accessor is contiguous because we set `policy.exact = true` - // in `Mapper::store_mappings()`. 
- if constexpr (IsReadOperation) { - kvikio::FileHandle f(filepath, "r"); - auto* data = store.write_accessor().ptr(shape); - f.pread(data, nbytes).get(); - } else { - kvikio::FileHandle f(filepath, "w"); - const auto* data = store.read_accessor().ptr(shape); - f.pwrite(data, nbytes).get(); - } - } -}; - -/** - * @brief Flatten the domain point to a 1D point - * - * @param lo_dp Lower point - * @param hi_dp High point - * @param point_dp The domain point to flatten - * @return The flatten domain point - */ -template -size_t linearize(const legate::DomainPoint& lo_dp, - const legate::DomainPoint& hi_dp, - const legate::DomainPoint& point_dp) -{ - const legate::Point lo = lo_dp; - const legate::Point hi = hi_dp; - const legate::Point point = point_dp - lo_dp; - const legate::Point extents = hi - lo + legate::Point::ONES(); - size_t idx = 0; - for (int32_t dim = 0; dim < DIM; ++dim) { - idx = idx * extents[dim] + point[dim]; - } - return idx; -} - -/** - * @brief Functor for tiling read Legate store by offsets from disk using KvikIO - * - * @param context The Legate task context - * @param store The Legate output store - */ -struct tile_read_by_offsets_fn { - template - void operator()(legate::TaskContext& context, legate::Store& store) - { - using DTYPE = legate::legate_type_of; - const auto task_index = context.get_task_index(); - const auto launch_domain = context.get_launch_domain(); - const std::string path = context.scalars().at(0).value(); - legate::Span offsets = context.scalars().at(1).values(); - legate::Span tile_shape = context.scalars().at(2).values(); - - // Flatten task index - uint32_t flatten_task_index = - linearize(launch_domain.lo(), launch_domain.hi(), task_index); - - auto shape = store.shape(); - auto shape_volume = shape.volume(); - if (shape_volume == 0) { return; } - size_t nbytes = shape_volume * sizeof(DTYPE); - - // We know that the accessor is contiguous because we set `policy.exact = true` - // in `Mapper::store_mappings()`. - kvikio::FileHandle f(path, "r"); - auto* data = store.write_accessor().ptr(shape); - f.pread(data, nbytes, offsets[flatten_task_index]).get(); - } -}; - -} // namespace - -namespace legate_kvikio { - -/** - * @brief Write a tiled Legate store to disk using KvikIO - * Task signature: - * - scalars: - * - path: std::string - * - tile_shape: tuple of int64_t - * - tile_start: tuple of int64_t - * - inputs: - * - buffer: store (any dtype) - * - * NB: the store must be contigues. To make Legate in force this, - * set `policy.exact = true` in `Mapper::store_mappings()`. - * - */ -class TileWriteTask : public Task { - public: - static void cpu_variant(legate::TaskContext& context) - { - legate::Store& store = context.inputs().at(0); - legate::double_dispatch(store.dim(), store.code(), tile_read_write_fn{}, context, store); - } - - static void gpu_variant(legate::TaskContext& context) - { - // Since KvikIO supports both GPU and CPU memory seamlessly, we reuse the CPU variant. - cpu_variant(context); - } -}; - -/** - * @brief Read a tiled Legate store to disk using KvikIO - * Task signature: - * - scalars: - * - path: std::string - * - tile_shape: tuple of int64_t - * - tile_start: tuple of int64_t - * - outputs: - * - buffer: store (any dtype) - * - * NB: the store must be contigues. To make Legate in force this, - * set `policy.exact = true` in `Mapper::store_mappings()`. 
- * - */ -class TileReadTask : public Task { - public: - static void cpu_variant(legate::TaskContext& context) - { - legate::Store& store = context.outputs().at(0); - legate::double_dispatch(store.dim(), store.code(), tile_read_write_fn{}, context, store); - } - - static void gpu_variant(legate::TaskContext& context) - { - // Since KvikIO supports both GPU and CPU memory seamlessly, we reuse the CPU variant. - cpu_variant(context); - } -}; - -/** - * @brief Read a tiled Legate store by offset to disk using KvikIO - * Task signature: - * - scalars: - * - path: std::string - * - offsets: tuple of int64_t - * - tile_shape: tuple of int64_t - * - outputs: - * - buffer: store (any dtype) - * - * NB: the store must be contigues. To make Legate in force this, - * set `policy.exact = true` in `Mapper::store_mappings()`. - * - */ -class TileReadByOffsetsTask - : public Task { - public: - static void cpu_variant(legate::TaskContext& context) - { - legate::Store& store = context.outputs().at(0); - legate::double_dispatch(store.dim(), store.code(), tile_read_by_offsets_fn{}, context, store); - } - - static void gpu_variant(legate::TaskContext& context) - { - // Since KvikIO supports both GPU and CPU memory seamlessly, we reuse the CPU variant. - cpu_variant(context); - } -}; - -} // namespace legate_kvikio - -namespace { - -void __attribute__((constructor)) register_tasks() -{ - legate_kvikio::TileWriteTask::register_variants(); - legate_kvikio::TileReadTask::register_variants(); - legate_kvikio::TileReadByOffsetsTask::register_variants(); -} - -} // namespace diff --git a/legate/examples/basic_io.py b/legate/examples/basic_io.py deleted file mode 100644 index 50f29b3066..0000000000 --- a/legate/examples/basic_io.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. -# See file LICENSE for terms. - -import cunumeric as num - -import legate_kvikio as kvikio -from legate.core import get_legate_runtime - - -def main(path): - a = num.arange(1_000_000) - f = kvikio.CuFile(path, "w+") - f.write(a) - - # In order to make sure the file has been written before the following - # read execute, we insert a fence between the write and read. - # Notice, this call isn't blocking. - get_legate_runtime().issue_execution_fence(block=False) - - b = num.empty_like(a) - f.read(b) - f.close() - - # In order to make sure the file has been written before re-opening - # it for reading, we block the execution. - get_legate_runtime().issue_execution_fence(block=True) - - c = num.empty_like(a) - with kvikio.CuFile(path, "r") as f: - f.read(c) - - # They should all be identical - assert all(a == b) - assert all(a == c) - print("sum: ", c.sum()) - - -if __name__ == "__main__": - main("/tmp/legate-kvikio-hello-world-file") diff --git a/legate/examples/hdf5_io.py b/legate/examples/hdf5_io.py deleted file mode 100644 index 5332cd571a..0000000000 --- a/legate/examples/hdf5_io.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. -# See file LICENSE for terms. 
- - -import h5py -import numpy as np - -import legate_kvikio.kerchunk - - -def hdf5_io(filename): - a = np.arange(10000).reshape((100, 100)) - - # Write array using h5py - with h5py.File(filename, "w") as f: - f.create_dataset("mydataset", chunks=(10, 10), data=a) - - # Read hdf5 file using legate+kerchunk - b = legate_kvikio.kerchunk.hdf5_read(filename, dataset_name="mydataset") - - # They should be equal - assert (a == b).all() - - -if __name__ == "__main__": - hdf5_io("/tmp/legate-kvikio-io.hdf5") diff --git a/legate/examples/zarr_io.py b/legate/examples/zarr_io.py deleted file mode 100644 index f79cbe6826..0000000000 --- a/legate/examples/zarr_io.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. -# See file LICENSE for terms. - -import cunumeric as num - -import legate.core -import legate_kvikio.zarr - - -def zarr_io(dirname): - a = num.arange(10000).reshape(100, 100) - - # Write array to a Zarr file by chunks of 10x10. - legate_kvikio.zarr.write_array(a, dirname, chunks=(10, 10)) - - # Block until done writing. - legate.core.get_legate_runtime().issue_execution_fence(block=True) - - # Read array from a Zarr file. - b = legate_kvikio.zarr.read_array(dirname) - - # They should be equal - assert (a == b).all() - - -if __name__ == "__main__": - zarr_io("/tmp/legate-kvikio-zarr-io") diff --git a/legate/legate_kvikio/__init__.py b/legate/legate_kvikio/__init__.py deleted file mode 100644 index b875d77429..0000000000 --- a/legate/legate_kvikio/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. -# See file LICENSE for terms. - -from .cufile import CuFile # noqa: F401 - -__version__ = "24.04.00" diff --git a/legate/legate_kvikio/cufile.py b/legate/legate_kvikio/cufile.py deleted file mode 100644 index c35ebc8909..0000000000 --- a/legate/legate_kvikio/cufile.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. -# See file LICENSE for terms. - -from __future__ import annotations - -import pathlib -from typing import Any - -import legate.core.types as types - -from .library_description import TaskOpCode, context -from .utils import get_legate_store - - -class CuFile: - """File handle for GPUDirect Storage (GDS)""" - - def __init__(self, file: pathlib.Path | str, flags: str = "r"): - """Open file for GDS IO operations - - The file is opened in this constructor immediately and not in a - Legate task. This means that re-opening a file that was created - by a not-yet-executed Legate task requires a blocking fence like - `get_legate_runtime().issue_execution_fence(block=True)`. - - Legate-KvikIO doesn't maintain a file descriptor thus the file path - to the file must not change while opened by this handle. - - Parameters - ---------- - file: pathlib.Path or str - Path-like object giving the pathname (absolute or relative to the - current working directory) of the file to be opened and registered. - flags: str, optional - "r" -> "open for reading (default)" - "w" -> "open for writing, truncating the file first" - "+" -> "open for updating (reading and writing)" - """ - assert "a" not in flags - self._closed = False - self._filepath = str(file) - self._flags = flags - - # We open the file here in order to: - # * trigger exceptions here instead of in the Legate tasks, which - # forces the Python interpreter to exit. - # * create or truncate files opened in "w" mode, which is required - # because `TaskOpCode.WRITE` always opens the file in "r+" mode. 
- with open(self._filepath, mode=flags): - pass - - def close(self) -> None: - """Deregister the file and close the file""" - self._closed = True - - @property - def closed(self) -> bool: - return self._closed - - def fileno(self) -> int: - raise RuntimeError("Legate-KvikIO doesn't expose any file descriptor") - - def open_flags(self) -> int: - """Get the flags of the file descriptor (see open(2))""" - raise RuntimeError("Legate-KvikIO doesn't expose any file descriptor") - - def __enter__(self) -> CuFile: - return self - - def __exit__(self, exc_type, exc_val, exc_tb) -> None: - self.close() - - def read(self, buf: Any) -> None: - """Reads specified buffer from the file into device or host memory - - Warning, the size of `buf` must be greater than the size of the file. - - Parameters - ---------- - buf: legate-store-like (1-dimensional) - A Legate store or any object implementing `__legate_data_interface__` to - read into. - """ - assert not self._closed - if "r" not in self._flags and "+" not in self._flags: - raise ValueError(f"Cannot read a file opened with flags={self._flags}") - - output = get_legate_store(buf) - task = context.create_auto_task(TaskOpCode.READ) - task.add_scalar_arg(self._filepath, types.string) - task.add_output(output) - task.set_side_effect(True) - task.execute() - - def write(self, buf: Any) -> None: - """Writes specified buffer from device or host memory to the file - - Hint, if a subsequent operation read this file, insert a fence in between - such as `legate.core.get_legate_runtime().issue_execution_fence(block=False)` - - Parameters - ---------- - buf: legate-store-like (1-dimensional) - A Legate store or any object implementing `__legate_data_interface__` to - write into buffer. - """ - assert not self._closed - if "w" not in self._flags and "+" not in self._flags: - raise ValueError(f"Cannot write to a file opened with flags={self._flags}") - - input = get_legate_store(buf) - task = context.create_auto_task(TaskOpCode.WRITE) - task.add_scalar_arg(self._filepath, types.string) - task.add_input(input) - task.set_side_effect(True) - task.execute() diff --git a/legate/legate_kvikio/kerchunk.py b/legate/legate_kvikio/kerchunk.py deleted file mode 100644 index a226fe965e..0000000000 --- a/legate/legate_kvikio/kerchunk.py +++ /dev/null @@ -1,80 +0,0 @@ -# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. -# See file LICENSE for terms. - -from __future__ import annotations - -import itertools -import math -import pathlib - -import cunumeric -import fsspec -import zarr.core -import zarr.hierarchy -from kerchunk.hdf import SingleHdf5ToZarr - -from .tile import read_tiles_by_offsets -from .zarr import get_padded_array - - -def hdf5_read(filepath: pathlib.Path | str, dataset_name: str) -> cunumeric.ndarray: - """Read an HDF5 array from disk using Kerchunk and KvikIO - - We use Kerchunk's `SingleHdf5ToZarr` to find the data chunks embedded - in the hdf5 file. If it fails for any reason, this function fails as well. - - Notes - ----- - The returned array might be a view of a underlying array that has been padded in - order to make its shape divisible by the shape of the Zarr chunks on disk. - - Parameters - ---------- - filepath - File path to the hdf5 file. - - Return - ------ - The cuNumeric array read from disk. 
- """ - filepath = pathlib.Path(filepath) - - # TODO: look for already generated kerchunk annotations - annotations = SingleHdf5ToZarr(filepath, inline_threshold=0).translate() - - # Load annotations - zarr_group = zarr.open(fsspec.get_mapper("reference://", fo=annotations)) - zarr_ary: zarr.Array = zarr_group[dataset_name] - if zarr_ary.compressor is not None: - raise NotImplementedError("compressor isn't supported") - - # Extract offset and bytes for each chunk - refs = annotations["refs"] - offsets = [] - tile_nbytes = math.prod(zarr_ary.chunks) * zarr_ary.itemsize - for chunk_coord in itertools.product( - *(range(math.ceil(s / c)) for s, c in zip(zarr_ary.shape, zarr_ary.chunks)) - ): - key = zarr_ary._chunk_key(chunk_coord) - _, offset, nbytes = refs[key] - offsets.append(offset) - assert tile_nbytes == nbytes - - padded_ary = get_padded_array(zarr_ary) - if padded_ary is None: - ret = cunumeric.empty(shape=zarr_ary.shape, dtype=zarr_ary.dtype) - read_tiles_by_offsets( - ret, - filepath=filepath, - offsets=tuple(offsets), - tile_shape=zarr_ary.chunks, - ) - else: - read_tiles_by_offsets( - padded_ary, - filepath=filepath, - offsets=tuple(offsets), - tile_shape=zarr_ary.chunks, - ) - ret = padded_ary[tuple(slice(s) for s in zarr_ary.shape)] - return ret diff --git a/legate/legate_kvikio/library_description.py b/legate/legate_kvikio/library_description.py deleted file mode 100644 index 661b4cc096..0000000000 --- a/legate/legate_kvikio/library_description.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. -# See file LICENSE for terms. - -import os -from enum import IntEnum -from typing import Any - -from legate.core import Library, get_legate_runtime -from legate_kvikio.install_info import header, libpath - - -class LibraryDescription(Library): - def __init__(self) -> None: - self.shared_object: Any = None - - @property - def cffi(self) -> Any: - return self.shared_object - - def get_name(self) -> str: - return "legate_kvikio" - - def get_shared_library(self) -> str: - return os.path.join(libpath, f"liblegate_kvikio{self.get_library_extension()}") - - def get_c_header(self) -> str: - return header - - def get_registration_callback(self) -> str: - return "legate_kvikio_perform_registration" - - def initialize(self, shared_object: Any) -> None: - self.shared_object = shared_object - - def destroy(self) -> None: - pass - - -description = LibraryDescription() -context = get_legate_runtime().register_library(description) - - -class TaskOpCode(IntEnum): - WRITE = description.cffi.OP_WRITE - READ = description.cffi.OP_READ - TILE_WRITE = description.cffi.OP_TILE_WRITE - TILE_READ = description.cffi.OP_TILE_READ - TILE_READ_BY_OFFSETS = description.cffi.OP_TILE_READ_BY_OFFSETS diff --git a/legate/legate_kvikio/tile.py b/legate/legate_kvikio/tile.py deleted file mode 100644 index 54f9fba7cf..0000000000 --- a/legate/legate_kvikio/tile.py +++ /dev/null @@ -1,162 +0,0 @@ -# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. -# See file LICENSE for terms. 
- -from __future__ import annotations - -import pathlib -from typing import Iterable, Optional, Tuple - -import cunumeric - -import legate.core.types as types -from legate.core import Rect - -from .library_description import TaskOpCode, context -from .utils import get_legate_store - - -def _tile_read_write( - op: TaskOpCode, - ary: cunumeric.ndarray, - dirpath: pathlib.Path | str, - tile_shape: Tuple[int], - tile_start: Optional[Tuple[int]], -) -> None: - """Implementation of `write_tiles` and `read_tiles`""" - - dirpath = pathlib.Path(dirpath) - if tile_start is None: - tile_start = (0,) * len(tile_shape) - - if len(ary.shape) != len(tile_shape): - raise ValueError("Tile shape and array shape must have same number of axes") - if any(d % c != 0 for d, c in zip(ary.shape, tile_shape)): - raise ValueError( - f"The tile shape {tile_shape} must be " - f"divisible with the array shape {ary.shape}" - ) - - # Partition the array into even tiles - store_partition = get_legate_store(ary).partition_by_tiling(tile_shape) - - # Use the partition's color shape as the launch shape so there will be - # one task for each tile - launch_shape = store_partition.partition.color_shape - task = context.create_manual_task( - op, - launch_domain=Rect(launch_shape), - ) - if op == TaskOpCode.TILE_WRITE: - task.add_input(store_partition) - elif op == TaskOpCode.TILE_READ: - task.add_output(store_partition) - else: - raise ValueError(f"Unknown op: {op}") - task.add_scalar_arg(str(dirpath), types.string) - task.add_scalar_arg(tile_shape, (types.uint64,)) - task.add_scalar_arg(tile_start, (types.uint64,)) - task.execute() - - -def write_tiles( - ary: cunumeric.ndarray, - dirpath: pathlib.Path | str, - tile_shape: Tuple[int], - tile_start: Optional[Tuple[int]] = None, -) -> None: - """Write an array as multiple tiles to disk using KvikIO - - The array shape must be divisible with the tile shape. - - Parameters - ---------- - ary - The cuNumeric array to write. - dirpath - Root directory of the tile files. - tile_shape - The shape of each tile. - tile_start - The start coordinate of the tiles - """ - _tile_read_write(TaskOpCode.TILE_WRITE, ary, dirpath, tile_shape, tile_start) - - -def read_tiles( - ary: cunumeric.ndarray, - dirpath: pathlib.Path | str, - tile_shape: Tuple[int], - tile_start: Optional[Tuple[int]] = None, -) -> None: - """Read multiple tiles from disk into an array using KvikIO - - The array shape must be divisible with the tile shape. - - Parameters - ---------- - ary - The cuNumeric array to read into. - dirpath - Root directory of the tile files. - tile_shape - The shape of each tile. - tile_start - The start coordinate of the tiles - """ - _tile_read_write(TaskOpCode.TILE_READ, ary, dirpath, tile_shape, tile_start) - - -def read_tiles_by_offsets( - ary: cunumeric.ndarray, - filepath: Iterable[pathlib.Path | str], - offsets: Tuple[int], - tile_shape: Tuple[int], -) -> None: - """Read multiple tiles from a single file into an array using KvikIO - - The array shape must be divisible with the tile shape. - - # TODO: support a filepath per offset/size - - Parameters - ---------- - ary - The cuNumeric array to read into. - filepath - Filepath to the file. - offsets - The offset of each tile in the file (in bytes). - tile_shape - The shape of each tile. 
- """ - - if len(ary.shape) != len(tile_shape): - raise ValueError("Tile shape and array shape must have same number of axes") - if any(d % c != 0 for d, c in zip(ary.shape, tile_shape)): - raise ValueError( - f"The tile shape {tile_shape} must be " - f"divisible with the array shape {ary.shape}" - ) - - # Partition the array into even tiles - store_partition = get_legate_store(ary).partition_by_tiling(tile_shape) - - # Use the partition's color shape as the launch shape so there will be - # one task for each tile - launch_shape = Rect(store_partition.partition.color_shape) - launch_vol = launch_shape.get_volume() - if launch_vol != len(offsets): - raise ValueError( - f"Number of offsets ({len(offsets)}) must match the number " - f"of tiles of `ary` ({launch_vol})" - ) - task = context.create_manual_task( - TaskOpCode.TILE_READ_BY_OFFSETS, - launch_domain=launch_shape, - ) - - task.add_output(store_partition) - task.add_scalar_arg(str(filepath), types.string) - task.add_scalar_arg(offsets, (types.uint64,)) - task.add_scalar_arg(tile_shape, (types.uint64,)) - task.execute() diff --git a/legate/legate_kvikio/utils.py b/legate/legate_kvikio/utils.py deleted file mode 100644 index f46e36bd96..0000000000 --- a/legate/legate_kvikio/utils.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. -# See file LICENSE for terms. - -from typing import Any - -from legate.core import Store - - -def get_legate_store(buf: Any) -> Store: - """Extracts a Legate store from object - - Supports any object that implements the Legate data interface - (`__legate_data_interface__`). - - Parameters - ---------- - buf: legate-store-like - Object implement the Legate store interface - - Returns - ------- - Store - The extracted Legate store - """ - if isinstance(buf, Store): - return buf - data = buf.__legate_data_interface__["data"] - field = next(iter(data)) - array = data[field] - _, store = array.stores() - return store diff --git a/legate/legate_kvikio/zarr.py b/legate/legate_kvikio/zarr.py deleted file mode 100644 index 03a466448c..0000000000 --- a/legate/legate_kvikio/zarr.py +++ /dev/null @@ -1,117 +0,0 @@ -# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. -# See file LICENSE for terms. - -from __future__ import annotations - -import math -import pathlib -from typing import Optional, Tuple - -import cunumeric -import zarr.core - -from .tile import read_tiles, write_tiles - - -def get_padded_array(zarr_ary: zarr.Array) -> Optional[cunumeric.ndarray]: - """Get a padded array that has an shape divisible by `zarr_ary.chunks`. - - Parameters - ---------- - zarr_ary - The Zarr array - - Return - ------ - The padded array or None if the shapes are already divisible - """ - if all(s % c == 0 for s, c in zip(zarr_ary.shape, zarr_ary.chunks)): - return None # Already aligned - - padded_shape = tuple( - math.ceil(s / c) * c for s, c in zip(zarr_ary.shape, zarr_ary.chunks) - ) - return cunumeric.empty(shape=padded_shape, dtype=zarr_ary.dtype) - - -def write_array( - ary: cunumeric.ndarray, - dirpath: pathlib.Path | str, - chunks: Optional[int | Tuple[int]], - compressor=None, -) -> None: - """Write an Zarr array to disk using KvikIO - - Notes - ----- - The array is padded to make its shape divisible by chunks (if not already). - This involves coping the whole array, which can be expensive both in terms of - performance and memory usage. - TODO: minimize the copy needed - - Parameters - ---------- - ary - The cuNumeric array to write. 
-    dirpath
-        Root directory of the tile files.
-    chunks
-        The chunk shape of the Zarr array.
-    compressor
-        Zarr compressor; only `None` (no compression) is supported.
-    """
-
-    dirpath = pathlib.Path(dirpath)
-    if compressor is not None:
-        raise NotImplementedError("compressor isn't supported")
-
-    # We use Zarr to write the metadata
-    zarr_ary = zarr.open_array(
-        dirpath,
-        shape=ary.shape,
-        dtype=ary.dtype,
-        mode="w",
-        chunks=chunks,
-        compressor=compressor,
-    )
-    padded_ary = get_padded_array(zarr_ary)
-    if padded_ary is None:
-        write_tiles(ary, dirpath=dirpath, tile_shape=zarr_ary.chunks)
-    else:
-        padded_ary[tuple(slice(s) for s in zarr_ary.shape)] = ary
-        write_tiles(padded_ary, dirpath=dirpath, tile_shape=zarr_ary.chunks)
-
-
-def read_array(dirpath: pathlib.Path | str) -> cunumeric.ndarray:
-    """Read a Zarr array from disk using KvikIO
-
-    Notes
-    -----
-    The returned array might be a view of an underlying array that has been padded in
-    order to make its shape divisible by the shape of the Zarr chunks on disk.
-
-    Parameters
-    ----------
-    dirpath
-        Root directory of the tile files.
-
-    Returns
-    -------
-    The cuNumeric array read from disk.
-    """
-
-    dirpath = pathlib.Path(dirpath)
-
-    # We use Zarr to read the metadata
-    zarr_ary = zarr.open_array(dirpath, mode="r")
-    if zarr_ary.compressor is not None:
-        raise NotImplementedError("compressor isn't supported")
-
-    padded_ary = get_padded_array(zarr_ary)
-    if padded_ary is None:
-        ret = cunumeric.empty(shape=zarr_ary.shape, dtype=zarr_ary.dtype)
-        read_tiles(ret, dirpath=dirpath, tile_shape=zarr_ary.chunks)
-    else:
-        read_tiles(padded_ary, dirpath=dirpath, tile_shape=zarr_ary.chunks)
-        ret = padded_ary[tuple(slice(s) for s in zarr_ary.shape)]
-    return ret
diff --git a/legate/pyproject.toml b/legate/pyproject.toml
deleted file mode 100644
index 801d6f1310..0000000000
--- a/legate/pyproject.toml
+++ /dev/null
@@ -1,110 +0,0 @@
-# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
-# See file LICENSE for terms.
-
-[build-system]
-build-backend = "setuptools.build_meta"
-requires = [
-    "cmake>=3.26.4",
-    "cython>=3.0.0",
-    "ninja",
-    "scikit-build-core[pyproject]>=0.7.0",
-    "setuptools",
-    "wheel",
-] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../dependencies.yaml and run `rapids-dependency-file-generator`.
-
-[project]
-name = "legate_kvikio"
-version = "24.04.00"
-description = "KvikIO - GPUDirect Storage"
-readme = { file = "README.md", content-type = "text/markdown" }
-authors = [
-    { name = "NVIDIA Corporation" },
-]
-license = { text = "Apache 2.0" }
-requires-python = ">=3.9"
-dependencies = [
-    "cupy-cuda11x>=12.0.0",
-    "numcodecs <0.12.0",
-    "numpy>=1.21",
-    "packaging",
-    "zarr",
-] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../dependencies.yaml and run `rapids-dependency-file-generator`.
-classifiers = [
-    "Intended Audience :: Developers",
-    "Topic :: Database",
-    "Topic :: Scientific/Engineering",
-    "License :: OSI Approved :: Apache Software License",
-    "Programming Language :: Python",
-    "Programming Language :: Python :: 3.9",
-    "Programming Language :: Python :: 3.10",
-]
-
-[project.optional-dependencies]
-test = [
-    "cuda-python>=11.7.1,<12.0a0",
-    "dask>=2022.05.2",
-    "distributed>=2022.05.2",
-    "pytest",
-    "pytest-cov",
-] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../dependencies.yaml and run `rapids-dependency-file-generator`.
- -[project.urls] -Homepage = "https://github.com/rapidsai/kvikio" - -[tool.black] -line-length = 88 -target-version = ["py39"] -include = '\.py?$' -exclude = ''' -/( - thirdparty | - \.eggs | - \.git | - \.hg | - \.mypy_cache | - \.tox | - \.venv | - _build | - buck-out | - build | - dist | - _skbuild -)/ -''' - -[tool.isort] -line_length = 88 -multi_line_output = 3 -include_trailing_comma = true -force_grid_wrap = 0 -combine_as_imports = true -order_by_type = true -known_first_party = [ - "kvikio", - "legate_kvikio", -] -default_section = "THIRDPARTY" -sections = [ - "FUTURE", - "STDLIB", - "THIRDPARTY", - "FIRSTPARTY", - "LOCALFOLDER", -] -skip = [ - "thirdparty", - ".eggs", - ".git", - ".hg", - ".mypy_cache", - ".tox", - ".venv", - "_build", - "buck-out", - "build", - "dist", - "__init__.py", -] - -[tool.mypy] -ignore_missing_imports = true diff --git a/legate/setup.py b/legate/setup.py deleted file mode 100644 index e5b8ca5c9c..0000000000 --- a/legate/setup.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. -# See file LICENSE for terms. - - -import os -from pathlib import Path - -from setuptools import find_packages -from skbuild import setup - -import legate.install_info as lg_install_info - -legate_dir = Path(lg_install_info.libpath).parent.as_posix() - -cmake_flags = [ - f"-Dlegate_core_ROOT:STRING={legate_dir}", -] - -os.environ["SKBUILD_CONFIGURE_OPTIONS"] = " ".join(cmake_flags) - - -setup( - packages=find_packages(exclude=["tests*"]), - include_package_data=True, - zip_safe=False, -) diff --git a/legate/tests/test_basic_io.py b/legate/tests/test_basic_io.py deleted file mode 100644 index 7b7d122861..0000000000 --- a/legate/tests/test_basic_io.py +++ /dev/null @@ -1,112 +0,0 @@ -# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. -# See file LICENSE for terms. 
- - -import math - -import pytest - -from legate.core import get_legate_runtime -from legate_kvikio import CuFile - -read_tiles = pytest.importorskip("legate_kvikio.tile").read_tiles -write_tiles = pytest.importorskip("legate_kvikio.tile").write_tiles - -num = pytest.importorskip("cunumeric") - - -def fence(*, block: bool): - """Shorthand for a Legate fence""" - get_legate_runtime().issue_execution_fence(block=block) - - -@pytest.mark.parametrize("size", [1, 10, 100, 1000, 1024, 4096, 4096 * 10]) -def test_read_write(tmp_path, size): - """Test basic read/write""" - filename = tmp_path / "test-file" - a = num.arange(size) - f = CuFile(filename, "w") - f.write(a) - assert not f.closed - fence(block=True) - - # Try to read file opened in write-only mode - with pytest.raises(ValueError, match="Cannot read a file opened with flags"): - f.read(a) - - # Close file - f.close() - assert f.closed - - # Read file into a new array and compare - b = num.empty_like(a) - f = CuFile(filename, "r") - f.read(b) - assert all(a == b) - - -def test_file_handle_context(tmp_path): - """Open a CuFile in a context""" - filename = tmp_path / "test-file" - a = num.arange(200) - b = num.empty_like(a) - with CuFile(filename, "w+") as f: - assert not f.closed - f.write(a) - fence(block=False) - f.read(b) - assert all(a == b) - assert f.closed - - -@pytest.mark.parametrize( - "start,end", - [ - (0, 10), - (1, 10), - (0, 10 * 4096), - (1, int(1.3 * 4096)), - (int(2.1 * 4096), int(5.6 * 4096)), - ], -) -def test_read_write_slices(tmp_path, start, end): - """Read and write different slices""" - - filename = tmp_path / "test-file" - a = num.arange(10 * 4096) # 10 page-sizes - b = a.copy() - a[start:end] = 42 - with CuFile(filename, "w") as f: - f.write(a[start:end]) - fence(block=True) - with CuFile(filename, "r") as f: - f.read(b[start:end]) - assert all(a == b) - - -@pytest.mark.parametrize( - "shape,tile_shape", [((2,), (3,)), ((2, 2), (3, 2)), ((2, 3), (2, 2))] -) -def test_read_write_tiles_error(tmp_path, shape, tile_shape): - with pytest.raises(ValueError, match="must be divisible"): - write_tiles(ary=num.ones(shape), dirpath=tmp_path, tile_shape=tile_shape) - with pytest.raises(ValueError, match="must be divisible"): - read_tiles(ary=num.ones(shape), dirpath=tmp_path, tile_shape=tile_shape) - - -@pytest.mark.parametrize( - "shape,tile_shape,tile_start", - [ - ((2,), (2,), (1,)), - ((4,), (2,), (0,)), - ((4, 2), (2, 2), (1, 2)), - ((2, 4), (2, 2), (2, 1)), - ], -) -def test_read_write_tiles(tmp_path, shape, tile_shape, tile_start): - a = num.arange(math.prod(shape)).reshape(shape) - write_tiles(ary=a, dirpath=tmp_path, tile_shape=tile_shape, tile_start=tile_start) - fence(block=True) - b = num.empty_like(a) - read_tiles(ary=b, dirpath=tmp_path, tile_shape=tile_shape, tile_start=tile_start) - assert (a == b).all() diff --git a/legate/tests/test_kerchunk.py b/legate/tests/test_kerchunk.py deleted file mode 100644 index f159ea8f82..0000000000 --- a/legate/tests/test_kerchunk.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. -# See file LICENSE for terms. 
- - -import math - -import numpy as np -import pytest -from numpy.testing import assert_array_equal - -hdf5_read = pytest.importorskip("legate_kvikio.kerchunk").hdf5_read - -num = pytest.importorskip("cunumeric") - -shape_chunks = ( - "shape,chunks", - [ - ((2,), (2,)), - ((5,), (2,)), - ((4, 2), (2, 2)), - ((2, 4), (2, 2)), - ((2, 3), (2, 2)), - ((5, 4, 3, 2), (2, 2, 2, 2)), - ], -) - - -@pytest.mark.parametrize(*shape_chunks) -@pytest.mark.parametrize("dtype", ["u1", "u8", "f8"]) -def test_hdf5_read_array(tmp_path, shape, chunks, dtype): - h5py = pytest.importorskip("h5py") - - filename = tmp_path / "test-file.hdf5" - a = np.arange(math.prod(shape), dtype=dtype).reshape(shape) - with h5py.File(filename, "w") as f: - f.create_dataset("mydataset", chunks=chunks, data=a) - - b = hdf5_read(filename, dataset_name="mydataset") - assert_array_equal(a, b) diff --git a/legate/tests/test_zarr.py b/legate/tests/test_zarr.py deleted file mode 100644 index d641d4b49e..0000000000 --- a/legate/tests/test_zarr.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. -# See file LICENSE for terms. - - -import math - -import numpy as np -import pytest -from numpy.testing import assert_array_equal - -from legate.core import get_legate_runtime - -read_array = pytest.importorskip("legate_kvikio.zarr").read_array -write_array = pytest.importorskip("legate_kvikio.zarr").write_array -num = pytest.importorskip("cunumeric") -zarr = pytest.importorskip("zarr") - - -shape_chunks = ( - "shape,chunks", - [ - ((2,), (2,)), - ((5,), (2,)), - ((4, 2), (2, 2)), - ((2, 4), (2, 2)), - ((2, 3), (3, 2)), - ((4, 3, 2, 1), (1, 2, 3, 4)), - ], -) - - -@pytest.mark.parametrize(*shape_chunks) -@pytest.mark.parametrize("dtype", ["u1", "u8", "f8"]) -def test_write_array(tmp_path, shape, chunks, dtype): - """Test write of a Zarr array""" - a = num.arange(math.prod(shape), dtype=dtype).reshape(shape) - - write_array(ary=a, dirpath=tmp_path, chunks=chunks) - get_legate_runtime().issue_execution_fence(block=True) - - b = zarr.open_array(tmp_path, mode="r") - assert_array_equal(a, b) - - -@pytest.mark.parametrize(*shape_chunks) -@pytest.mark.parametrize("dtype", ["u1", "u8", "f8"]) -def test_read_array(tmp_path, shape, chunks, dtype): - """Test read of a Zarr array""" - a = np.arange(math.prod(shape), dtype=dtype).reshape(shape) - zarr.open_array(tmp_path, mode="w", shape=shape, chunks=chunks, compressor=None)[ - ... - ] = a - - b = read_array(dirpath=tmp_path) - assert_array_equal(a, b)