diff --git a/arkouda/array_api/searching_functions.py b/arkouda/array_api/searching_functions.py
index 61bc17ea88..3276d388ee 100644
--- a/arkouda/array_api/searching_functions.py
+++ b/arkouda/array_api/searching_functions.py
@@ -5,12 +5,11 @@
 import arkouda as ak
 from arkouda.client import generic_msg
 from arkouda.numpy import cast as akcast
-from arkouda.pdarrayclass import create_pdarray, create_pdarrays, parse_single_value
-from arkouda.pdarraycreation import scalar_array
+from arkouda.pdarrayclass import create_pdarray, create_pdarrays
 
 from ._dtypes import _real_floating_dtypes, _real_numeric_dtypes
 from .array_object import Array
-from .manipulation_functions import broadcast_arrays, reshape, squeeze
+from .manipulation_functions import broadcast_arrays
 
 
 def argmax(x: Array, /, *, axis: Optional[int] = None, keepdims: bool = False) -> Array:
@@ -31,31 +30,7 @@ def argmax(x: Array, /, *, axis: Optional[int] = None, keepdims: bool = False) -
     if x.dtype not in _real_numeric_dtypes:
         raise TypeError("Only real numeric dtypes are allowed in argmax")
 
-    if x.ndim > 1 and axis is None:
-        # must flatten ND arrays to 1D without an axis argument
-        x_op = reshape(x, shape=(-1,))
-    else:
-        x_op = x
-
-    resp = generic_msg(
-        cmd=f"reduce->idx{x_op.ndim}D",
-        args={
-            "x": x_op._array,
-            "op": "argmax",
-            "hasAxis": axis is not None,
-            "axis": axis if axis is not None else 0,
-        },
-    )
-
-    if axis is None:
-        return Array._new(scalar_array(parse_single_value(resp)))
-    else:
-        arr = Array._new(create_pdarray(resp))
-
-        if keepdims:
-            return arr
-        else:
-            return squeeze(arr, axis)
+    return Array._new(ak.argmax(x._array, axis=axis, keepdims=keepdims))
 
 
 def argmin(x: Array, /, *, axis: Optional[int] = None, keepdims: bool = False) -> Array:
@@ -74,32 +49,7 @@ def argmin(x: Array, /, *, axis: Optional[int] = None, keepdims: bool = False) -
     """
     if x.dtype not in _real_numeric_dtypes:
         raise TypeError("Only real numeric dtypes are allowed in argmax")
-
-    if x.ndim > 1 and axis is None:
-        # must flatten ND arrays to 1D without an axis argument
-        x_op = reshape(x, shape=(-1,))
-    else:
-        x_op = x
-
-    resp = generic_msg(
-        cmd=f"reduce->idx{x_op.ndim}D",
-        args={
-            "x": x_op._array,
-            "op": "argmin",
-            "hasAxis": axis is not None,
-            "axis": axis if axis is not None else 0,
-        },
-    )
-
-    if axis is None:
-        return Array._new(scalar_array(parse_single_value(resp)))
-    else:
-        arr = Array._new(create_pdarray(resp))
-
-        if keepdims:
-            return arr
-        else:
-            return squeeze(arr, axis)
+    return Array._new(ak.argmin(x._array, axis=axis, keepdims=keepdims))
 
 
 def nonzero(x: Array, /) -> Tuple[Array, ...]:
diff --git a/arkouda/numpy/dtypes/dtypes.py b/arkouda/numpy/dtypes/dtypes.py
index ef4c1c2819..a50921e876 100644
--- a/arkouda/numpy/dtypes/dtypes.py
+++ b/arkouda/numpy/dtypes/dtypes.py
@@ -27,6 +27,7 @@
 __all__ = [
     "_datatype_check",
     "ARKOUDA_SUPPORTED_DTYPES",
+    "ARKOUDA_SUPPORTED_INTS",
     "DType",
     "DTypeObjects",
     "DTypes",
diff --git a/arkouda/pdarrayclass.py b/arkouda/pdarrayclass.py
index 124e61959e..63c734ba88 100644
--- a/arkouda/pdarrayclass.py
+++ b/arkouda/pdarrayclass.py
@@ -12,7 +12,12 @@
 from arkouda.client import generic_msg
 from arkouda.infoclass import information, pretty_print_information
 from arkouda.logger import getArkoudaLogger
-from arkouda.numpy.dtypes import NUMBER_FORMAT_STRINGS, DTypes, bigint
+from arkouda.numpy.dtypes import (
+    ARKOUDA_SUPPORTED_INTS,
+    NUMBER_FORMAT_STRINGS,
+    DTypes,
+    bigint,
+)
 from arkouda.numpy.dtypes import bool_ as akbool
 from arkouda.numpy.dtypes import bool_scalars, dtype
from arkouda.numpy.dtypes import float64 as akfloat64 @@ -33,7 +38,7 @@ def numeric_reduce( pda, - axis: Optional[Union[int, Tuple[int, ...]]] = None, + axis: Optional[Union[int_scalars, Tuple[int_scalars, ...]]] = None, keepdims: bool = False, ) -> Union[numpy_scalars, pdarray]: pass @@ -44,7 +49,7 @@ def numeric_reduce( min = numeric_reduce def boolean_reduce( - pda, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False + pda, axis: Optional[Union[int_scalars, Tuple[int_scalars, ...]]] = None, keepdims: bool = False ) -> Union[bool_scalars, pdarray]: pass @@ -53,6 +58,14 @@ def boolean_reduce( all = boolean_reduce any = boolean_reduce + def index_reduce( + pda, axis: Optional[Union[int_scalars, None]] = None, keepdims: bool = False + ) -> Union[akint64, akuint64, pdarray]: + pass + + argmax = index_reduce + argmin = index_reduce + __all__ = [ "pdarray", @@ -97,6 +110,8 @@ def boolean_reduce( SUPPORTED_REDUCTION_OPS = ["any", "all", "isSorted", "isSortedLocally", "max", "min", "sum", "prod"] +SUPPORTED_INDEX_REDUCTION_OPS = ["argmin", "argmax"] + @typechecked def parse_single_value(msg: str) -> Union[numpy_scalars, int]: @@ -1420,17 +1435,21 @@ def max( """ return max(self, axis=axis, keepdims=keepdims) - def argmin(self) -> Union[np.int64, np.uint64]: + def argmin( + self, axis: Optional[Union[int, None]] = None, keepdims: bool = False + ) -> Union[np.int64, np.uint64, pdarray]: """ Return the index of the first occurrence of the array min value """ - return argmin(self) + return argmin(self, axis=axis, keepdims=keepdims) - def argmax(self) -> Union[np.int64, np.uint64]: + def argmax( + self, axis: Optional[Union[int, None]] = None, keepdims: bool = False + ) -> Union[np.int64, np.uint64, pdarray]: """ Return the index of the first occurrence of the array max value. """ - return argmax(self) + return argmax(self, axis=axis, keepdims=keepdims) def mean(self) -> np.float64: """ @@ -2677,12 +2696,12 @@ def _make_reduction_func( return_dtype="numpy_scalars", ): if op not in SUPPORTED_REDUCTION_OPS: - raise ValueError(f"value {op} not supported by _reduce_by_op.") + raise ValueError(f"value {op} not supported by _make_reduction_func.") @typechecked def op_func( pda: pdarray, - axis: Optional[Union[int, Tuple[int, ...]]] = None, + axis: Optional[Union[int_scalars, Tuple[int_scalars, ...]]] = None, keepdims: bool = False, ) -> Union[numpy_scalars, pdarray]: return _common_reduction(op, pda, axis, keepdims=keepdims) @@ -2693,9 +2712,9 @@ def op_func( Parameters ---------- pda : pdarray - The pdarray instance to be evaluated + The pdarray instance to be evaluated. axis : int or Tuple[int, ...], optional - The axis or axes along which to compute the sum. If None, the sum of the entire array is + The axis or axes along which to compute the sum. If None, the reduction of the entire array is computed (returning a scalar). keepdims : bool, optional Whether to keep the singleton dimension(s) along `axis` in the result. 
@@ -2716,6 +2735,54 @@ def op_func( return op_func +def _make_index_reduction_func( + op, + function_descriptor="Return index reduction of a pdarray by an operation along an axis.", + return_descriptor="", + return_dtype="int64, uint64", +): + if op not in SUPPORTED_INDEX_REDUCTION_OPS: + raise ValueError(f"value {op} not supported by _make_index_reduction_func.") + + @typechecked + def op_func( + pda: pdarray, + axis: Optional[Union[int_scalars, None]] = None, + keepdims: bool = False, + ) -> Union[akuint64, akint64, pdarray]: + return _common_index_reduction(op, pda, axis, keepdims=keepdims) + + op_func.__doc__ = f""" + {function_descriptor} + + Parameters + ---------- + pda : pdarray + The pdarray instance to be evaluated. + axis : int, optional + The axis along which to compute the index reduction. + If None, the reduction of the entire array is + computed (returning a scalar). + keepdims : bool, optional + Whether to keep the singleton dimension(s) along `axis` in the result. + + Returns + ------- + pdarray or {return_dtype} + {return_descriptor} + + Raises + ------ + TypeError + Raised if pda is not a pdarray instance. + Raised axis is not an int. + RuntimeError + Raised if there's a server-side error thrown. + """ + + return op_func + + # check whether a reduction of the given axes on an 'ndim' dimensional array # would result in a single scalar value def _reduces_to_single_value(axis, ndim) -> bool: @@ -2738,7 +2805,7 @@ def _reduces_to_single_value(axis, ndim) -> bool: def _common_reduction( kind: str, pda: pdarray, - axis: Optional[Union[int, Tuple[int, ...], None]] = None, + axis: Optional[Union[int_scalars, Tuple[int_scalars, ...], None]] = None, keepdims: bool = False, ) -> Union[numpy_scalars, pdarray]: """ @@ -2749,22 +2816,22 @@ def _common_reduction( kind : str The name of the reduction operation. Must be a member of SUPPORTED_REDUCTION_OPS. pda : pdarray - The pdarray instance to be evaluated + The pdarray instance to be evaluated. axis : int or Tuple[int, ...], optional - The axis or axes along which to compute the sum. If None, the sum of the entire array is + The axis or axes along which to compute the reduction. If None, the sum of the entire array is computed (returning a scalar). keepdims : bool, optional Whether to keep the singleton dimension(s) along `axis` in the result. Returns ------- - bool + numpy_scalars, pdarray Raises ------ TypeError - Raised if pda is not a pdarray instance + Raised if pda is not a pdarray instance. RuntimeError - Raised if there's a server-side error thrown + Raised if there's a server-side error thrown. ValueError Raised op is not a supported reduction operation. """ @@ -2780,7 +2847,7 @@ def _common_reduction( [ axis, ] - if isinstance(axis, int) + if isinstance(axis, ARKOUDA_SUPPORTED_INTS) else list(axis) ) ) @@ -2808,6 +2875,64 @@ def _common_reduction( return squeeze(result, axis) +# helper function for argmin, argmax +@typechecked +def _common_index_reduction( + kind: str, + pda: pdarray, + axis: Optional[Union[int_scalars, Tuple[int_scalars, ...], None]] = None, + keepdims: bool = False, +) -> Union[akuint64, akint64, pdarray]: + """ + Return reduction of a pdarray by an operation along an axis. + + Parameters + ---------- + kind : str + The name of the reduction operation. Must be a member of SUPPORTED_INDEX_REDUCTION_OPS. + pda : pdarray + The pdarray instance to be evaluated. + axis : int or Tuple[int, ...], optional + The axis or axes along which to compute the reduction. 
If None, the sum of the entire array is + computed (returning a scalar). + keepdims : bool, optional + Whether to keep the singleton dimension(s) along `axis` in the result. + Returns + ------- + int64 + + Raises + ------ + TypeError + Raised if axis is not of type int. + """ + if kind not in SUPPORTED_INDEX_REDUCTION_OPS: + raise ValueError(f"Unsupported reduction type: {kind}") + + if pda.ndim == 1 or axis is None: + return parse_single_value( + generic_msg( + cmd=f"{kind}All<{pda.dtype.name},{pda.ndim}>", + args={"x": pda}, + ) + ) + elif isinstance(axis, int): + result = create_pdarray( + generic_msg( + cmd=f"{kind}<{pda.dtype.name},{pda.ndim}>", + args={"x": pda, "axis": axis}, + ) + ) + if keepdims is False: + from arkouda.numpy import squeeze + + return squeeze(result, axis) + else: + return result + else: + raise TypeError("axis must by of type int.") + + globals()["any"] = _make_reduction_func( "any", function_descriptor="Return True iff any element of the array evaluates to True.", @@ -2868,6 +2993,22 @@ def _common_reduction( return_dtype="numpy_scalars", ) +globals()["argmin"] = _make_index_reduction_func( + "argmin", + function_descriptor="Return the argmin of the array along the specified axis. " + "This is returned as the ordered index.", + return_descriptor="This argmin of the array.", + return_dtype="int64, uint64", +) + +globals()["argmax"] = _make_index_reduction_func( + "argmax", + function_descriptor="Return the argmax of the array along the specified axis. " + "This is returned as the ordered index.", + return_descriptor="This argmax of the array.", + return_dtype="int64, uint64", +) + @typechecked def dot( @@ -2920,64 +3061,6 @@ def dot( return pda1 * pda2 -@typechecked -def argmin(pda: pdarray) -> Union[np.int64, np.uint64]: - """ - Return the index of the first occurrence of the array min value. - - Parameters - ---------- - pda : pdarray - Values for which to calculate the argmin - - Returns - ------- - Union[np.int64, np.uint64] - The index of the argmin calculated from the pda - - Raises - ------ - TypeError - Raised if pda is not a pdarray instance - RuntimeError - Raised if there's a server-side error thrown - """ - return parse_single_value( - generic_msg( - cmd=f"reduce->idx{pda.ndim}D", args={"op": "argmin", "x": pda, "hasAxis": False, "axis": 0} - ) - ) - - -@typechecked -def argmax(pda: pdarray) -> Union[np.int64, np.uint64]: - """ - Return the index of the first occurrence of the array max value. - - Parameters - ---------- - pda : pdarray - Values for which to calculate the argmax - - Returns - ------- - Union[np.int64, np.uint64] - The index of the argmax calculated from the pda - - Raises - ------ - TypeError - Raised if pda is not a pdarray instance - RuntimeError - Raised if there's a server-side error thrown - """ - return parse_single_value( - generic_msg( - cmd=f"reduce->idx{pda.ndim}D", args={"op": "argmax", "x": pda, "hasAxis": False, "axis": 0} - ) - ) - - @typechecked def mean(pda: pdarray) -> np.float64: """ diff --git a/src/AryUtil.chpl b/src/AryUtil.chpl index e1b4c6b0d9..929d9fc1cd 100644 --- a/src/AryUtil.chpl +++ b/src/AryUtil.chpl @@ -1043,8 +1043,9 @@ module AryUtil // index -> order for the input array's indices // e.g., order = k + (nz * j) + (nz * ny * i) - inline proc indexToOrder(idx: rank*int): int { - var order = 0; + inline proc indexToOrder(idx: rank*?t): t + where (t==int) || (t==uint(64)) { + var order : t = 0; for param i in 0.. 
1 { + var minValLoc = (max(t), d.low); + forall i in slice with (minloc reduce minValLoc) do minValLoc reduce= (a[i], i); + return minValLoc[1]; + } + + proc argminSlice(const ref a: [?d] ?t, slice): d.idxType + where a.rank == 1 { + var minValLoc = (max(t), d.low); + forall i in slice with (minloc reduce minValLoc) do minValLoc reduce= (a[i], i); + return minValLoc[1]; + } + + proc argmaxSlice(ref a: [?d] ?t, slice, axis: int): d.idxType { var maxValLoc = (min(t), d.low); forall i in slice with (maxloc reduce maxValLoc) do maxValLoc reduce= (a[i], i); return maxValLoc[1][axis]; } + + proc argmaxSlice(const ref a: [?d] ?t, slice): d.rank * d.idxType + where a.rank > 1 { + var maxValLoc = (min(t), d.low); + forall i in slice with (maxloc reduce maxValLoc) do maxValLoc reduce= (a[i], i); + return maxValLoc[1]; + } + + proc argmaxSlice(const ref a: [?d] ?t, slice): d.idxType + where a.rank == 1 { + var maxValLoc = (min(t), d.low); + forall i in slice with (maxloc reduce maxValLoc) do maxValLoc reduce= (a[i], i); + return maxValLoc[1]; + } + } proc sizeReductionMsg(cmd: string, msgArgs: borrowed MessageArgs, st: borrowed SymTab): MsgTuple throws { @@ -1708,4 +1661,4 @@ module ReductionMsg use CommandMap; registerFunction("segmentedReduction", segmentedReductionMsg, getModuleName()); registerFunction("sizeReduction", sizeReductionMsg, getModuleName()); -} +} \ No newline at end of file diff --git a/src/ReductionMsgFunctions.chpl b/src/ReductionMsgFunctions.chpl index 416c727583..03dfbf61a9 100644 --- a/src/ReductionMsgFunctions.chpl +++ b/src/ReductionMsgFunctions.chpl @@ -5,7 +5,7 @@ module ReductionMsgFunctions use AryUtil; use ReductionMsg; use SliceReductionOps; - + @arkouda.registerCommand proc anyAll(const ref x:[?d] ?t): bool throws { @@ -30,7 +30,6 @@ module ReductionMsgFunctions } } - @arkouda.registerCommand proc allAll(const ref x:[?d] ?t): bool throws { @@ -55,7 +54,6 @@ module ReductionMsgFunctions } } - @arkouda.registerCommand proc isSortedAll(const ref x:[?d] ?t): bool throws { @@ -80,7 +78,6 @@ module ReductionMsgFunctions } } - @arkouda.registerCommand proc isSortedLocallyAll(const ref x:[?d] ?t): bool throws { @@ -105,5 +102,60 @@ module ReductionMsgFunctions } } + @arkouda.registerCommand + proc argmaxAll(const ref x:[?d] ?t): d.idxType throws + where (t != bigint) { + use SliceReductionOps; + if d.rank == 1 { + return argmaxSlice(x, d):d.idxType; + } else { + const ord = new orderer(x.shape); + const ret = ord.indexToOrder(argmaxSlice(x, d)):d.idxType; + return ret; + } + } + + @arkouda.registerCommand + proc argmax(const ref x:[?d] ?t, axis: int): [] d.idxType throws + where (t != bigint) && (d.rank > 1) { + use SliceReductionOps; + const axisArry = [axis]; + const outShape = reducedShape(x.shape, axisArry); + var ret = makeDistArray((...outShape), d.idxType); + forall sliceIdx in domOffAxis(d, axisArry) { + const sliceDom = domOnAxis(d, sliceIdx, axis); + ret[sliceIdx] = argmaxSlice(x, sliceDom)[axis]:d.idxType; + } + return ret; + } + + + @arkouda.registerCommand + proc argminAll(const ref x:[?d] ?t): d.idxType throws + where (t != bigint) { + use SliceReductionOps; + if d.rank == 1 { + return argminSlice(x, d):d.idxType; + } else { + const ord = new orderer(x.shape); + const ret = ord.indexToOrder(argminSlice(x, d)):d.idxType; + return ret; + } + } + + @arkouda.registerCommand + proc argmin(const ref x:[?d] ?t, axis: int): [] d.idxType throws + where (t != bigint) && (d.rank > 1) { + use SliceReductionOps; + const axisArry = [axis]; + const outShape = 
reducedShape(x.shape, axisArry); + var ret = makeDistArray((...outShape), d.idxType); + forall sliceIdx in domOffAxis(d, axisArry) { + const sliceDom = domOnAxis(d, sliceIdx, axis); + ret[sliceIdx] = argminSlice(x, sliceDom)[axis]:d.idxType; + } + return ret; + } + -} \ No newline at end of file +} diff --git a/src/scripts/generate_reduction_functions.py b/src/scripts/generate_reduction_functions.py index e56c37c03b..e329e2df88 100644 --- a/src/scripts/generate_reduction_functions.py +++ b/src/scripts/generate_reduction_functions.py @@ -1,23 +1,35 @@ -FUNCS = [ +BOOLEAN_FUNCS = [ ["any", "bool", []], ["all", "bool", []], ["isSorted", "bool", []], ["isSortedLocally", "bool", []], ] +INDEX_FUNCS = [ + ["argmax", "d.idxType", ["int", "uint", "real", "bool"]], + ["argmin", "d.idxType", ["int", "uint", "real", "bool"]], +] -def generate_reduction_functions(): - ret = """module ReductionMsgFunctions +def generate_header(): + return """module ReductionMsgFunctions { use BigInteger; use List; use AryUtil; use ReductionMsg; use SliceReductionOps; -""" + """ + + +def generate_footer(): + return "\n}\n" - for func, ret_type, allowed_types in FUNCS: + +def generate_reduction_functions(): + ret = "" + + for func, ret_type, allowed_types in BOOLEAN_FUNCS: where_statement = "" @@ -49,11 +61,57 @@ def generate_reduction_functions(): return ret; }} }}""" - ret += "\n\n" + ret += "\n" + + ret = clean_string(ret) + + return ret + +def clean_string(my_code:str): + return my_code.replace("\t", " ").replace(r"\n\s*\n", "\n\n") + +def generate_index_reduction_functions(): + ret = "" + + for func, ret_type, allowed_types in INDEX_FUNCS: + + where_statement = "" + + if len(allowed_types) > 0: + where_statement += "where " + where_statement += "||".join([f"(t=={tp})" for tp in allowed_types]) + + ret += f""" + @arkouda.registerCommand + proc {func}All(const ref x:[?d] ?t): {ret_type} throws + where (t != bigint) {{ + use SliceReductionOps; + if d.rank == 1 {{ + return {func}Slice(x, d):{ret_type}; + }} else {{ + const ord = new orderer(x.shape); + const ret = ord.indexToOrder({func}Slice(x, d)):{ret_type}; + return ret; + }} + }} + + @arkouda.registerCommand + proc {func}(const ref x:[?d] ?t, axis: int): [] {ret_type} throws + where (t != bigint) && (d.rank > 1) {{ + use SliceReductionOps; + const axisArry = [axis]; + const outShape = reducedShape(x.shape, axisArry); + var ret = makeDistArray((...outShape), {ret_type}); + forall sliceIdx in domOffAxis(d, axisArry) {{ + const sliceDom = domOnAxis(d, sliceIdx, axis); + ret[sliceIdx] = {func}Slice(x, sliceDom)[axis]:{ret_type}; + }} + return ret; + }} +""" + ret += "\n" - ret = ret.replace("\t", " ") - ret = ret - ret += "\n}" + ret = clean_string(ret) return ret @@ -63,7 +121,10 @@ def main(): outfile = "src/ReductionMsgFunctions.chpl" with open(outfile, "w") as text_file: + text_file.write(generate_header()) text_file.write(generate_reduction_functions()) + text_file.write(generate_index_reduction_functions()) + text_file.write(generate_footer()) if __name__ == "__main__": diff --git a/tests/pdarrayclass_test.py b/tests/pdarrayclass_test.py index bef07a396a..171f9e0d0f 100644 --- a/tests/pdarrayclass_test.py +++ b/tests/pdarrayclass_test.py @@ -5,6 +5,7 @@ import arkouda as ak from arkouda.testing import assert_equal as ak_assert_equal +from arkouda.testing import assert_equivalent as ak_assert_equivalent SEED = 314159 import numpy @@ -12,6 +13,8 @@ import arkouda.pdarrayclass REDUCTION_OPS = list(set(ak.pdarrayclass.SUPPORTED_REDUCTION_OPS) - set(["isSorted", 
"isSortedLocally"])) +INDEX_REDUCTION_OPS = ak.pdarrayclass.SUPPORTED_INDEX_REDUCTION_OPS + DTYPES = ["int64", "float64", "bool", "uint64"] # TODO: add unint8 to DTYPES @@ -133,7 +136,6 @@ def test_is_locally_sorted_multidim(self, dtype, axis): def assert_reduction_ops_match( self, op: str, pda: ak.pdarray, axis: Optional[Union[int, Tuple[int, ...]]] = None ): - from arkouda.testing import assert_equivalent as ak_assert_equivalent ak_op = getattr(arkouda.pdarrayclass, op) np_op = getattr(numpy, op) @@ -149,6 +151,34 @@ def assert_reduction_ops_match( ak_assert_equivalent(ak_result, np_op(nda, axis=axis)) + @pytest.mark.parametrize("op", INDEX_REDUCTION_OPS) + @pytest.mark.parametrize("size", pytest.prob_size) + @pytest.mark.parametrize("dtype", DTYPES) + @pytest.mark.parametrize("arry_gen", [ak.zeros, ak.ones, ak.arange]) + @pytest.mark.parametrize("axis", [0, None]) + def test_index_reduction_1D(self, op, dtype, arry_gen, size, axis): + pda = arry_gen(size, dtype=dtype) + ak_op = getattr(arkouda.pdarrayclass, op) + np_op = getattr(numpy, op) + nda = pda.to_ndarray() + ak_result = ak_op(pda, axis=axis) + ak_assert_equivalent(ak_result, np_op(nda, axis=axis)) + + @pytest.mark.skip_if_max_rank_less_than(3) + @pytest.mark.parametrize("op", INDEX_REDUCTION_OPS) + @pytest.mark.parametrize("size", pytest.prob_size) + @pytest.mark.parametrize("dtype", DTYPES) + @pytest.mark.parametrize("arry_gen", [ak.zeros, ak.ones, ak.arange]) + @pytest.mark.parametrize("axis", [0, 1, None]) + def test_index_reduction_mulit_dim(self, op, dtype, arry_gen, size, axis): + size = 10 # size // 3 + pda = arry_gen(size * size * size, dtype=dtype).reshape((size, size, size)) + ak_op = getattr(arkouda.pdarrayclass, op) + np_op = getattr(numpy, op) + nda = pda.to_ndarray() + ak_result = ak_op(pda, axis=axis) + ak_assert_equivalent(ak_result, np_op(nda, axis=axis)) + @pytest.mark.parametrize("op", REDUCTION_OPS) @pytest.mark.parametrize("size", pytest.prob_size) @pytest.mark.parametrize("dtype", DTYPES)