From de6c30009cd9abb57d8d3746a6d05d0911b74fb4 Mon Sep 17 00:00:00 2001 From: Hao Jin Date: Tue, 17 Mar 2020 22:40:29 +0000 Subject: [PATCH] add ffi for sum, var and std --- benchmark/python/ffi/benchmark_ffi.py | 3 + python/mxnet/_numpy_op_doc.py | 92 ------------ python/mxnet/ndarray/numpy/_op.py | 105 ++++++++++++- python/mxnet/numpy/multiarray.py | 104 ++++++++++++- python/mxnet/symbol/numpy/_symbol.py | 51 ++++++- python/mxnet/symbol/numpy/linalg.py | 8 +- .../numpy/np_broadcast_reduce_op_value.cc | 66 +++++++- src/api/operator/numpy/np_moments_op.cc | 142 ++++++++++++++++++ src/api/operator/numpy/np_tensordot_op.cc | 4 +- src/operator/numpy/np_broadcast_reduce_op.h | 38 ++++- .../numpy/np_broadcast_reduce_op_value.cc | 22 +-- .../numpy/np_broadcast_reduce_op_value.cu | 4 +- 12 files changed, 514 insertions(+), 125 deletions(-) create mode 100644 src/api/operator/numpy/np_moments_op.cc diff --git a/benchmark/python/ffi/benchmark_ffi.py b/benchmark/python/ffi/benchmark_ffi.py index 96d8e1d6658f..d676073c6ae8 100644 --- a/benchmark/python/ffi/benchmark_ffi.py +++ b/benchmark/python/ffi/benchmark_ffi.py @@ -57,6 +57,9 @@ def prepare_workloads(): OpArgMngr.add_workload("tensordot", pool['2x2'], pool['2x2'], ((1, 0), (0, 1))) OpArgMngr.add_workload("kron", pool['2x2'], pool['2x2']) OpArgMngr.add_workload("cumsum", pool['3x2'], axis=0, out=pool['3x2']) + OpArgMngr.add_workload("sum", pool['2x2'], axis=0, keepdims=True, out=pool['1x2']) + OpArgMngr.add_workload("std", pool['2x2'], axis=0, ddof=0, keepdims=True, out=pool['1x2']) + OpArgMngr.add_workload("var", pool['2x2'], axis=0, ddof=1, keepdims=True, out=pool['1x2']) OpArgMngr.add_workload("add", pool['2x2'], pool['2x2']) OpArgMngr.add_workload("linalg.svd", pool['3x3']) OpArgMngr.add_workload("split", pool['3x3'], (0, 1, 2), axis=1) diff --git a/python/mxnet/_numpy_op_doc.py b/python/mxnet/_numpy_op_doc.py index 3d80ce03b44e..c995cb0d41a4 100644 --- a/python/mxnet/_numpy_op_doc.py +++ b/python/mxnet/_numpy_op_doc.py @@ -231,98 +231,6 @@ def _np_dot(a, b, out=None): pass -def _np_sum(a, axis=None, dtype=None, keepdims=False, initial=None, out=None): - r""" - Sum of array elements over a given axis. - - Parameters - ---------- - a : ndarray - Input data. - axis : None or int, optional - Axis or axes along which a sum is performed. The default, - axis=None, will sum all of the elements of the input array. If - axis is negative it counts from the last to the first axis. - dtype : dtype, optional - The type of the returned array and of the accumulator in which the - elements are summed. The default type is float32. - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the input array. - - If the default value is passed, then `keepdims` will not be - passed through to the `sum` method of sub-classes of - `ndarray`, however any non-default value will be. If the - sub-classes `sum` method does not implement `keepdims` any - exceptions will be raised. - initial: Currently only supports None as input, optional - Starting value for the sum. - Currently not implemented. Please use ``None`` as input or skip this argument. - out : ndarray or None, optional - Alternative output array in which to place the result. It must have - the same shape and dtype as the expected output. - - Returns - ------- - sum_along_axis : ndarray - An ndarray with the same shape as `a`, with the specified - axis removed. 
If an output array is specified, a reference to - `out` is returned. - - Notes - ----- - - Input type does not support Python native iterables. - - "out" param: cannot perform auto type change. out ndarray's dtype must be the same as the expected output. - - "initial" param is not supported yet. Please use None as input. - - Arithmetic is modular when using integer types, and no error is raised on overflow. - - The sum of an empty array is the neutral element 0: - - >>> a = np.empty(1) - >>> np.sum(a) - array(0.) - - This function differs from the original `numpy.sum - `_ in - the following aspects: - - - Input type does not support Python native iterables(list, tuple, ...). - - "out" param: cannot perform auto type cast. out ndarray's dtype must be the same as the expected output. - - "initial" param is not supported yet. Please use ``None`` as input or skip it. - - Examples - -------- - >>> a = np.array([0.5, 1.5]) - >>> np.sum(a) - array(2.) - >>> a = np.array([0.5, 0.7, 0.2, 1.5]) - >>> np.sum(a, dtype=np.int32) - array(2, dtype=int32) - >>> a = np.array([[0, 1], [0, 5]]) - >>> np.sum(a) - array(6.) - >>> np.sum(a, axis=0) - array([0., 6.]) - >>> np.sum(a, axis=1) - array([1., 5.]) - - With output ndarray: - - >>> a = np.array([[0, 1], [0, 5]]) - >>> b = np.ones((2,), dtype=np.float32) - >>> np.sum(a, axis = 0, out=b) - array([0., 6.]) - >>> b - array([0., 6.]) - - If the accumulator is too small, overflow occurs: - - >>> np.ones(128, dtype=np.int8).sum(dtype=np.int8) - array(-128, dtype=int8) - """ - pass - - def _np_copy(a, out=None): """ Return an array copy of the given object. diff --git a/python/mxnet/ndarray/numpy/_op.py b/python/mxnet/ndarray/numpy/_op.py index ff0e48d47664..637a5cbbae3a 100644 --- a/python/mxnet/ndarray/numpy/_op.py +++ b/python/mxnet/ndarray/numpy/_op.py @@ -47,7 +47,7 @@ 'equal', 'not_equal', 'greater', 'less', 'greater_equal', 'less_equal', 'roll', 'rot90', 'einsum', 'true_divide', 'nonzero', 'quantile', 'percentile', 'shares_memory', 'may_share_memory', 'diff', 'ediff1d', 'resize', 'polyval', 'nan_to_num', 'isnan', 'isinf', 'isposinf', 'isneginf', 'isfinite', - 'where', 'bincount', 'pad', 'cumsum', 'diag', 'diagonal'] + 'where', 'bincount', 'pad', 'cumsum', 'sum', 'diag', 'diagonal'] @set_module('mxnet.ndarray.numpy') @@ -4883,7 +4883,7 @@ def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): # pylint: >>> np.std(a, dtype=np.float64) array(0.45, dtype=float64) """ - return _npi.std(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims, out=out) + return _api_internal.std(a, axis, dtype, ddof, keepdims, out) @set_module('mxnet.ndarray.numpy') @@ -4953,7 +4953,7 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): # pylint: >>> ((1-0.55)**2 + (0.1-0.55)**2)/2 0.2025 """ - return _npi.var(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims, out=out) + return _api_internal.var(a, axis, dtype, ddof, keepdims, out) # pylint: disable=redefined-outer-name @@ -6141,7 +6141,7 @@ def outer(a, b): [-2., -1., 0., 1., 2.], [-2., -1., 0., 1., 2.]]) """ - return tensordot(a.flatten(), b.flatten(), 0) + return tensordot(a.reshape_view((-1, )), b.reshape_view((-1, )), 0) @set_module('mxnet.ndarray.numpy') @@ -8033,3 +8033,100 @@ def diagonal(a, offset=0, axis1=0, axis2=1): [1, 7]]) """ return _api_internal.diagonal(a, offset, axis1, axis2) + + +# pylint:disable=redefined-outer-name, too-many-arguments +@set_module('mxnet.ndarray.numpy') +def sum(a, axis=None, dtype=None, out=None, keepdims=None, initial=None, where=None): + r""" + Sum 
of array elements over a given axis. + + Parameters + ---------- + a : ndarray + Input data. + axis : None or int, optional + Axis or axes along which a sum is performed. The default, + axis=None, will sum all of the elements of the input array. If + axis is negative it counts from the last to the first axis. + dtype : dtype, optional + The type of the returned array and of the accumulator in which the + elements are summed. The default type is float32. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `sum` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-classes `sum` method does not implement `keepdims` any + exceptions will be raised. + initial: Currently only supports None as input, optional + Starting value for the sum. + Currently not implemented. Please use ``None`` as input or skip this argument. + out : ndarray or None, optional + Alternative output array in which to place the result. It must have + the same shape and dtype as the expected output. + + Returns + ------- + sum_along_axis : ndarray + An ndarray with the same shape as `a`, with the specified + axis removed. If an output array is specified, a reference to + `out` is returned. + + Notes + ----- + - Input type does not support Python native iterables. + - "out" param: cannot perform auto type change. out ndarray's dtype must be the same as the expected output. + - "initial" param is not supported yet. Please use None as input. + - Arithmetic is modular when using integer types, and no error is raised on overflow. + - The sum of an empty array is the neutral element 0: + + >>> a = np.empty(1) + >>> np.sum(a) + array(0.) + + This function differs from the original `numpy.sum + `_ in + the following aspects: + + - Input type does not support Python native iterables(list, tuple, ...). + - "out" param: cannot perform auto type cast. out ndarray's dtype must be the same as the expected output. + - "initial" param is not supported yet. Please use ``None`` as input or skip it. + + Examples + -------- + >>> a = np.array([0.5, 1.5]) + >>> np.sum(a) + array(2.) + >>> a = np.array([0.5, 0.7, 0.2, 1.5]) + >>> np.sum(a, dtype=np.int32) + array(2, dtype=int32) + >>> a = np.array([[0, 1], [0, 5]]) + >>> np.sum(a) + array(6.) 
+ >>> np.sum(a, axis=0) + array([0., 6.]) + >>> np.sum(a, axis=1) + array([1., 5.]) + + With output ndarray: + + >>> a = np.array([[0, 1], [0, 5]]) + >>> b = np.ones((2,), dtype=np.float32) + >>> np.sum(a, axis=0, out=b) + array([0., 6.]) + >>> b + array([0., 6.]) + + If the accumulator is too small, overflow occurs: + + >>> np.ones(128, dtype=np.int8).sum(dtype=np.int8) + array(-128, dtype=int8) + """ + if where is not None and where is not True: + raise ValueError("only where=None or where=True cases are supported for now") + return _api_internal.sum(a, axis, dtype, keepdims, initial, out) +# pylint:enable=redefined-outer-name, too-many-arguments diff --git a/python/mxnet/numpy/multiarray.py b/python/mxnet/numpy/multiarray.py index 281a6f7cc3fc..a62216d86445 100644 --- a/python/mxnet/numpy/multiarray.py +++ b/python/mxnet/numpy/multiarray.py @@ -73,7 +73,7 @@ 'greater', 'less', 'greater_equal', 'less_equal', 'roll', 'rot90', 'einsum', 'true_divide', 'nonzero', 'quantile', 'percentile', 'shares_memory', 'may_share_memory', 'diff', 'ediff1d', 'resize', 'matmul', 'nan_to_num', 'isnan', 'isinf', 'isposinf', 'isneginf', 'isfinite', 'polyval', 'where', 'bincount', - 'pad', 'cumsum', 'diag', 'diagonal'] + 'pad', 'cumsum', 'sum', 'diag', 'diagonal'] __all__ += fallback.__all__ @@ -6763,7 +6763,7 @@ def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): # pylint: >>> np.std(a, dtype=np.float64) array(0.45, dtype=float64) """ - return _npi.std(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims, out=out) + return _mx_nd_np.std(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims, out=out) # pylint: enable=redefined-outer-name @@ -6884,7 +6884,7 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): # pylint: >>> ((1-0.55)**2 + (0.1-0.55)**2)/2 0.2025 """ - return _npi.var(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims, out=out) + return _mx_nd_np.var(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims, out=out) # pylint: disable=redefined-outer-name @@ -7047,6 +7047,7 @@ def ravel(x, order='C'): return _mx_nd_np.ravel(x, order) +@set_module('mxnet.numpy') def unravel_index(indices, shape, order='C'): # pylint: disable=redefined-outer-name """ Converts a flat index or array of flat indices into a tuple of coordinate arrays. @@ -7077,6 +7078,7 @@ def unravel_index(indices, shape, order='C'): # pylint: disable=redefined-outer- return _mx_nd_np.unravel_index(indices, shape, order=order) +@set_module('mxnet.numpy') def flatnonzero(a): r""" Return indices that are non-zero in the flattened version of a. @@ -7116,6 +7118,7 @@ def flatnonzero(a): return _mx_nd_np.flatnonzero(a) +@set_module('mxnet.numpy') def diag_indices_from(arr): """ This returns a tuple of indices that can be used to access the main diagonal of an array @@ -10196,3 +10199,98 @@ def diagonal(a, offset=0, axis1=0, axis2=1): [1, 7]]) """ return _mx_nd_np.diagonal(a, offset=offset, axis1=axis1, axis2=axis2) + + +# pylint: disable=redefined-outer-name, too-many-arguments +@set_module('mxnet.numpy') +def sum(a, axis=None, dtype=None, out=None, keepdims=None, initial=None, where=None): + r""" + Sum of array elements over a given axis. + + Parameters + ---------- + a : ndarray + Input data. + axis : None or int, optional + Axis or axes along which a sum is performed. The default, + axis=None, will sum all of the elements of the input array. If + axis is negative it counts from the last to the first axis. 
+ dtype : dtype, optional + The type of the returned array and of the accumulator in which the + elements are summed. The default type is float32. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `sum` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-classes `sum` method does not implement `keepdims` any + exceptions will be raised. + initial: Currently only supports None as input, optional + Starting value for the sum. + Currently not implemented. Please use ``None`` as input or skip this argument. + out : ndarray or None, optional + Alternative output array in which to place the result. It must have + the same shape and dtype as the expected output. + + Returns + ------- + sum_along_axis : ndarray + An ndarray with the same shape as `a`, with the specified + axis removed. If an output array is specified, a reference to + `out` is returned. + + Notes + ----- + - Input type does not support Python native iterables. + - "out" param: cannot perform auto type change. out ndarray's dtype must be the same as the expected output. + - "initial" param is not supported yet. Please use None as input. + - Arithmetic is modular when using integer types, and no error is raised on overflow. + - The sum of an empty array is the neutral element 0: + + >>> a = np.empty(1) + >>> np.sum(a) + array(0.) + + This function differs from the original `numpy.sum + `_ in + the following aspects: + + - Input type does not support Python native iterables(list, tuple, ...). + - "out" param: cannot perform auto type cast. out ndarray's dtype must be the same as the expected output. + - "initial" param is not supported yet. Please use ``None`` as input or skip it. + + Examples + -------- + >>> a = np.array([0.5, 1.5]) + >>> np.sum(a) + array(2.) + >>> a = np.array([0.5, 0.7, 0.2, 1.5]) + >>> np.sum(a, dtype=np.int32) + array(2, dtype=int32) + >>> a = np.array([[0, 1], [0, 5]]) + >>> np.sum(a) + array(6.) 
+ >>> np.sum(a, axis=0) + array([0., 6.]) + >>> np.sum(a, axis=1) + array([1., 5.]) + + With output ndarray: + + >>> a = np.array([[0, 1], [0, 5]]) + >>> b = np.ones((2,), dtype=np.float32) + >>> np.sum(a, axis = 0, out=b) + array([0., 6.]) + >>> b + array([0., 6.]) + + If the accumulator is too small, overflow occurs: + + >>> np.ones(128, dtype=np.int8).sum(dtype=np.int8) + array(-128, dtype=int8) + """ + return _mx_nd_np.sum(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims, initial=initial, where=where) +# pylint: enable=redefined-outer-name, too-many-arguments diff --git a/python/mxnet/symbol/numpy/_symbol.py b/python/mxnet/symbol/numpy/_symbol.py index a2a4cd9d3584..5542c9ec6b94 100644 --- a/python/mxnet/symbol/numpy/_symbol.py +++ b/python/mxnet/symbol/numpy/_symbol.py @@ -53,7 +53,7 @@ 'equal', 'not_equal', 'greater', 'less', 'greater_equal', 'less_equal', 'roll', 'rot90', 'einsum', 'true_divide', 'quantile', 'percentile', 'shares_memory', 'may_share_memory', 'diff', 'ediff1d', 'resize', 'polyval', 'nan_to_num', 'isnan', 'isinf', 'isposinf', 'isneginf', 'isfinite', - 'where', 'bincount', 'pad', 'cumsum', 'diag', 'diagonal'] + 'where', 'bincount', 'pad', 'cumsum', 'sum', 'diag', 'diagonal'] @set_module('mxnet.symbol.numpy') @@ -688,7 +688,7 @@ def diag(self, k=0, **kwargs): def sum(self, axis=None, dtype=None, out=None, keepdims=False): # pylint: disable=arguments-differ """Return the sum of the array elements over the given axis.""" - return _mx_np_op.sum(self, axis=axis, dtype=dtype, out=out, keepdims=keepdims) + return _npi.sum(self, axis=axis, dtype=dtype, out=out, keepdims=keepdims) def nansum(self, *args, **kwargs): """Convenience fluent method for :py:func:`nansum`. @@ -7022,4 +7022,51 @@ def diagonal(a, offset=0, axis1=0, axis2=1): return _npi.diagonal(a, offset=offset, axis1=axis1, axis2=axis2) +# pylint:disable=redefined-outer-name, too-many-arguments +@set_module('mxnet.symbol.numpy') +def sum(a, axis=None, dtype=None, out=None, keepdims=None, initial=None, where=None): + r""" + Sum of array elements over a given axis. + + Parameters + ---------- + a : _Symbol + Input data. + axis : None or int, optional + Axis or axes along which a sum is performed. The default, + axis=None, will sum all of the elements of the input array. If + axis is negative it counts from the last to the first axis. + dtype : dtype, optional + The type of the returned array and of the accumulator in which the + elements are summed. The default type is float32. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `sum` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-classes `sum` method does not implement `keepdims` any + exceptions will be raised. + initial: Currently only supports None as input, optional + Starting value for the sum. + Currently not implemented. Please use ``None`` as input or skip this argument. + out : ndarray or None, optional + Alternative output array in which to place the result. It must have + the same shape and dtype as the expected output. + + Returns + ------- + sum_along_axis : _Symbol + An ndarray with the same shape as `a`, with the specified + axis removed. If an output array is specified, a reference to + `out` is returned. 
+ """ + if where is not None and where is not True: + raise ValueError("only where=None or where=True cases are supported for now") + return _npi.sum(a, axis=axis, dtype=dtype, keepdims=keepdims, initial=initial, out=out) +# pylint:enable=redefined-outer-name, too-many-arguments + + _set_np_symbol_class(_Symbol) diff --git a/python/mxnet/symbol/numpy/linalg.py b/python/mxnet/symbol/numpy/linalg.py index d326b37f0635..d2e11223e647 100644 --- a/python/mxnet/symbol/numpy/linalg.py +++ b/python/mxnet/symbol/numpy/linalg.py @@ -224,18 +224,18 @@ def norm(x, ord=None, axis=None, keepdims=False): if row_axis > col_axis: row_axis -= 1 if ord == 'inf': - return _mx_sym_np.sum(_symbol.abs(x), axis=col_axis, keepdims=keepdims).max(axis=row_axis, keepdims=keepdims) # pylint: disable=line-too-long + return _npi.sum(_symbol.abs(x), axis=col_axis, keepdims=keepdims).max(axis=row_axis, keepdims=keepdims) # pylint: disable=line-too-long else: - return _mx_sym_np.sum(_symbol.abs(x), axis=col_axis, keepdims=keepdims).min(axis=row_axis, keepdims=keepdims) # pylint: disable=line-too-long + return _npi.sum(_symbol.abs(x), axis=col_axis, keepdims=keepdims).min(axis=row_axis, keepdims=keepdims) # pylint: disable=line-too-long if ord in [1, -1]: row_axis, col_axis = axis if not keepdims: if row_axis < col_axis: col_axis -= 1 if ord == 1: - return _mx_sym_np.sum(_symbol.abs(x), axis=row_axis, keepdims=keepdims).max(axis=col_axis, keepdims=keepdims) # pylint: disable=line-too-long + return _npi.sum(_symbol.abs(x), axis=row_axis, keepdims=keepdims).max(axis=col_axis, keepdims=keepdims) # pylint: disable=line-too-long elif ord == -1: - return _mx_sym_np.sum(_symbol.abs(x), axis=row_axis, keepdims=keepdims).min(axis=col_axis, keepdims=keepdims) # pylint: disable=line-too-long + return _npi.sum(_symbol.abs(x), axis=row_axis, keepdims=keepdims).min(axis=col_axis, keepdims=keepdims) # pylint: disable=line-too-long if ord in [2, -2]: return _npi.norm(x, ord=ord, axis=axis, keepdims=keepdims, flag=0) if ord is None: diff --git a/src/api/operator/numpy/np_broadcast_reduce_op_value.cc b/src/api/operator/numpy/np_broadcast_reduce_op_value.cc index 224451c70570..5bb358c83561 100644 --- a/src/api/operator/numpy/np_broadcast_reduce_op_value.cc +++ b/src/api/operator/numpy/np_broadcast_reduce_op_value.cc @@ -6,9 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY @@ -18,7 +18,7 @@ */ /*! 
- * \file broadcast_reduce_op_value.cc + * \file np_broadcast_reduce_op_value.cc * \brief Implementation of the API of functions in * src/operator/tensor/np_broadcast_reduce_op_value.cc */ @@ -26,6 +26,7 @@ #include #include "../utils.h" #include "../../../operator/tensor/broadcast_reduce_op.h" +#include "../../../operator/numpy/np_broadcast_reduce_op.h" namespace mxnet { @@ -51,4 +52,63 @@ MXNET_REGISTER_API("_npi.broadcast_to") *ret = ndoutputs[0]; }); +MXNET_REGISTER_API("_npi.sum") +.set_body([](runtime::MXNetArgs args, runtime::MXNetRetValue* ret) { + using namespace runtime; + const nnvm::Op* op = Op::Get("_npi_sum"); + op::NumpyReduceAxesParam param; + nnvm::NodeAttrs attrs; + attrs.op = op; + + // parse axis + if (args[1].type_code() == kNull) { + param.axis = dmlc::nullopt; + } else { + if (args[1].type_code() == kDLInt) { + param.axis = Tuple(1, args[1].operator int64_t()); + } else { + param.axis = Tuple(args[1].operator ObjectRef()); + } + } + + // parse dtype + if (args[2].type_code() == kNull) { + param.dtype = dmlc::nullopt; + } else { + param.dtype = String2MXNetTypeWithBool(args[2].operator std::string()); + } + + // parse keepdims + if (args[3].type_code() == kNull) { + param.keepdims = false; + } else { + param.keepdims = args[3].operator bool(); + } + + // parse initial + if (args[4].type_code() == kNull) { + param.initial = dmlc::nullopt; + } else { + param.initial = args[4].operator double(); + } + + attrs.parsed = std::move(param); + + SetAttrDict(&attrs); + + NDArray* inputs[] = {args[0].operator NDArray*()}; + int num_inputs = 1; + + NDArray* outputs[] = {args[5].operator NDArray*()}; + NDArray** out = (outputs[0] == nullptr) ? nullptr : outputs; + int num_outputs = (outputs[0] != nullptr); + auto ndoutputs = Invoke(op, &attrs, num_inputs, inputs, &num_outputs, out); + + if (out) { + *ret = PythonArg(5); + } else { + *ret = reinterpret_cast(ndoutputs[0]); + } +}); + } // namespace mxnet diff --git a/src/api/operator/numpy/np_moments_op.cc b/src/api/operator/numpy/np_moments_op.cc new file mode 100644 index 000000000000..e780596272ae --- /dev/null +++ b/src/api/operator/numpy/np_moments_op.cc @@ -0,0 +1,142 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/*! 
+ * \file np_moments_op.cc + * \brief Implementation of the API of functions in src/operator/numpy/np_moments_op.cc + */ + +#include +#include +#include "../utils.h" +#include "../../../operator/numpy/np_broadcast_reduce_op.h" + +namespace mxnet { + +MXNET_REGISTER_API("_npi.std") +.set_body([](runtime::MXNetArgs args, runtime::MXNetRetValue* ret) { + using namespace runtime; + const nnvm::Op* op = Op::Get("_npi_std"); + op::NumpyMomentsParam param; + nnvm::NodeAttrs attrs; + attrs.op = op; + + // parse axis + if (args[1].type_code() == kNull) { + param.axis = dmlc::nullopt; + } else { + if (args[1].type_code() == kDLInt) { + param.axis = Tuple(1, args[1].operator int64_t()); + } else { + param.axis = Tuple(args[1].operator ObjectRef()); + } + } + + // parse dtype + if (args[2].type_code() == kNull) { + param.dtype = dmlc::nullopt; + } else { + param.dtype = String2MXNetTypeWithBool(args[2].operator std::string()); + } + + // parse ddof + param.ddof = args[3].operator int(); + + // parse keepdims + if (args[4].type_code() == kNull) { + param.keepdims = false; + } else { + param.keepdims = args[4].operator bool(); + } + + attrs.parsed = std::move(param); + + SetAttrDict(&attrs); + + NDArray* inputs[] = {args[0].operator NDArray*()}; + int num_inputs = 1; + + NDArray* outputs[] = {args[5].operator NDArray*()}; + NDArray** out = (outputs[0] == nullptr) ? nullptr : outputs; + int num_outputs = (outputs[0] != nullptr); + auto ndoutputs = Invoke(op, &attrs, num_inputs, inputs, &num_outputs, out); + + if (out) { + *ret = PythonArg(5); + } else { + *ret = reinterpret_cast(ndoutputs[0]); + } +}); + +MXNET_REGISTER_API("_npi.var") +.set_body([](runtime::MXNetArgs args, runtime::MXNetRetValue* ret) { + using namespace runtime; + const nnvm::Op* op = Op::Get("_npi_var"); + op::NumpyMomentsParam param; + nnvm::NodeAttrs attrs; + attrs.op = op; + + // parse axis + if (args[1].type_code() == kNull) { + param.axis = dmlc::nullopt; + } else { + if (args[1].type_code() == kDLInt) { + param.axis = Tuple(1, args[1].operator int64_t()); + } else { + param.axis = Tuple(args[1].operator ObjectRef()); + } + } + + // parse dtype + if (args[2].type_code() == kNull) { + param.dtype = dmlc::nullopt; + } else { + param.dtype = String2MXNetTypeWithBool(args[2].operator std::string()); + } + + // parse ddof + param.ddof = args[3].operator int(); + + // parse keepdims + if (args[4].type_code() == kNull) { + param.keepdims = false; + } else { + param.keepdims = args[4].operator bool(); + } + + attrs.parsed = std::move(param); + + SetAttrDict(&attrs); + + NDArray* inputs[] = {args[0].operator NDArray*()}; + int num_inputs = 1; + + NDArray* outputs[] = {args[5].operator NDArray*()}; + NDArray** out = (outputs[0] == nullptr) ? nullptr : outputs; + int num_outputs = (outputs[0] != nullptr); + auto ndoutputs = Invoke(op, &attrs, num_inputs, inputs, &num_outputs, out); + + if (out) { + *ret = PythonArg(5); + } else { + *ret = reinterpret_cast(ndoutputs[0]); + } +}); + +}; // namespace mxnet diff --git a/src/api/operator/numpy/np_tensordot_op.cc b/src/api/operator/numpy/np_tensordot_op.cc index eef58b5b3389..55c131468b12 100644 --- a/src/api/operator/numpy/np_tensordot_op.cc +++ b/src/api/operator/numpy/np_tensordot_op.cc @@ -6,9 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY diff --git a/src/operator/numpy/np_broadcast_reduce_op.h b/src/operator/numpy/np_broadcast_reduce_op.h index dfa48440b848..199fe2afddc7 100644 --- a/src/operator/numpy/np_broadcast_reduce_op.h +++ b/src/operator/numpy/np_broadcast_reduce_op.h @@ -31,6 +31,7 @@ #include "../nn/moments-inl.h" #include "../tensor/broadcast_reduce_op.h" #include "../tensor/elemwise_binary_broadcast_op.h" +#include "../../api/operator/op_utils.h" namespace mxnet { namespace op { @@ -66,6 +67,22 @@ struct NumpyReduceAxesParam : public dmlc::Parameter { DMLC_DECLARE_FIELD(initial).set_default(dmlc::optional()) .describe("Starting value for the sum."); } + + void SetAttrDict(std::unordered_map* dict) { + std::ostringstream axis_s, dtype_s, keepdims_s, initial_s; + axis_s << axis; + keepdims_s << keepdims; + initial_s << initial; + (*dict)["axis"] = axis_s.str(); + dtype_s << dtype; + if (dtype.has_value()) { + (*dict)["dtype"] = MXNetTypeWithBool2String(dtype.value()); + } else { + (*dict)["dtype"] = dtype_s.str(); + } + (*dict)["keepdims"] = keepdims_s.str(); + (*dict)["initial"] = initial_s.str(); + } }; struct NumpyReduceAxesNoDTypeParam : public dmlc::Parameter { @@ -433,6 +450,7 @@ inline void NumpyReduceAxesBackwardUseNone(const nnvm::NodeAttrs& attrs, } BroadcastComputeImpl(attrs, ctx, inputs, req, outputs, small); + if (normalize) { Stream *s = ctx.get_stream(); MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, IType, { @@ -484,11 +502,27 @@ struct NumpyMomentsParam : public dmlc::Parameter { "precision than the default platform integer. 
In that case, if a is signed then " "the platform integer is used while if a is unsigned then an unsigned integer of " "the same precision as the platform integer is used."); - DMLC_DECLARE_FIELD(ddof).set_default(0) - .describe("Starting value for the sum."); DMLC_DECLARE_FIELD(keepdims).set_default(false) .describe("If this is set to `True`, the reduced axes are left " "in the result as dimension with size one."); + DMLC_DECLARE_FIELD(ddof).set_default(0) + .describe("Starting value for the sum."); + } + + void SetAttrDict(std::unordered_map* dict) { + std::ostringstream axis_s, dtype_s, keepdims_s, ddof_s; + axis_s << axis; + keepdims_s << keepdims; + ddof_s << ddof; + (*dict)["axis"] = axis_s.str(); + dtype_s << dtype; + if (dtype.has_value()) { + (*dict)["dtype"] = MXNetTypeWithBool2String(dtype.value()); + } else { + (*dict)["dtype"] = dtype_s.str(); + } + (*dict)["keepdims"] = keepdims_s.str(); + (*dict)["ddof"] = ddof_s.str(); } }; diff --git a/src/operator/numpy/np_broadcast_reduce_op_value.cc b/src/operator/numpy/np_broadcast_reduce_op_value.cc index 026e60e8bb25..33418667dfb7 100644 --- a/src/operator/numpy/np_broadcast_reduce_op_value.cc +++ b/src/operator/numpy/np_broadcast_reduce_op_value.cc @@ -51,12 +51,12 @@ inline bool NumpySumType(const nnvm::NodeAttrs& attrs, if (param.dtype.has_value()) { if (in_attrs->at(0) == mshadow::kBool) { - CHECK(param.dtype.value() == mshadow::kInt32 - || param.dtype.value() == mshadow::kInt64 - || param.dtype.value() == mshadow::kFloat32 - || param.dtype.value() == mshadow::kFloat64) << "Only support the following output " - "dtypes when input dtype is bool: " - "int32, int64, float32, float64."; + CHECK(param.dtype.value() == mshadow::kInt32 || + param.dtype.value() == mshadow::kInt64 || + param.dtype.value() == mshadow::kFloat32 || + param.dtype.value() == mshadow::kFloat64) + << "Only support the following output dtypes when input dtype is bool: " + "int32, int64, float32, float64."; } TYPE_ASSIGN_CHECK(*out_attrs, 0, param.dtype.value()); } else if (in_attrs->at(0) == mshadow::kBool) { @@ -126,7 +126,7 @@ void TVMOpReduce(const OpContext& ctx, #endif // MXNET_USE_TVM_OP } -NNVM_REGISTER_OP(_np_sum) +NNVM_REGISTER_OP(_npi_sum) .describe(R"code()code" ADD_FILELINE) .set_num_inputs(1) .set_num_outputs(1) @@ -145,9 +145,9 @@ NNVM_REGISTER_OP(_np_sum) return std::vector{ResourceRequest::kTempSpace}; }) .set_attr("THasDeterministicOutput", true) -.set_attr("FGradient", ElemwiseGradUseNone{"_backward_np_sum"}); +.set_attr("FGradient", ElemwiseGradUseNone{"_backward_npi_sum"}); -NNVM_REGISTER_OP(_backward_np_sum) +NNVM_REGISTER_OP(_backward_npi_sum) .set_num_outputs(1) .set_attr_parser(ParamParser) .set_attr("TIsBackward", true) @@ -155,8 +155,8 @@ NNVM_REGISTER_OP(_backward_np_sum) .set_attr("FCompute", NumpyReduceAxesBackwardUseNone); inline bool NumpyReduceAxesNoDTypeType(const nnvm::NodeAttrs& attrs, - std::vector *in_attrs, - std::vector *out_attrs) { + std::vector *in_attrs, + std::vector *out_attrs) { CHECK_EQ(in_attrs->size(), 1U); CHECK_EQ(out_attrs->size(), 1U); TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0)); diff --git a/src/operator/numpy/np_broadcast_reduce_op_value.cu b/src/operator/numpy/np_broadcast_reduce_op_value.cu index 684348fcaa37..c5111c2954cd 100644 --- a/src/operator/numpy/np_broadcast_reduce_op_value.cu +++ b/src/operator/numpy/np_broadcast_reduce_op_value.cu @@ -26,10 +26,10 @@ namespace mxnet { namespace op { -NNVM_REGISTER_OP(_np_sum) +NNVM_REGISTER_OP(_npi_sum) .set_attr("FCompute", NumpyReduceAxesCompute); 
-NNVM_REGISTER_OP(_backward_np_sum)
+NNVM_REGISTER_OP(_backward_npi_sum)
.set_attr<FCompute>("FCompute<gpu>", NumpyReduceAxesBackwardUseNone<gpu>);

NNVM_REGISTER_OP(_np_max)
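
A quick usage sketch of the Python entry points that now dispatch through the new FFI. This is illustrative only: the array values, shapes, and printed results below are my own, and it assumes an MXNet build containing this patch with numpy semantics enabled via `npx.set_np()`.

>>> from mxnet import np, npx
>>> npx.set_np()
>>> a = np.array([[1., 2.], [3., 4.]])
>>> out = np.zeros((1, 2))
>>> np.sum(a, axis=0, keepdims=True, out=out)   # routed through _api_internal.sum
array([[4., 6.]])
>>> np.std(a, axis=0, ddof=0, keepdims=True)    # routed through _api_internal.std
array([[1., 1.]])
>>> np.var(a, axis=0, ddof=1, keepdims=True)    # routed through _api_internal.var
array([[2., 2.]])

The workloads added to benchmark/python/ffi/benchmark_ffi.py exercise the same three calls (with `out=`, `keepdims=True`, and `ddof`) so the per-call overhead of the new FFI path can be measured.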