From 3847c760ea0e5315e5d6a354a894a92c8032a586 Mon Sep 17 00:00:00 2001
From: Wang Huan
Date: Tue, 23 Nov 2021 02:45:05 +0000
Subject: [PATCH 1/4] refine a test case, test=develop

---
 python/paddle/fluid/tests/unittests/test_allclose_layer.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/paddle/fluid/tests/unittests/test_allclose_layer.py b/python/paddle/fluid/tests/unittests/test_allclose_layer.py
index c376a5c95c3935..7b201f60db5397 100644
--- a/python/paddle/fluid/tests/unittests/test_allclose_layer.py
+++ b/python/paddle/fluid/tests/unittests/test_allclose_layer.py
@@ -61,7 +61,7 @@ def allclose_check(self, use_cuda, dtype='float32'):
         # for corner case
         x = np.array([10.1, 10.1]).astype(dtype)
         y = np.array([10, 10]).astype(dtype)
-        result_c, = exe.run(feed={'a': x, 'b': y}, fetch_list=[result_corner])
+        result_c = exe.run(feed={'a': x, 'b': y}, fetch_list=[result_corner])
         corner_res = (dtype == 'float64')
         self.assertEqual(result_c[0], corner_res)
 

From ca1d35ea3c57cf18a9a4e4b45500f57538726b0e Mon Sep 17 00:00:00 2001
From: Wang Huan
Date: Mon, 6 Dec 2021 04:51:18 +0000
Subject: [PATCH 2/4] rm python, test=develop

---
 paddle/pten/core/CMakeLists.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/paddle/pten/core/CMakeLists.txt b/paddle/pten/core/CMakeLists.txt
index 0a2504f50327c1..e19d0a490cef39 100644
--- a/paddle/pten/core/CMakeLists.txt
+++ b/paddle/pten/core/CMakeLists.txt
@@ -1,9 +1,9 @@
 if(WITH_GPU)
-  cc_library(convert_utils SRCS convert_utils.cc DEPS data_type place gpu_info python)
+  cc_library(convert_utils SRCS convert_utils.cc DEPS data_type place gpu_info)
 elseif(WITH_ROCM)
-  cc_library(convert_utils SRCS convert_utils.cc DEPS data_type place gpu_info python)
+  cc_library(convert_utils SRCS convert_utils.cc DEPS data_type place gpu_info)
 else()
-  cc_library(convert_utils SRCS convert_utils.cc DEPS data_type place python)
+  cc_library(convert_utils SRCS convert_utils.cc DEPS data_type place)
 endif()
 
 cc_library(kernel_factory SRCS kernel_factory.cc DEPS enforce)

From b3ace0d0f52f3ea1f1a94d9d086318928f3e5e7e Mon Sep 17 00:00:00 2001
From: Wang Huan
Date: Mon, 6 Dec 2021 04:52:57 +0000
Subject: [PATCH 3/4] refine, test=develop

---
 python/paddle/fluid/tests/unittests/test_allclose_layer.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/paddle/fluid/tests/unittests/test_allclose_layer.py b/python/paddle/fluid/tests/unittests/test_allclose_layer.py
index 7b201f60db5397..c376a5c95c3935 100644
--- a/python/paddle/fluid/tests/unittests/test_allclose_layer.py
+++ b/python/paddle/fluid/tests/unittests/test_allclose_layer.py
@@ -61,7 +61,7 @@ def allclose_check(self, use_cuda, dtype='float32'):
         # for corner case
         x = np.array([10.1, 10.1]).astype(dtype)
         y = np.array([10, 10]).astype(dtype)
-        result_c = exe.run(feed={'a': x, 'b': y}, fetch_list=[result_corner])
+        result_c, = exe.run(feed={'a': x, 'b': y}, fetch_list=[result_corner])
         corner_res = (dtype == 'float64')
         self.assertEqual(result_c[0], corner_res)
 

From b92db9ec6d123ca7ee5ee6a092d42cf85b5e18dc Mon Sep 17 00:00:00 2001
From: Wang Huan
Date: Mon, 6 Dec 2021 09:10:40 +0000
Subject: [PATCH 4/4] fix cmake generate error, and fix circular import,
 test=develop

---
 paddle/fluid/pybind/eager_method.cc           |  2 +-
 paddle/fluid/pybind/eager_utils.cc            | 34 +++++++++++++++++++
 paddle/fluid/pybind/eager_utils.h             |  2 ++
 paddle/pten/core/convert_utils.cc             | 34 -------------------
 paddle/pten/core/convert_utils.h              |  1 -
 .../fluid/eager/eager_tensor_patch_methods.py |  2 +-
 6 files changed, 38 insertions(+), 37 deletions(-)

diff --git a/paddle/fluid/pybind/eager_method.cc b/paddle/fluid/pybind/eager_method.cc
index f040566260c74a..e01396a4e3ca76 100644
--- a/paddle/fluid/pybind/eager_method.cc
+++ b/paddle/fluid/pybind/eager_method.cc
@@ -42,7 +42,7 @@ static PyObject* eager_tensor_method_numpy(EagerTensorObject* self,
     return Py_None;
   }
   auto tensor_dims = self->eagertensor.shape();
-  auto numpy_dtype = pten::TensorDtype2NumpyDtype(self->eagertensor.type());
+  auto numpy_dtype = TensorDtype2NumpyDtype(self->eagertensor.type());
   auto sizeof_dtype = pten::DataTypeSize(self->eagertensor.type());
   Py_intptr_t py_dims[paddle::framework::DDim::kMaxRank];
   Py_intptr_t py_strides[paddle::framework::DDim::kMaxRank];
diff --git a/paddle/fluid/pybind/eager_utils.cc b/paddle/fluid/pybind/eager_utils.cc
index 9268fc8e7b976c..c8b6f2c06c731a 100644
--- a/paddle/fluid/pybind/eager_utils.cc
+++ b/paddle/fluid/pybind/eager_utils.cc
@@ -17,9 +17,11 @@ limitations under the License. */
 #include "paddle/fluid/eager/api/all.h"
 #include "paddle/fluid/eager/autograd_meta.h"
 #include "paddle/fluid/memory/allocation/allocator.h"
+#include "paddle/fluid/operators/py_func_op.h"
 #include "paddle/fluid/platform/enforce.h"
 #include "paddle/fluid/pybind/eager.h"
 #include "paddle/fluid/pybind/eager_utils.h"
+#include "paddle/fluid/pybind/tensor_py.h"
 #include "paddle/pten/common/data_type.h"
 #include "paddle/pten/core/convert_utils.h"
 #include "paddle/pten/core/dense_tensor.h"
@@ -37,6 +39,38 @@ extern PyTypeObject* g_xpuplace_pytype;
 extern PyTypeObject* g_npuplace_pytype;
 extern PyTypeObject* g_cudapinnedplace_pytype;
 
+int TensorDtype2NumpyDtype(pten::DataType dtype) {
+  switch (dtype) {
+    case pten::DataType::BOOL:
+      return pybind11::detail::npy_api::NPY_BOOL_;
+    case pten::DataType::INT8:
+      return pybind11::detail::npy_api::NPY_INT8_;
+    case pten::DataType::UINT8:
+      return pybind11::detail::npy_api::NPY_UINT8_;
+    case pten::DataType::INT16:
+      return pybind11::detail::npy_api::NPY_INT16_;
+    case pten::DataType::INT32:
+      return pybind11::detail::npy_api::NPY_INT32_;
+    case pten::DataType::INT64:
+      return pybind11::detail::npy_api::NPY_INT64_;
+    case pten::DataType::FLOAT16:
+      return pybind11::detail::NPY_FLOAT16_;
+    case pten::DataType::FLOAT32:
+      return pybind11::detail::npy_api::NPY_FLOAT_;
+    case pten::DataType::FLOAT64:
+      return pybind11::detail::npy_api::NPY_DOUBLE_;
+    case pten::DataType::COMPLEX64:
+      return pybind11::detail::NPY_COMPLEX64;
+    case pten::DataType::COMPLEX128:
+      return pybind11::detail::NPY_COMPLEX128;
+    default:
+      PADDLE_THROW(paddle::platform::errors::InvalidArgument(
+          "Unknow pten::DataType, the int value = %d.",
+          static_cast<int>(dtype)));
+      return 0;
+  }
+}
+
 bool PyObject_CheckLongOrConvertToLong(PyObject** obj) {
   if ((PyLong_Check(*obj) && !PyBool_Check(*obj))) {
     return true;
diff --git a/paddle/fluid/pybind/eager_utils.h b/paddle/fluid/pybind/eager_utils.h
index 49f56a61c31f1f..f311e62b8965e1 100644
--- a/paddle/fluid/pybind/eager_utils.h
+++ b/paddle/fluid/pybind/eager_utils.h
@@ -21,6 +21,8 @@ typedef struct {
   PyObject_HEAD egr::EagerTensor eagertensor;
 } EagerTensorObject;
 
+int TensorDtype2NumpyDtype(pten::DataType dtype);
+
 bool PyObject_CheckLongOrConvertToLong(PyObject** obj);
 bool PyObject_CheckFloatOrConvertToFloat(PyObject** obj);
 bool PyObject_CheckStr(PyObject* obj);
diff --git a/paddle/pten/core/convert_utils.cc b/paddle/pten/core/convert_utils.cc
index e457c57d59e55c..211734f3315bc3 100644
--- a/paddle/pten/core/convert_utils.cc
+++ b/paddle/pten/core/convert_utils.cc
@@ -12,8 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 #include "paddle/pten/core/convert_utils.h"
-#include "paddle/fluid/operators/py_func_op.h"
-#include "paddle/fluid/pybind/tensor_py.h"
 
 // See Note [ Why still include the fluid headers? ]
 #include "paddle/fluid/platform/device/gpu/gpu_info.h"
@@ -272,36 +270,4 @@ std::string DataType2String(DataType dtype) {
   }
 }
 
-int TensorDtype2NumpyDtype(pten::DataType dtype) {
-  switch (dtype) {
-    case pten::DataType::BOOL:
-      return pybind11::detail::npy_api::NPY_BOOL_;
-    case pten::DataType::INT8:
-      return pybind11::detail::npy_api::NPY_INT8_;
-    case pten::DataType::UINT8:
-      return pybind11::detail::npy_api::NPY_UINT8_;
-    case pten::DataType::INT16:
-      return pybind11::detail::npy_api::NPY_INT16_;
-    case pten::DataType::INT32:
-      return pybind11::detail::npy_api::NPY_INT32_;
-    case pten::DataType::INT64:
-      return pybind11::detail::npy_api::NPY_INT64_;
-    case pten::DataType::FLOAT16:
-      return pybind11::detail::NPY_FLOAT16_;
-    case pten::DataType::FLOAT32:
-      return pybind11::detail::npy_api::NPY_FLOAT_;
-    case pten::DataType::FLOAT64:
-      return pybind11::detail::npy_api::NPY_DOUBLE_;
-    case pten::DataType::COMPLEX64:
-      return pybind11::detail::NPY_COMPLEX64;
-    case pten::DataType::COMPLEX128:
-      return pybind11::detail::NPY_COMPLEX128;
-    default:
-      PADDLE_THROW(paddle::platform::errors::InvalidArgument(
-          "Unknow pten::DataType, the int value = %d.",
-          static_cast<int>(dtype)));
-      return 0;
-  }
-}
-
 }  // namespace pten
diff --git a/paddle/pten/core/convert_utils.h b/paddle/pten/core/convert_utils.h
index e5990eb0a89f03..32ed753b4b0abb 100644
--- a/paddle/pten/core/convert_utils.h
+++ b/paddle/pten/core/convert_utils.h
@@ -48,6 +48,5 @@ pten::LoD TransToPtenLoD(const paddle::framework::LoD& lod);
 size_t DataTypeSize(DataType dtype);
 DataType String2DataType(const std::string& str);
 std::string DataType2String(DataType dtype);
-int TensorDtype2NumpyDtype(pten::DataType dtype);
 
 }  // namespace pten
diff --git a/python/paddle/fluid/eager/eager_tensor_patch_methods.py b/python/paddle/fluid/eager/eager_tensor_patch_methods.py
index 206c5cf23e6dad..b61bf78116aeb3 100644
--- a/python/paddle/fluid/eager/eager_tensor_patch_methods.py
+++ b/python/paddle/fluid/eager/eager_tensor_patch_methods.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import paddle.fluid.core as core
+from .. import core as core
 
 
 def monkey_patch_eagertensor():