diff --git a/paddle/fluid/eager/autograd_meta.h b/paddle/fluid/eager/autograd_meta.h index 9b616a496930f9..d815e167c97d8c 100644 --- a/paddle/fluid/eager/autograd_meta.h +++ b/paddle/fluid/eager/autograd_meta.h @@ -117,6 +117,10 @@ class AutogradMeta : public AbstractAutogradMeta { } } + bool Persistable() const { return persistable_; } + + void SetPersistable(bool persistable) { persistable_ = persistable; } + private: // TODO(jiabin) :Should we use pointer instead of object? egr::EagerTensor grad_; diff --git a/paddle/fluid/pybind/eager_properties.cc b/paddle/fluid/pybind/eager_properties.cc index 1255ea0da0f2fc..00a863be5f9801 100644 --- a/paddle/fluid/pybind/eager_properties.cc +++ b/paddle/fluid/pybind/eager_properties.cc @@ -47,17 +47,15 @@ static const int numpy_initialized_m = init_numpy_p(); extern PyTypeObject* pEagerTensorType; -PyObject* eager_tensor_properties_get_shape(EagerTensorObject* self, - void* closure) { - auto ddim = self->eagertensor.shape(); - std::vector<int64_t> value; - size_t rank = static_cast<size_t>(ddim.size()); - value.resize(rank); - for (size_t i = 0; i < rank; i++) { - value[i] = ddim[i]; - } +PyObject* eager_tensor_properties_get_name(EagerTensorObject* self, + void* closure) { + return ToPyObject(self->eagertensor.name()); +} - return ToPyObject(value); +int eager_tensor_properties_set_name(EagerTensorObject* self, PyObject* value, + void* closure) { + self->eagertensor.set_name(CastPyArg2AttrString(value, 0)); + return 0; } PyObject* eager_tensor_properties_get_stop_gradient(EagerTensorObject* self, @@ -73,9 +71,38 @@ int eager_tensor_properties_set_stop_gradient(EagerTensorObject* self, return 0; } -PyObject* eager_tensor_properties_get_dtype(EagerTensorObject* self, +PyObject* eager_tensor_properties_get_persistable(EagerTensorObject* self, + void* closure) { + auto meta = egr::EagerUtils::unsafe_autograd_meta(self->eagertensor); + return ToPyObject(meta->Persistable()); +} + +int eager_tensor_properties_set_persistable(EagerTensorObject* 
self, + PyObject* value, void* closure) { + auto meta = egr::EagerUtils::unsafe_autograd_meta(self->eagertensor); + meta->SetPersistable(CastPyArg2AttrBoolean(value, 0)); + return 0; +} + +PyObject* eager_tensor_properties_get_shape(EagerTensorObject* self, void* closure) { - return ToPyObject(pten::DataType2String(self->eagertensor.type())); + auto ddim = self->eagertensor.shape(); + std::vector<int64_t> value; + size_t rank = static_cast<size_t>(ddim.size()); + value.resize(rank); + for (size_t i = 0; i < rank; i++) { + value[i] = ddim[i]; + } + + return ToPyObject(value); +} + +PyObject* eager_tensor_properties_get_place(EagerTensorObject* self, + void* closure) { + auto place = self->eagertensor.place(); + auto obj = ::pybind11::cast(place); + obj.inc_ref(); + return obj.ptr(); } PyObject* eager_tensor_properties_get_place_str(EagerTensorObject* self, @@ -85,15 +112,29 @@ PyObject* eager_tensor_properties_get_place_str(EagerTensorObject* self, return ToPyObject(ostr.str()); } +PyObject* eager_tensor_properties_get_dtype(EagerTensorObject* self, + void* closure) { + return ToPyObject(pten::DataType2String(self->eagertensor.type())); +} + struct PyGetSetDef variable_properties[] = { - {"shape", (getter)eager_tensor_properties_get_shape, nullptr, nullptr, - nullptr}, + {"name", (getter)eager_tensor_properties_get_name, + (setter)eager_tensor_properties_set_name, nullptr, nullptr}, {"stop_gradient", (getter)eager_tensor_properties_get_stop_gradient, (setter)eager_tensor_properties_set_stop_gradient, nullptr, nullptr}, - {"dtype", (getter)eager_tensor_properties_get_dtype, nullptr, nullptr, + {"persistable", (getter)eager_tensor_properties_get_persistable, + (setter)eager_tensor_properties_set_persistable, nullptr, nullptr}, + {"shape", (getter)eager_tensor_properties_get_shape, nullptr, nullptr, + nullptr}, + // {"is_leaf", (getter)eager_tensor_properties_get_is_leaf, nullptr, + // nullptr, + // nullptr}, + {"place", (getter)eager_tensor_properties_get_place, nullptr, nullptr, 
nullptr}, {"_place_str", (getter)eager_tensor_properties_get_place_str, nullptr, nullptr, nullptr}, + {"dtype", (getter)eager_tensor_properties_get_dtype, nullptr, nullptr, + nullptr}, {nullptr, nullptr, nullptr, nullptr, nullptr}}; } // namespace pybind diff --git a/python/paddle/fluid/tests/unittests/test_egr_python_api.py b/python/paddle/fluid/tests/unittests/test_egr_python_api.py index 24767227294484..cf8b47b9ade694 100644 --- a/python/paddle/fluid/tests/unittests/test_egr_python_api.py +++ b/python/paddle/fluid/tests/unittests/test_egr_python_api.py @@ -25,33 +25,23 @@ def test_scale_base(self): with eager_guard(): paddle.set_device("cpu") arr = np.ones([4, 16, 16, 32]).astype('float32') - a = paddle.to_tensor(arr, 'float32', core.CPUPlace()) - print(arr) - print("=============") - print(a) - a = core.eager.scale(a, 2.0, 0.9, True, False) + tensor = paddle.to_tensor(arr, 'float32', core.CPUPlace()) + print(tensor) + tensor = core.eager.scale(tensor, 2.0, 0.9, True, False) for i in range(0, 100): - a = core.eager.scale(a, 2.0, 0.9, True, False) - print(a.shape) - print(a.stop_gradient) - a.stop_gradient = False - print(a.stop_gradient) - a.stop_gradient = True - print(a.stop_gradient) - print(a) - - -with eager_guard(): - paddle.set_device("cpu") - arr = np.ones([4, 16, 16, 32]).astype('float32') - a = paddle.to_tensor(arr, 'float32', core.CPUPlace()) - a = core.eager.scale(a, 2.0, 0.9, True, False) - for i in range(0, 100): - a = core.eager.scale(a, 2.0, 0.9, True, False) - print(a.shape) - print(a.stop_gradient) - a.stop_gradient = False - print(a.stop_gradient) - a.stop_gradient = True - print(a.stop_gradient) - print(a) + tensor = core.eager.scale(tensor, 2.0, 0.9, True, False) + print(tensor) + self.assertEqual(tensor.shape, [4, 16, 16, 32]) + self.assertEqual(tensor.stop_gradient, True) + tensor.stop_gradient = False + self.assertEqual(tensor.stop_gradient, False) + tensor.stop_gradient = True + self.assertEqual(tensor.stop_gradient, False) + tensor.name 
= 'tensor_name_test' + self.assertEqual(tensor.name, 'tensor_name_test') + self.assertEqual(tensor.persistable, False) + tensor.persistable = True + self.assertEqual(tensor.persistable, True) + tensor.persistable = False + self.assertEqual(tensor.persistable, False) + self.assertTrue(tensor.place.is_cpu_place())