[Cleanup][B-16] Replace to_variable (#61538)
co63oc authored Feb 3, 2024
1 parent fe95803 commit aa1484b
Showing 10 changed files with 32 additions and 34 deletions.
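
The change is mechanical across all ten test files: the legacy dygraph helper to_variable (imported from paddle.base.dygraph) is replaced by the public paddle.to_tensor API, and the now-unused imports are dropped. A minimal sketch of the before/after pattern, assuming Paddle 2.x; the array name x_data below is illustrative and not taken from any single test:

import numpy as np
import paddle

x_data = np.random.randn(3, 4).astype('float32')  # illustrative input array

# Before this commit the tests wrote:
#   from paddle.base.dygraph import to_variable
#   x = to_variable(x_data)

# After this commit they use the public API:
x = paddle.to_tensor(x_data)
x.stop_gradient = True  # call sites that disabled gradients keep doing so explicitly

Both forms wrap the NumPy array in a dygraph Tensor; the diff below leaves the surrounding test logic untouched.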
7 changes: 3 additions & 4 deletions test/dygraph_to_static/test_reinforcement_learning.py
@@ -26,7 +26,6 @@

import paddle
import paddle.nn.functional as F
- from paddle.base.dygraph import to_variable
from paddle.nn import Layer

SEED = 2020
@@ -112,14 +111,14 @@ def choose_best_action(probs):
return idx, np.array([mask]).astype("float32")

def select_action(state):
- state = to_variable(state)
+ state = paddle.to_tensor(state)
state.stop_gradient = True
loss_probs = policy(state)

probs = loss_probs.numpy()

action, _mask = sample_action(probs[0])
- mask = to_variable(_mask)
+ mask = paddle.to_tensor(_mask)
mask.stop_gradient = True

loss_probs = paddle.log(loss_probs)
@@ -148,7 +147,7 @@ def finish_episode():

R_numpy = np.ones_like(log_prob_numpy).astype("float32")
_R = -1 * R * R_numpy
- _R = to_variable(_R)
+ _R = paddle.to_tensor(_R)
_R.stop_gradient = True
cur_loss = paddle.multiply(_R, log_prob)
policy_loss.append(cur_loss)
4 changes: 2 additions & 2 deletions test/dygraph_to_static/test_save_load.py
@@ -73,7 +73,7 @@ def test_save_load_same_result(self):
x_data = np.random.randn(30, 10, 32).astype('float32')
batch_num = 3

- x = base.dygraph.to_variable(x_data)
+ x = paddle.to_tensor(x_data)
net = Linear(32, 64)
adam = Adam(learning_rate=0.1, parameters=net.parameters())

@@ -100,7 +100,7 @@ def test_save_load_same_result(self):
# Switch into eval mode.
dygraph_net.eval()

- x = base.dygraph.to_variable(x_data)
+ x = paddle.to_tensor(x_data)
# predict output
with enable_to_static_guard(False):
dygraph_out, dygraph_loss = dygraph_net(x)
4 changes: 2 additions & 2 deletions test/dygraph_to_static/test_se_resnet.py
@@ -494,8 +494,8 @@ def predict_dygraph(self, data):
se_resnext.eval()

label = np.random.random([1, 1]).astype("int64")
- img = base.dygraph.to_variable(data)
- label = base.dygraph.to_variable(label)
+ img = paddle.to_tensor(data)
+ label = paddle.to_tensor(label)
pred_res, _, _, _ = se_resnext(img, label)

return pred_res.numpy()
5 changes: 2 additions & 3 deletions test/dygraph_to_static/test_sentiment.py
@@ -20,7 +20,6 @@

import paddle
from paddle import base
- from paddle.base.dygraph import to_variable
from paddle.nn import Embedding, Linear

SEED = 2020
@@ -172,7 +171,7 @@ def __init__(self, dict_dim, batch_size, seq_len):
sparse=False,
)
h_0 = np.zeros((self.batch_size, self.hid_dim), dtype="float32")
- h_0 = to_variable(h_0)
+ h_0 = paddle.to_tensor(h_0)
self._fc1 = Linear(self.hid_dim, self.hid_dim * 3)
self._fc2 = Linear(self.hid_dim, self.fc_hid_dim)
self._fc_prediction = Linear(self.fc_hid_dim, self.class_dim)
@@ -219,7 +218,7 @@ def __init__(self, dict_dim, batch_size, seq_len):
sparse=False,
)
h_0 = np.zeros((self.batch_size, self.hid_dim), dtype="float32")
- h_0 = to_variable(h_0)
+ h_0 = paddle.to_tensor(h_0)
self._fc1 = Linear(self.hid_dim, self.hid_dim * 3)
self._fc2 = Linear(self.hid_dim * 2, self.fc_hid_dim)
self._fc_prediction = Linear(self.fc_hid_dim, self.class_dim)
24 changes: 12 additions & 12 deletions test/legacy_test/test_repeat_interleave_op.py
@@ -236,16 +236,16 @@ def test_dygraph_api(self):
index_x = np.array([1, 1, 2, 1, 2, 2]).astype('int32')

with base.dygraph.guard():
- x = base.dygraph.to_variable(input_x)
- index = base.dygraph.to_variable(index_x)
+ x = paddle.to_tensor(input_x)
+ index = paddle.to_tensor(index_x)
z = paddle.repeat_interleave(x, index, None)
np_z = z.numpy()
expect_out = np.repeat(input_x, index_x, axis=None)
np.testing.assert_allclose(expect_out, np_z, rtol=1e-05)

# case repeats int
with base.dygraph.guard():
- x = base.dygraph.to_variable(input_x)
+ x = paddle.to_tensor(input_x)
index = 2
z = paddle.repeat_interleave(x, index, None)
np_z = z.numpy()
@@ -254,16 +254,16 @@

# case 1:
with base.dygraph.guard():
- x = base.dygraph.to_variable(self.data_x)
- index = base.dygraph.to_variable(self.data_index)
+ x = paddle.to_tensor(self.data_x)
+ index = paddle.to_tensor(self.data_index)
z = paddle.repeat_interleave(x, index, -1)
np_z = z.numpy()
expect_out = np.repeat(self.data_x, self.data_index, axis=-1)
np.testing.assert_allclose(expect_out, np_z, rtol=1e-05)

with base.dygraph.guard():
- x = base.dygraph.to_variable(self.data_x)
- index = base.dygraph.to_variable(self.data_index)
+ x = paddle.to_tensor(self.data_x)
+ index = paddle.to_tensor(self.data_index)
z = paddle.repeat_interleave(x, index, 1)
np_z = z.numpy()
expect_out = np.repeat(self.data_x, self.data_index, axis=1)
@@ -272,16 +272,16 @@
# case 2:
index_x = np.array([1, 2, 1]).astype('int32')
with base.dygraph.guard():
- x = base.dygraph.to_variable(self.data_x)
- index = base.dygraph.to_variable(index_x)
+ x = paddle.to_tensor(self.data_x)
+ index = paddle.to_tensor(index_x)
z = paddle.repeat_interleave(x, index, axis=0)
np_z = z.numpy()
expect_out = np.repeat(self.data_x, index, axis=0)
np.testing.assert_allclose(expect_out, np_z, rtol=1e-05)

# case 3 zero_dim:
with base.dygraph.guard():
- x = base.dygraph.to_variable(self.data_zero_dim_x)
+ x = paddle.to_tensor(self.data_zero_dim_x)
index = 2
z = paddle.repeat_interleave(x, index, None)
np_z = z.numpy()
@@ -290,8 +290,8 @@

# case 4 zero_dim_index
with base.dygraph.guard():
- x = base.dygraph.to_variable(self.data_zero_dim_x)
- index = base.dygraph.to_variable(self.data_zero_dim_index)
+ x = paddle.to_tensor(self.data_zero_dim_x)
+ index = paddle.to_tensor(self.data_zero_dim_index)
z = paddle.repeat_interleave(x, index, None)
np_z = z.numpy()
expect_out = np.repeat(
4 changes: 2 additions & 2 deletions test/legacy_test/test_roll_op.py
@@ -237,7 +237,7 @@ def test_dygraph_api(self):
self.input_data()
# case 1:
with base.dygraph.guard():
- x = base.dygraph.to_variable(self.data_x)
+ x = paddle.to_tensor(self.data_x)
z = paddle.roll(x, shifts=1)
np_z = z.numpy()
expect_out = np.array(
@@ -247,7 +247,7 @@ def test_dygraph_api(self):

# case 2:
with base.dygraph.guard():
- x = base.dygraph.to_variable(self.data_x)
+ x = paddle.to_tensor(self.data_x)
z = paddle.roll(x, shifts=1, axis=0)
np_z = z.numpy()
expect_out = np.array(
2 changes: 1 addition & 1 deletion test/legacy_test/test_rot90_op.py
@@ -312,7 +312,7 @@ def run5():
def test_dygraph(self):
img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)
with base.dygraph.guard():
- inputs = base.dygraph.to_variable(img)
+ inputs = paddle.to_tensor(img)

ret = paddle.rot90(inputs, k=1, axes=[0, 1])
ret = ret.rot90(1, axes=[0, 1])
4 changes: 2 additions & 2 deletions test/legacy_test/test_scatter_nd_op.py
@@ -559,7 +559,7 @@ class TestDygraph(unittest.TestCase):
def test_dygraph(self):
with base.dygraph.guard(base.CPUPlace()):
index_data = np.array([[1, 1], [0, 1], [1, 3]]).astype(np.int64)
- index = base.dygraph.to_variable(index_data)
+ index = paddle.to_tensor(index_data)
updates = paddle.rand(shape=[3, 9, 10], dtype='float32')
shape = [3, 5, 9, 10]
output = paddle.scatter_nd(index, updates, shape)
@@ -569,7 +569,7 @@ def test_dygraph_1(self):
x = paddle.rand(shape=[3, 5, 9, 10], dtype='float32')
updates = paddle.rand(shape=[3, 9, 10], dtype='float32')
index_data = np.array([[1, 1], [0, 1], [1, 3]]).astype(np.int64)
- index = base.dygraph.to_variable(index_data)
+ index = paddle.to_tensor(index_data)
output = paddle.scatter_nd_add(x, index, updates)


6 changes: 3 additions & 3 deletions test/legacy_test/test_scatter_op.py
@@ -676,9 +676,9 @@ def test_dygraph(self):
[[1, 1], [2, 2], [3, 3], [4, 4]]
).astype(np.float64)

- x = base.dygraph.to_variable(x_data)
- index = base.dygraph.to_variable(index_data)
- updates = base.dygraph.to_variable(updates_data)
+ x = paddle.to_tensor(x_data)
+ index = paddle.to_tensor(index_data)
+ updates = paddle.to_tensor(updates_data)

output1 = self.scatter(x, index, updates, overwrite=False)
self.assertEqual(
6 changes: 3 additions & 3 deletions test/legacy_test/test_slice_op.py
@@ -985,7 +985,7 @@ class TestImperativeVarBaseGetItem(unittest.TestCase):
def test_getitem_with_long(self):
with base.dygraph.guard():
data = np.random.random((2, 80, 16128)).astype('float32')
- var = base.dygraph.to_variable(data)
+ var = paddle.to_tensor(data)
sliced = var[:, 10:, : var.shape[1]] # var.shape[1] is 80L here
self.assertEqual(sliced.shape, [2, 70, 80])

@@ -996,15 +996,15 @@ def test_getitem_with_float(self):
def test_float_in_slice_item():
with base.dygraph.guard():
data = np.random.random((2, 80, 16128)).astype('float32')
- var = base.dygraph.to_variable(data)
+ var = paddle.to_tensor(data)
sliced = var[:, 1.1:, : var.shape[1]]

self.assertRaises(Exception, test_float_in_slice_item)

def test_float_in_index():
with base.dygraph.guard():
data = np.random.random((2, 80, 16128)).astype('float32')
- var = base.dygraph.to_variable(data)
+ var = paddle.to_tensor(data)
sliced = var[1.1]

self.assertRaises(Exception, test_float_in_index)
