remove less_equal, greater_than, greater_equal, equal, not_equal
LiYuRio committed Nov 28, 2022
1 parent b83f615 commit 5d33cb6
Showing 12 changed files with 36 additions and 312 deletions.
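All five removed ops have direct counterparts in the top-level paddle namespace, which is what the updated call sites in this commit switch to. A minimal sketch of the mapping, assuming paddle >= 2.0 in dynamic graph mode (the tensor values are illustrative, not taken from this diff):

import numpy as np
import paddle

x = paddle.to_tensor(np.array([1, 3], dtype='int32'))
y = paddle.to_tensor(np.array([1, 2], dtype='int32'))

# fluid.layers.<op> maps onto paddle.<op> with the same x/y semantics.
print(paddle.less_equal(x, y).numpy())     # [ True False]
print(paddle.greater_than(x, y).numpy())   # [False  True]
print(paddle.greater_equal(x, y).numpy())  # [ True  True]
print(paddle.equal(x, y).numpy())          # [ True False]
print(paddle.not_equal(x, y).numpy())      # [False  True]

Note that the optional cond output argument of the removed layers appears to have no counterpart in the paddle.* APIs; the comparison result is simply returned.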
278 changes: 1 addition & 277 deletions python/paddle/fluid/layers/control_flow.py
@@ -58,11 +58,6 @@
'array_write',
'create_array',
'less_than',
'less_equal',
'greater_than',
'greater_equal',
'equal',
'not_equal',
'array_read',
'array_length',
'cond',
@@ -1871,277 +1866,6 @@ def less_than(x, y, force_cpu=None, cond=None, name=None):
return cond


@templatedoc()
def less_equal(x, y, cond=None, name=None):
"""
:alias_main: paddle.less_equal
:alias: paddle.less_equal,paddle.tensor.less_equal,paddle.tensor.logic.less_equal
:old_api: paddle.fluid.layers.less_equal
This OP returns the truth value of :math:`x <= y` elementwise, which is equivalent to the overloaded operator `<=`.
Args:
x(Variable): First input to compare, which is an N-D tensor. The input data type should be float32, float64, int32, int64.
y(Variable): Second input to compare, which is an N-D tensor. The input data type should be float32, float64, int32, int64.
cond(Variable, optional): Optional output which can be any created Variable that meets the requirements to store the result of *less_equal*.
If cond is None, a new Variable will be created to store the result.
name(str, optional): The default value is None. Normally there is no need for
the user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Variable, the output data type is bool: The tensor variable storing the output, the output shape is the same as input :attr:`x`.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
label = fluid.layers.assign(np.array([1, 3], dtype='int32'))
limit = fluid.layers.assign(np.array([1, 2], dtype='int32'))
out = fluid.layers.less_equal(x=label, y=limit) #out=[True, False]
out1 = label <= limit  # out1=[True, False]
"""
check_variable_and_dtype(
x, "x", ["float32", "float64", "int32", "int64"], "less_equal"
)
check_variable_and_dtype(
y, "y", ["float32", "float64", "int32", "int64"], "less_equal"
)
if cond is not None:
check_type(cond, "cond", Variable, "less_equal")

helper = LayerHelper("less_equal", **locals())
if cond is None:
cond = helper.create_variable_for_type_inference(dtype='bool')
cond.stop_gradient = True

attrs = dict()

helper.append_op(
type='less_equal',
inputs={'X': [x], 'Y': [y]},
outputs={'Out': [cond]},
attrs=attrs,
)
return cond


@templatedoc()
def greater_than(x, y, cond=None, name=None):
"""
:alias_main: paddle.greater_than
:alias: paddle.greater_than,paddle.tensor.greater_than,paddle.tensor.logic.greater_than
:old_api: paddle.fluid.layers.greater_than
This OP returns the truth value of :math:`x > y` elementwise, which is equivalent to the overloaded operator `>`.
Args:
x(Variable): First input to compare, which is an N-D tensor. The input data type should be float32, float64, int32, int64.
y(Variable): Second input to compare, which is an N-D tensor. The input data type should be float32, float64, int32, int64.
cond(Variable, optional): Optional output which can be any created Variable that meets the requirements to store the result of *greater_than*.
If cond is None, a new Variable will be created to store the result.
name(str, optional): The default value is None. Normally there is no need for
the user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Variable, the output data type is bool: The tensor variable storing the output, the output shape is the same as input :attr:`x`.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
label = fluid.layers.assign(np.array([2, 3], dtype='int32'))
limit = fluid.layers.assign(np.array([3, 2], dtype='int32'))
out = fluid.layers.greater_than(x=label, y=limit) #out=[False, True]
out1 = label > limit #out1=[False, True]
"""
check_variable_and_dtype(
x, "x", ["float32", "float64", "int32", "int64"], "greater_than"
)
check_variable_and_dtype(
y, "y", ["float32", "float64", "int32", "int64"], "greater_than"
)
if cond is not None:
check_type(cond, "cond", Variable, "greater_than")

helper = LayerHelper("greater_than", **locals())
if cond is None:
cond = helper.create_variable_for_type_inference(dtype='bool')
cond.stop_gradient = True

attrs = dict()

if in_dygraph_mode():
return _C_ops.greater_than(x, y)
else:
helper.append_op(
type='greater_than',
inputs={'X': [x], 'Y': [y]},
outputs={'Out': [cond]},
attrs=attrs,
)
return cond


@templatedoc()
def greater_equal(x, y, cond=None, name=None):
"""
:alias_main: paddle.greater_equal
:alias: paddle.greater_equal,paddle.tensor.greater_equal,paddle.tensor.logic.greater_equal
:old_api: paddle.fluid.layers.greater_equal
This OP returns the truth value of :math:`x >= y` elementwise, which is equivalent to the overloaded operator `>=`.
Args:
x(Variable): First input to compare, which is an N-D tensor. The input data type should be float32, float64, int32, int64.
y(Variable): Second input to compare, which is an N-D tensor. The input data type should be float32, float64, int32, int64.
cond(Variable, optional): Optional output which can be any created Variable that meets the requirements to store the result of *greater_equal*.
If cond is None, a new Variable will be created to store the result.
name(str, optional): The default value is None. Normally there is no need for
the user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Variable, the output data type is bool: The tensor variable storing the output, the output shape is the same as input :attr:`x`.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
label = fluid.layers.assign(np.array([2, 2], dtype='int32'))
limit = fluid.layers.assign(np.array([2, 3], dtype='int32'))
out = fluid.layers.greater_equal(x=label, y=limit) #out=[True, False]
out_1 = label >= limit  # out_1=[True, False]
"""
check_variable_and_dtype(
x, "x", ["float32", "float64", "int32", "int64"], "greater_equal"
)
check_variable_and_dtype(
y, "y", ["float32", "float64", "int32", "int64"], "greater_equal"
)
if cond is not None:
check_type(cond, "cond", Variable, "greater_equal")

helper = LayerHelper("greater_equal", **locals())
if cond is None:
cond = helper.create_variable_for_type_inference(dtype='bool')
cond.stop_gradient = True

attrs = dict()

helper.append_op(
type='greater_equal',
inputs={'X': [x], 'Y': [y]},
outputs={'Out': [cond]},
attrs=attrs,
)
return cond


def equal(x, y, cond=None, name=None):
"""
This layer returns the truth value of :math:`x == y` elementwise.
Args:
x(Variable): Tensor, data type is float32, float64, int32, int64.
y(Variable): Tensor, data type is float32, float64, int32, int64.
cond(Variable, optional): Optional output which can be any created
Variable that meets the requirements to store the result of *equal*.
If cond is None, a new Variable will be created to store the result.
name(str, optional): The default value is None. Normally there is no need for
the user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Variable: output Tensor, its shape is the same as that of the input Tensor,
and the data type is bool.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
out_cond = fluid.data(name="input1", shape=[2], dtype='bool')
label = fluid.layers.assign(np.array([3, 3], dtype="int32"))
limit = fluid.layers.assign(np.array([3, 2], dtype="int32"))
label_cond = fluid.layers.assign(np.array([1, 2], dtype="int32"))
out1 = fluid.layers.equal(x=label, y=limit)  # out1=[True, False]
out2 = fluid.layers.equal(x=label_cond, y=limit, cond=out_cond)  # out2=[False, True] out_cond=[False, True]
"""
if in_dygraph_mode():
return _C_ops.equal(x, y)

check_variable_and_dtype(
x, "x", ["float32", "float64", "int32", "int64"], "equal"
)
check_variable_and_dtype(
y, "y", ["float32", "float64", "int32", "int64"], "equal"
)
if cond is not None:
check_type(cond, "cond", Variable, "equal")

helper = LayerHelper("equal", **locals())
if cond is None:
cond = helper.create_variable_for_type_inference(dtype='bool')
cond.stop_gradient = True

helper.append_op(
type='equal', inputs={'X': [x], 'Y': [y]}, outputs={'Out': [cond]}
)
return cond


def not_equal(x, y, cond=None, name=None):
"""
:alias_main: paddle.not_equal
:alias: paddle.not_equal,paddle.tensor.not_equal,paddle.tensor.logic.not_equal
:old_api: paddle.fluid.layers.not_equal
This OP returns the truth value of :math:`x != y` elementwise, which is equivalent to the overloaded operator `!=`.
Args:
x(Variable): First input to compare, which is an N-D tensor. The input data type should be float32, float64, int32, int64.
y(Variable): Second input to compare, which is an N-D tensor. The input data type should be float32, float64, int32, int64.
cond(Variable, optional): Optional output which can be any created Variable that meets the requirements to store the result of *not_equal*.
If cond is None, a new Variable will be created to store the result.
name(str, optional): The default value is None. Normally there is no need for
the user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Variable, the output data type is bool: The tensor variable storing the output, the output shape is the same as input :attr:`x`.
Examples:
.. code-block:: python
import paddle.fluid as fluid
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
limit = fluid.layers.fill_constant(shape=[1], value=1, dtype='int64')
out = fluid.layers.not_equal(x=label, y=limit)
"""
check_variable_and_dtype(
x, "x", ["float32", "float64", "int32", "int64"], "not_equal"
)
check_variable_and_dtype(
y, "y", ["float32", "float64", "int32", "int64"], "not_equal"
)
if cond is not None:
check_type(cond, "cond", Variable, "not_equal")

helper = LayerHelper("not_equal", **locals())
if cond is None:
cond = helper.create_variable_for_type_inference(dtype='bool')
cond.stop_gradient = True

helper.append_op(
type='not_equal', inputs={'X': [x], 'Y': [y]}, outputs={'Out': [cond]}
)
return cond


def array_read(array, i):
"""
This OP is used to read data at the specified position from the input array
@@ -3590,7 +3314,7 @@ def _check_args(branch_index, branch_fns, default):
pred_fn_pairs = []
for index, fn in branch_fns:
new_index = fill_constant(shape=[1], dtype="int64", value=index)
pred = equal(branch_index, new_index)
pred = paddle.equal(branch_index, new_index)
pred_fn_pairs.append((pred, fn))

return pred_fn_pairs, default
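The single addition in control_flow.py is the _check_args hunk above, which switches the predicate construction to paddle.equal. A standalone sketch of that pattern, again assuming paddle >= 2.0 in dynamic graph mode (the index values and callables are made up for illustration):

import paddle

branch_index = paddle.to_tensor([1], dtype='int64')

pred_fn_pairs = []
for index, fn in ((0, lambda: 'a'), (1, lambda: 'b'), (2, lambda: 'c')):
    # Compare the scalar branch index against each candidate index;
    # each comparison yields a bool tensor of shape [1].
    new_index = paddle.full(shape=[1], fill_value=index, dtype='int64')
    pred_fn_pairs.append((paddle.equal(branch_index, new_index), fn))

print([pred.numpy()[0] for pred, _ in pred_fn_pairs])  # [False, True, False]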
10 changes: 5 additions & 5 deletions python/paddle/fluid/layers/rnn.py
@@ -1337,7 +1337,7 @@ def _beam_search_step(self, time, logits, next_cell_states, beam_state):
)
next_finished = paddle.logical_or(
next_finished,
control_flow.equal(token_indices, self.end_token_tensor),
paddle.equal(token_indices, self.end_token_tensor),
)

beam_search_output = self.OutputWrapper(
@@ -1722,7 +1722,7 @@ def _create_array_out_of_while(dtype):
if max_step_num is not None:
paddle.logical_and(
paddle.logical_not(nn.reduce_all(global_finished)),
control_flow.less_equal(step_idx, max_step_num),
paddle.less_equal(step_idx, max_step_num),
cond,
)
else:
@@ -2013,7 +2013,7 @@ def initialize(self):
variable[s], and the tensor's shape is `[batch_size, ...]`. \
`initial_finished` is a bool tensor with shape `[batch_size]`.
"""
init_finished = control_flow.equal(
init_finished = paddle.equal(
self.sequence_length,
tensor.fill_constant(
shape=[1], dtype=self.sequence_length.dtype, value=0
@@ -2084,7 +2084,7 @@ def next_inputs(self, time, outputs, states, sample_ids):
if self.sequence_length.dtype != time.dtype:
self.sequence_length = tensor.cast(self.sequence_length, time.dtype)
next_time = time + 1
finished = control_flow.less_equal(self.sequence_length, next_time)
finished = paddle.less_equal(self.sequence_length, next_time)

def _slice(x): # TODO: use Variable.__getitem__
axes = [0 if self.time_major else 1]
@@ -2227,7 +2227,7 @@ def next_inputs(self, time, outputs, states, sample_ids):
argument `states`. `finished` is a `bool` Tensor with \
shape `[batch_size]`.
"""
finished = control_flow.equal(sample_ids, self.end_token)
finished = paddle.equal(sample_ids, self.end_token)
next_inputs = self.embedding_fn(sample_ids)
return finished, next_inputs, states

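In the rnn.py decoding helpers above, these compare ops produce the per-sample finished mask. A rough sketch of that logic with the paddle.* ops now in use (values invented for illustration; this is not the rnn.py code itself):

import paddle

sample_ids = paddle.to_tensor([2, 7, 9], dtype='int64')        # sampled token ids
end_token = paddle.to_tensor(2, dtype='int64')
sequence_length = paddle.to_tensor([5, 3, 6], dtype='int64')
next_time = paddle.to_tensor(4, dtype='int64')

# A sample is finished once it emits the end token or runs out of steps.
finished = paddle.logical_or(
    paddle.equal(sample_ids, end_token),
    paddle.less_equal(sequence_length, next_time),
)
print(finished.numpy())  # [ True  True False]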
@@ -490,7 +490,7 @@ def beam_search(self, inputs):
next_finished = fluid.layers.cast(next_finished, "bool")
next_finished = paddle.logical_or(
next_finished,
fluid.layers.equal(token_indices, end_token_tensor),
paddle.equal(token_indices, end_token_tensor),
)
next_finished = fluid.layers.cast(next_finished, "float32")

@@ -871,7 +871,7 @@ def gather(input, indices, batch_pos):
log_probs = gather(log_probs, topk_indices, batch_pos)
finished = gather(finished, beam_indices, batch_pos)
finished = paddle.logical_or(
finished, layers.equal(token_indices, end_token_tensor)
finished, paddle.equal(token_indices, end_token_tensor)
)
trg_word = paddle.reshape(token_indices, [-1, 1])

@@ -56,7 +56,7 @@ def build_model(self):
y = paddle.static.data(
name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32'
)
out = paddle.fluid.layers.equal(x, y, **self.attrs)
out = paddle.equal(x, y)
self.fetch_list = [out.name]

def run_model(self, exec_mode):
@@ -56,7 +56,7 @@ def build_model(self):
y = paddle.static.data(
name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32'
)
out = paddle.fluid.layers.not_equal(x, y, **self.attrs)
out = paddle.not_equal(x, y)
self.fetch_list = [out.name]

def run_model(self, exec_mode):
4 changes: 2 additions & 2 deletions python/paddle/fluid/tests/unittests/test_case.py
@@ -105,8 +105,8 @@ def fn_3():
y = layers.fill_constant(shape=[1], dtype='float32', value=1)
z = layers.fill_constant(shape=[1], dtype='float32', value=3)

pred_1 = layers.equal(x, y) # true
pred_2 = layers.equal(x, z) # false
pred_1 = paddle.equal(x, y) # true
pred_2 = paddle.equal(x, z) # false

out = layers.case(((pred_1, fn_1), (pred_2, fn_2)), fn_3)

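The test_case.py change keeps the same structure: boolean predicates built with paddle.equal are still fed to the case layer. A sketch of the full pattern as a standalone static-graph program, assuming paddle.static.nn.case as the 2.x counterpart of layers.case:

import paddle

paddle.enable_static()

def fn_1():
    return paddle.full(shape=[1], fill_value=1, dtype='float32')

def fn_2():
    return paddle.full(shape=[1], fill_value=2, dtype='float32')

def fn_3():
    return paddle.full(shape=[1], fill_value=3, dtype='float32')

main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, startup_prog):
    x = paddle.full(shape=[1], fill_value=0.3, dtype='float32')
    y = paddle.full(shape=[1], fill_value=0.1, dtype='float32')
    z = paddle.full(shape=[1], fill_value=0.3, dtype='float32')
    pred_1 = paddle.equal(x, y)   # False
    pred_2 = paddle.equal(x, z)   # True
    # The first true predicate selects the branch; fn_3 is the default.
    out = paddle.static.nn.case([(pred_1, fn_1), (pred_2, fn_2)], default=fn_3)

exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(startup_prog)
res = exe.run(main_prog, fetch_list=[out])
print(res)  # [array([2.], dtype=float32)]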