From 204aadd10885b87839b783fc88b4ee4a68091171 Mon Sep 17 00:00:00 2001 From: co63oc Date: Mon, 6 Nov 2023 12:14:52 +0800 Subject: [PATCH] =?UTF-8?q?=E3=80=90Hackathon=205th=20No.43=E3=80=91API?= =?UTF-8?q?=E8=BD=AC=E6=8D=A221-41=20(#320)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add tests * Fix * Fix * Fix * Fix * Fix --- paconvert/api_mapping.json | 209 ++++++++++++++++++ paconvert/api_matcher.py | 69 ++++++ tests/apibase.py | 2 +- tests/test_broadcast_shapes.py | 76 +++++++ tests/test_broadcast_tensors.py | 78 +++++++ tests/test_can_cast.py | 36 +++ tests/test_finfo.py | 63 ++++++ tests/test_iinfo.py | 58 +++++ tests/test_nn_CTCLoss.py | 72 ++++++ tests/test_nn_GaussianNLLLoss.py | 94 ++++++++ tests/test_nn_MultiLabelMarginLoss.py | 34 +++ tests/test_nn_MultiLabelSoftMarginLoss.py | 87 ++++++++ tests/test_nn_MultiMarginLoss.py | 89 ++++++++ tests/test_nn_PoissonNLLLoss.py | 89 ++++++++ tests/test_nn_SmoothL1Loss.py | 101 +++++++++ tests/test_nn_Transformer.py | 150 +++++++++++++ tests/test_nn_TransformerEncoder.py | 117 ++++++++++ tests/test_nn_TransformerEncoderLayer.py | 130 +++++++++++ tests/test_nn_functional_ctc_loss.py | 51 +++++ ...st_nn_functional_multilabel_margin_loss.py | 34 +++ tests/test_nn_functional_threshold.py | 109 +++++++++ .../test_nn_functional_triplet_margin_loss.py | 100 +++++++++ tests/test_utils_data_DataLoader.py | 181 +++++++++++++++ 23 files changed, 2028 insertions(+), 1 deletion(-) create mode 100644 tests/test_broadcast_shapes.py create mode 100644 tests/test_broadcast_tensors.py create mode 100644 tests/test_can_cast.py create mode 100644 tests/test_finfo.py create mode 100644 tests/test_iinfo.py create mode 100644 tests/test_nn_CTCLoss.py create mode 100644 tests/test_nn_GaussianNLLLoss.py create mode 100644 tests/test_nn_MultiLabelMarginLoss.py create mode 100644 tests/test_nn_MultiLabelSoftMarginLoss.py create mode 100644 tests/test_nn_MultiMarginLoss.py create mode 100644 tests/test_nn_PoissonNLLLoss.py create mode 100644 tests/test_nn_SmoothL1Loss.py create mode 100644 tests/test_nn_Transformer.py create mode 100644 tests/test_nn_TransformerEncoder.py create mode 100644 tests/test_nn_TransformerEncoderLayer.py create mode 100644 tests/test_nn_functional_ctc_loss.py create mode 100644 tests/test_nn_functional_multilabel_margin_loss.py create mode 100644 tests/test_nn_functional_threshold.py create mode 100644 tests/test_nn_functional_triplet_margin_loss.py create mode 100644 tests/test_utils_data_DataLoader.py diff --git a/paconvert/api_mapping.json b/paconvert/api_mapping.json index c291132ed..15efd6da4 100644 --- a/paconvert/api_mapping.json +++ b/paconvert/api_mapping.json @@ -4029,6 +4029,14 @@ "Matcher": "GenericMatcher", "paddle_api": "'bool'" }, + "torch.broadcast_shapes": { + "Matcher": "BroadcastShapesMatcher", + "paddle_api": "paddle.broadcast_shape" + }, + "torch.broadcast_tensors": { + "Matcher": "BroadcastTensorsMatcher", + "paddle_api": "paddle.broadcast_tensors" + }, "torch.broadcast_to": { "Matcher": "GenericMatcher", "paddle_api": "paddle.broadcast_to", @@ -5928,6 +5936,13 @@ "dim": "axes" } }, + "torch.finfo": { + "Matcher": "IInfoMatcher", + "paddle_api": "paddle.finfo", + "args_list": [ + "type" + ] + }, "torch.fix": { "Matcher": "GenericMatcher", "paddle_api": "paddle.trunc", @@ -6350,6 +6365,13 @@ "out" ] }, + "torch.iinfo": { + "Matcher": "IInfoMatcher", + "paddle_api": "paddle.iinfo", + "args_list": [ + "type" + ] + }, "torch.imag": { "Matcher": "GenericMatcher", "paddle_api": 
"paddle.imag", @@ -8233,6 +8255,18 @@ "dtype": "" } }, + "torch.nn.GaussianNLLLoss": { + "Matcher": "GenericMatcher", + "paddle_api": "paddle.nn.GaussianNLLLoss", + "args_list": [ + "full", + "eps", + "reduction" + ], + "kwargs_change": { + "eps": "epsilon" + } + }, "torch.nn.GroupNorm": { "Matcher": "GenericMatcher", "paddle_api": "paddle.nn.GroupNorm", @@ -8864,6 +8898,28 @@ "modules": "sublayers" } }, + "torch.nn.MultiLabelSoftMarginLoss": { + "Matcher": "SizeAverageMatcher", + "paddle_api": "paddle.nn.MultiLabelSoftMarginLoss", + "args_list": [ + "weight", + "size_average", + "reduce", + "reduction" + ] + }, + "torch.nn.MultiMarginLoss": { + "Matcher": "SizeAverageMatcher", + "paddle_api": "paddle.nn.MultiMarginLoss", + "args_list": [ + "p", + "margin", + "weight", + "size_average", + "reduce", + "reduction" + ] + }, "torch.nn.MultiheadAttention": { "paddle_api": "paddle.nn.MultiHeadAttention", "args_list": [ @@ -8961,6 +9017,21 @@ "downscale_factor" ] }, + "torch.nn.PoissonNLLLoss": { + "Matcher": "SizeAverageMatcher", + "paddle_api": "paddle.nn.PoissonNLLLoss", + "args_list": [ + "log_input", + "full", + "size_average", + "eps", + "reduce", + "reduction" + ], + "kwargs_change": { + "eps": "epsilon" + } + }, "torch.nn.RNN": { "Matcher": "RNNMatcher", "paddle_api": "paddle.nn.SimpleRNN", @@ -9138,6 +9209,19 @@ "Matcher": "GenericMatcher", "paddle_api": "paddle.nn.Sigmoid" }, + "torch.nn.SmoothL1Loss": { + "Matcher": "SmoothL1LossMatcher", + "paddle_api": "paddle.nn.SmoothL1Loss", + "args_list": [ + "size_average", + "reduce", + "reduction", + "beta" + ], + "kwargs_change": { + "beta": "delta" + } + }, "torch.nn.SoftMarginLoss": { "Matcher": "SizeAverageMatcher", "paddle_api": "paddle.nn.SoftMarginLoss", @@ -9233,6 +9317,35 @@ "Matcher": "GenericMatcher", "paddle_api": "paddle.nn.Tanhshrink" }, + "torch.nn.Transformer": { + "Matcher": "GenericMatcher", + "paddle_api": "paddle.nn.Transformer", + "args_list": [ + "d_model", + "nhead", + "num_encoder_layers", + "num_decoder_layers", + "dim_feedforward", + "dropout", + "activation", + "custom_encoder", + "custom_decoder", + "layer_norm_eps", + "batch_first", + "norm_first", + "device", + "dtype" + ], + "kwargs_change": { + "norm_first": "normalize_before", + "device": "", + "dtype": "" + }, + "unsupport_args": [ + "layer_norm_eps", + "batch_first" + ] + }, "torch.nn.TransformerDecoder": { "Matcher": "GenericMatcher", "paddle_api": "paddle.nn.TransformerDecoder", @@ -9270,6 +9383,49 @@ "dim_feedforward": 2048 } }, + "torch.nn.TransformerEncoder": { + "Matcher": "GenericMatcher", + "paddle_api": "paddle.nn.TransformerEncoder", + "args_list": [ + "encoder_layer", + "num_layers", + "norm", + "enable_nested_tensor", + "mask_check" + ], + "kwargs_change": { + "enable_nested_tensor": "", + "mask_check": "" + } + }, + "torch.nn.TransformerEncoderLayer": { + "Matcher": "GenericMatcher", + "paddle_api": "paddle.nn.TransformerEncoderLayer", + "args_list": [ + "d_model", + "nhead", + "dim_feedforward", + "dropout", + "activation", + "layer_norm_eps", + "batch_first", + "norm_first", + "device", + "dtype" + ], + "kwargs_change": { + "norm_first": "normalize_before", + "device": "", + "dtype": "" + }, + "unsupport_args": [ + "layer_norm_eps", + "batch_first" + ], + "paddle_default_kwargs": { + "dim_feedforward": 2048 + } + }, "torch.nn.TripletMarginLoss": { "Matcher": "SizeAverageMatcher", "paddle_api": "paddle.nn.TripletMarginLoss", @@ -10645,6 +10801,26 @@ "input": "x" } }, + "torch.nn.functional.triplet_margin_loss": { + "Matcher": 
"SizeAverageMatcher", + "paddle_api": "paddle.nn.functional.triplet_margin_loss", + "args_list": [ + "anchor", + "positive", + "negative", + "margin", + "p", + "eps", + "swap", + "size_average", + "reduce", + "reduction" + ], + "kwargs_change": { + "anchor": "input", + "eps": "epsilon" + } + }, "torch.nn.functional.triplet_margin_with_distance_loss": { "Matcher": "GenericMatcher", "paddle_api": "paddle.nn.functional.triplet_margin_with_distance_loss", @@ -13229,6 +13405,39 @@ "datasets" ] }, + "torch.utils.data.DataLoader": { + "Matcher": "GenericMatcher", + "paddle_api": "paddle.io.DataLoader", + "args_list": [ + "dataset", + "batch_size", + "shuffle", + "sampler", + "batch_sampler", + "num_workers", + "collate_fn", + "pin_memory", + "drop_last", + "timeout", + "worker_init_fn", + "multiprocessing_context", + "generator", + "prefetch_factor", + "persistent_workers", + "pin_memory_device" + ], + "kwargs_change": { + "pin_memory": "", + "multiprocessing_context": "", + "generator": "", + "persistent_workers": "", + "pin_memory_device": "" + }, + "unsupport_args": [ + "sampler", + "prefetch_factor" + ] + }, "torch.utils.data.Dataset": { "Matcher": "GenericMatcher", "paddle_api": "paddle.io.Dataset" diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index 68844c824..f1129d789 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -443,6 +443,75 @@ def generate_code(self, kwargs): return code +class BroadcastTensorsMatcher(BaseMatcher): + def get_paddle_nodes(self, args, kwargs): + if len(args) == 1 and isinstance(args[0], ast.Starred): + star_var = astor.to_source(args[0].value).strip("\n") + code = "{}({})".format(self.get_paddle_api(), star_var) + else: + new_args = self.parse_args(args) + code = "{}([{}])".format(self.get_paddle_api(), ",".join(new_args)) + return ast.parse(code).body + + +class BroadcastShapesMatcher(BaseMatcher): + def get_paddle_nodes(self, args, kwargs): + if len(args) == 1 and isinstance(args[0], ast.Starred): + return None + new_args = self.parse_args(args) + code = new_args[0] + # Call the paddle.broadcast_shape multiple times + for i in range(1, len(new_args)): + code = "{}({}, {})".format(self.get_paddle_api(), code, new_args[i]) + return ast.parse(code).body + + +class IInfoMatcher(BaseMatcher): + def generate_aux_code(self): + CODE_TEMPLATE = textwrap.dedent( + """ + def _STR_2_PADDLE_DTYPE(type): + type_map = { + "int32": paddle.int32, + "uint8": paddle.uint8, + "int8": paddle.int8, + "int16": paddle.int16, + "int32": paddle.int32, + "int64": paddle.int64, + "float16": paddle.float16, + "float32": paddle.float32, + "float64": paddle.float64, + "bfloat16": paddle.bfloat16, + } + return type_map.get(type) + """ + ) + return CODE_TEMPLATE + + def generate_code(self, kwargs): + self.write_aux_code() + type = kwargs.pop("type") + return "{}(paddle_aux._STR_2_PADDLE_DTYPE({}))".format( + self.get_paddle_api(), type + ) + + +class SmoothL1LossMatcher(BaseMatcher): + def get_paddle_nodes(self, args, kwargs): + kwargs = self.parse_kwargs(kwargs) + beta = kwargs.get("beta", None) + if beta is not None: + beta = beta.replace("(", "").replace(")", "") + try: + beta = float(beta) + if float(beta) != 1.0: + return None + except: + return None + code = SizeAverageMatcher.generate_code(self, kwargs) + return ast.parse(code).body + + class SwapAxesMatcher(BaseMatcher): def generate_code(self, kwargs): if "input" not in kwargs: diff --git a/tests/apibase.py b/tests/apibase.py index 8337d44ab..e7e1b00f4 100644 --- a/tests/apibase.py +++ 
b/tests/apibase.py @@ -174,7 +174,7 @@ def compare( ) return - if isinstance(pytorch_result, (bool, np.number, int, str, type(None))): + if isinstance(pytorch_result, (bool, np.number, int, float, str, type(None))): assert type(paddle_result) == type( pytorch_result ), "paddle result's type [{}] should be the same with pytorch's type [{}]".format( diff --git a/tests/test_broadcast_shapes.py b/tests/test_broadcast_shapes.py new file mode 100644 index 000000000..1a7d60b52 --- /dev/null +++ b/tests/test_broadcast_shapes.py @@ -0,0 +1,76 @@ +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import textwrap + +from apibase import APIBase + +obj = APIBase("torch.broadcast_shapes") + + +def test_case_1(): + pytorch_code = textwrap.dedent( + """ + import torch + x = (2,) + y = (3, 1) + result = torch.broadcast_shapes(x, y) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_2(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.broadcast_shapes((2,), (3, 1)) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_3(): + pytorch_code = textwrap.dedent( + """ + import torch + x = (2,) + y = (3, 1) + z = (1, 1, 1) + result = torch.broadcast_shapes(x, y, z) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_4(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.broadcast_shapes((2,), (3, 1), (1, 1, 1)) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_5(): + pytorch_code = textwrap.dedent( + """ + import torch + x = (2,) + y = (3, 1) + result = torch.broadcast_shapes(x, y, (1, 1, 2)) + """ + ) + obj.run(pytorch_code, ["result"]) diff --git a/tests/test_broadcast_tensors.py b/tests/test_broadcast_tensors.py new file mode 100644 index 000000000..954a720d3 --- /dev/null +++ b/tests/test_broadcast_tensors.py @@ -0,0 +1,78 @@ +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import textwrap + +from apibase import APIBase + +obj = APIBase("torch.broadcast_tensors") + + +def test_case_1(): + pytorch_code = textwrap.dedent( + """ + import torch + x = torch.tensor([[0,1,2]]) + y = torch.tensor([[0],[1]]) + result = torch.broadcast_tensors(x, y) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_2(): + pytorch_code = textwrap.dedent( + """ + import torch + y = torch.tensor([[0],[1]]) + result = torch.broadcast_tensors(torch.tensor([[0,1,2]]), y) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_3(): + pytorch_code = textwrap.dedent( + """ + import torch + x = torch.tensor([[0,1,2]]) + y = torch.tensor([[0],[1],[2]]) + z = torch.tensor([[20]]) + result = torch.broadcast_tensors(x, y, z) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_4(): + pytorch_code = textwrap.dedent( + """ + import torch + x = torch.tensor([[0,1,2]]) + y = torch.tensor([[0],[1]]) + result = torch.broadcast_tensors(x, y, torch.tensor([[20]])) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_5(): + pytorch_code = textwrap.dedent( + """ + import torch + tensors = torch.tensor([[0,1,2]]), torch.tensor([[0],[1]]), torch.tensor([[20]]) + result = torch.broadcast_tensors(*tensors) + """ + ) + obj.run(pytorch_code, ["result"]) diff --git a/tests/test_can_cast.py b/tests/test_can_cast.py new file mode 100644 index 000000000..1487616c1 --- /dev/null +++ b/tests/test_can_cast.py @@ -0,0 +1,36 @@ +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import textwrap + +from apibase import APIBase + +obj = APIBase("torch.can_cast") + + +def test_case_1(): + pytorch_code = textwrap.dedent( + """ + import torch + x = torch.tensor([[0,1,2]]) + y = torch.tensor([[0],[1]]) + result = torch.can_cast(x, y) + """ + ) + obj.run( + pytorch_code, + ["result"], + unsupport=True, + reason="paddle not support this API now", + ) diff --git a/tests/test_finfo.py b/tests/test_finfo.py new file mode 100644 index 000000000..63fd9cb81 --- /dev/null +++ b/tests/test_finfo.py @@ -0,0 +1,63 @@ +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import textwrap + +from apibase import APIBase + +obj = APIBase("torch.finfo", is_aux_api=True) + + +def test_case_1(): + pytorch_code = textwrap.dedent( + """ + import torch + bits = torch.finfo(torch.float16).bits + min = torch.finfo(torch.float16).min + max = torch.finfo(torch.float16).max + """ + ) + obj.run( + pytorch_code, + ["bits", "min", "max"], + check_value=False, + check_stop_gradient=False, + ) + + +def test_case_2(): + pytorch_code = textwrap.dedent( + """ + import torch + x = torch.float32 + bits = torch.finfo(x).bits + min = torch.finfo(x).min + max = torch.finfo(x).max + """ + ) + obj.run(pytorch_code, ["bits", "min", "max"]) + + +def test_case_3(): + pytorch_code = textwrap.dedent( + """ + import torch + x = torch.bfloat16 + bits = torch.finfo(type=x).bits + min = torch.finfo(type=x).min + max = torch.finfo(type=x).max + """ + ) + obj.run(pytorch_code, ["bits", "min", "max"]) diff --git a/tests/test_iinfo.py b/tests/test_iinfo.py new file mode 100644 index 000000000..d580e10af --- /dev/null +++ b/tests/test_iinfo.py @@ -0,0 +1,58 @@ +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import textwrap + +from apibase import APIBase + +obj = APIBase("torch.iinfo", is_aux_api=True) + + +def test_case_1(): + pytorch_code = textwrap.dedent( + """ + import torch + bits = torch.iinfo(torch.int32).bits + min = torch.iinfo(torch.int32).min + max = torch.iinfo(torch.int32).max + """ + ) + obj.run(pytorch_code, ["bits", "min", "max"]) + + +def test_case_2(): + pytorch_code = textwrap.dedent( + """ + import torch + x = torch.int16 + bits = torch.iinfo(x).bits + min = torch.iinfo(x).min + max = torch.iinfo(x).max + """ + ) + obj.run(pytorch_code, ["bits", "min", "max"]) + + +def test_case_3(): + pytorch_code = textwrap.dedent( + """ + import torch + x = torch.uint8 + bits = torch.iinfo(type=x).bits + min = torch.iinfo(type=x).min + max = torch.iinfo(type=x).max + """ + ) + obj.run(pytorch_code, ["bits", "min", "max"]) diff --git a/tests/test_nn_CTCLoss.py b/tests/test_nn_CTCLoss.py new file mode 100644 index 000000000..1ba8d69a9 --- /dev/null +++ b/tests/test_nn_CTCLoss.py @@ -0,0 +1,72 @@ +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import textwrap + +from apibase import APIBase + +obj = APIBase("torch.nn.CTCLoss") + + +def test_case_1(): + pytorch_code = textwrap.dedent( + """ + import torch + log_probs = torch.tensor([[[4.17021990e-01, 7.20324516e-01, 1.14374816e-04], + [3.02332580e-01, 1.46755889e-01, 9.23385918e-02]], + [[1.86260208e-01, 3.45560730e-01, 3.96767467e-01], + [5.38816750e-01, 4.19194520e-01, 6.85219526e-01]], + [[2.04452246e-01, 8.78117442e-01, 2.73875929e-02], + [6.70467496e-01, 4.17304814e-01, 5.58689833e-01]], + [[1.40386939e-01, 1.98101491e-01, 8.00744593e-01], + [9.68261600e-01, 3.13424170e-01, 6.92322612e-01]], + [[8.76389146e-01, 8.94606650e-01, 8.50442126e-02], + [3.90547849e-02, 1.69830427e-01, 8.78142476e-01]]], dtype=torch.float32) + labels = torch.tensor([[1, 2, 2], [1, 2, 2]], dtype=torch.int32) + input_lengths = torch.tensor([5, 5], dtype=torch.int64) + label_lengths = torch.tensor([3, 3], dtype=torch.int64) + result = torch.nn.CTCLoss(blank=0, reduction='mean')(log_probs, labels, input_lengths, label_lengths) + """ + ) + obj.run( + pytorch_code, + ["result"], + unsupport=True, + reason="torch log_softmax + ctc_loss is equivalent to paddle ctc_loss", + ) + + +def test_case_2(): + pytorch_code = textwrap.dedent( + """ + import torch + T = 50 + C = 20 + N = 16 + S = 30 + S_min = 10 + input = torch.arange(T * N * C).to(dtype=torch.float32).reshape((T, N, C)).log_softmax(0).detach().requires_grad_() + target = torch.randint(low=1, high=C, size=(N, S), dtype=torch.int32) + input_lengths = torch.full(size=(N,), fill_value=T, dtype=torch.int64) + target_lengths = torch.randint(low=S_min, high=S, size=(N,), dtype=torch.int64) + ctc_loss = torch.nn.CTCLoss() + result = ctc_loss(input, target, input_lengths, target_lengths) + """ + ) + obj.run( + pytorch_code, + ["result"], + unsupport=True, + reason="torch log_softmax + ctc_loss is equivalent to paddle ctc_loss", + ) diff --git a/tests/test_nn_GaussianNLLLoss.py b/tests/test_nn_GaussianNLLLoss.py new file mode 100644 index 000000000..65f50197d --- /dev/null +++ b/tests/test_nn_GaussianNLLLoss.py @@ -0,0 +1,94 @@ +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import textwrap + +from apibase import APIBase + +obj = APIBase("torch.nn.GaussianNLLLoss") + + +def test_case_1(): + pytorch_code = textwrap.dedent( + """ + import torch + loss = torch.nn.GaussianNLLLoss() + input = torch.ones([5, 2]).to(dtype=torch.float32) + label = torch.ones([5, 2]).to(dtype=torch.float32) + variance = torch.ones([5, 2]).to(dtype=torch.float32) + result = loss(input, label, variance) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_2(): + pytorch_code = textwrap.dedent( + """ + import torch + loss = torch.nn.GaussianNLLLoss(full=False) + input = torch.ones([5, 2]).to(dtype=torch.float32) + label = torch.ones([5, 2]).to(dtype=torch.float32) + variance = torch.ones([5, 2]).to(dtype=torch.float32) + result = loss(input, label, variance) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_3(): + pytorch_code = textwrap.dedent( + """ + import torch + loss = torch.nn.GaussianNLLLoss(eps=1e-08, + full=False) + input = torch.full([5, 2], 1).to(dtype=torch.float32) + label = torch.full([5, 2], 2).to(dtype=torch.float32) + variance = torch.ones([5, 2]).to(dtype=torch.float32) + result = loss(input, label, variance) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_4(): + pytorch_code = textwrap.dedent( + """ + import torch + loss = torch.nn.GaussianNLLLoss(full=False, + eps=1e-08, + reduction='mean') + input = torch.full([5, 2], 1).to(dtype=torch.float32) + label = torch.full([5, 2], 2).to(dtype=torch.float32) + variance = torch.ones([5, 2]).to(dtype=torch.float32) + result = loss(input, label, variance) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_5(): + pytorch_code = textwrap.dedent( + """ + import torch + loss = torch.nn.GaussianNLLLoss(full=False, + eps=1e-08, + reduction='sum') + input = torch.full([5, 2], 1).to(dtype=torch.float32) + label = torch.full([5, 2], 2).to(dtype=torch.float32) + variance = torch.ones([5, 2]).to(dtype=torch.float32) + result = loss(input, label, variance) + """ + ) + obj.run(pytorch_code, ["result"]) diff --git a/tests/test_nn_MultiLabelMarginLoss.py b/tests/test_nn_MultiLabelMarginLoss.py new file mode 100644 index 000000000..c16d8ea59 --- /dev/null +++ b/tests/test_nn_MultiLabelMarginLoss.py @@ -0,0 +1,34 @@ +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import textwrap + +from apibase import APIBase + +obj = APIBase("torch.nn.MultiLabelMarginLoss") + + +def test_case_1(): + pytorch_code = textwrap.dedent( + """ + import torch + loss = torch.nn.MultiLabelMarginLoss() + input = torch.tensor([[0.1, 0.2, 0.4, 0.8]]).to(dtype=torch.float32) + label = torch.LongTensor([[3, 0, -1, 1]]) + result = loss(input, label) + """ + ) + obj.run( + pytorch_code, ["result"], unsupport=True, reason="The API is not supported." 
+ ) diff --git a/tests/test_nn_MultiLabelSoftMarginLoss.py b/tests/test_nn_MultiLabelSoftMarginLoss.py new file mode 100644 index 000000000..d2bcb2b76 --- /dev/null +++ b/tests/test_nn_MultiLabelSoftMarginLoss.py @@ -0,0 +1,87 @@ +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import textwrap + +from apibase import APIBase + +obj = APIBase("torch.nn.MultiLabelSoftMarginLoss") + + +def test_case_1(): + pytorch_code = textwrap.dedent( + """ + import torch + loss = torch.nn.MultiLabelSoftMarginLoss() + input = torch.tensor([[1, -2, 3], [0, -1, 2], [1, 0, 1]]).to(dtype=torch.float32) + label = torch.tensor([[-1, 1, -1], [1, 1, 1], [1, -1, 1]]).to(dtype=torch.float32) + result = loss(input, label) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_2(): + pytorch_code = textwrap.dedent( + """ + import torch + loss = torch.nn.MultiLabelSoftMarginLoss(reduction='sum') + input = torch.tensor([[1, -2, 3], [0, -1, 2], [1, 0, 1]]).to(dtype=torch.float32) + label = torch.tensor([[-1, 1, -1], [1, 1, 1], [1, -1, 1]]).to(dtype=torch.float32) + result = loss(input, label) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_3(): + pytorch_code = textwrap.dedent( + """ + import torch + loss = torch.nn.MultiLabelSoftMarginLoss(reduction='sum', + size_average=None) + input = torch.tensor([[1, -2, 3], [0, -1, 2], [1, 0, 1]]).to(dtype=torch.float32) + label = torch.tensor([[-1, 1, -1], [1, 1, 1], [1, -1, 1]]).to(dtype=torch.float32) + result = loss(input, label) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_4(): + pytorch_code = textwrap.dedent( + """ + import torch + loss = torch.nn.MultiLabelSoftMarginLoss(weight=None, + size_average=None, + reduce=None, reduction='mean') + input = torch.tensor([[1, -2, 3], [0, -1, 2], [1, 0, 1]]).to(dtype=torch.float32) + label = torch.tensor([[-1, 1, -1], [1, 1, 1], [1, -1, 1]]).to(dtype=torch.float32) + result = loss(input, label) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_5(): + pytorch_code = textwrap.dedent( + """ + import torch + loss = torch.nn.MultiLabelSoftMarginLoss(None, None, None, 'mean') + input = torch.tensor([[1, -2, 3], [0, -1, 2], [1, 0, 1]]).to(dtype=torch.float32) + label = torch.tensor([[-1, 1, -1], [1, 1, 1], [1, -1, 1]]).to(dtype=torch.float32) + result = loss(input, label) + """ + ) + obj.run(pytorch_code, ["result"]) diff --git a/tests/test_nn_MultiMarginLoss.py b/tests/test_nn_MultiMarginLoss.py new file mode 100644 index 000000000..ebdc87022 --- /dev/null +++ b/tests/test_nn_MultiMarginLoss.py @@ -0,0 +1,89 @@ +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import textwrap + +from apibase import APIBase + +obj = APIBase("torch.nn.MultiMarginLoss") + + +def test_case_1(): + pytorch_code = textwrap.dedent( + """ + import torch + loss = torch.nn.MultiMarginLoss() + input = torch.tensor([[1, -2, 3], [0, -1, 2], [1, 0, 1]]).to(dtype=torch.float32) + label = torch.tensor([0, 1, 2]) + result = loss(input, label) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_2(): + pytorch_code = textwrap.dedent( + """ + import torch + loss = torch.nn.MultiMarginLoss(reduction='sum') + input = torch.tensor([[1, -2, 3], [0, -1, 2], [1, 0, 1]]).to(dtype=torch.float32) + label = torch.tensor([0, 1, 2]) + result = loss(input, label) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_3(): + pytorch_code = textwrap.dedent( + """ + import torch + loss = torch.nn.MultiMarginLoss(reduction='sum', + p=1) + input = torch.tensor([[1, -2, 3], [0, -1, 2], [1, 0, 1]]).to(dtype=torch.float32) + label = torch.tensor([0, 1, 2]) + result = loss(input, label) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_4(): + pytorch_code = textwrap.dedent( + """ + import torch + loss = torch.nn.MultiMarginLoss(p=1, margin=1.0, + weight=None, size_average=None, + reduce=None, reduction='mean') + input = torch.tensor([[1, -2, 3], [0, -1, 2], [1, 0, 1]]).to(dtype=torch.float32) + label = torch.tensor([0, 1, 2]) + result = loss(input, label) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_5(): + pytorch_code = textwrap.dedent( + """ + import torch + loss = torch.nn.MultiMarginLoss(1, 1.0, + None, None, + None, 'mean') + input = torch.tensor([[1, -2, 3], [0, -1, 2], [1, 0, 1]]).to(dtype=torch.float32) + label = torch.tensor([0, 1, 2]) + result = loss(input, label) + """ + ) + obj.run(pytorch_code, ["result"]) diff --git a/tests/test_nn_PoissonNLLLoss.py b/tests/test_nn_PoissonNLLLoss.py new file mode 100644 index 000000000..19328e4e1 --- /dev/null +++ b/tests/test_nn_PoissonNLLLoss.py @@ -0,0 +1,89 @@ +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import textwrap + +from apibase import APIBase + +obj = APIBase("torch.nn.PoissonNLLLoss") + + +def test_case_1(): + pytorch_code = textwrap.dedent( + """ + import torch + loss = torch.nn.PoissonNLLLoss() + input = torch.ones([5, 2]).to(dtype=torch.float32) + label = torch.ones([5, 2]).to(dtype=torch.float32) + result = loss(input, label) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_2(): + pytorch_code = textwrap.dedent( + """ + import torch + loss = torch.nn.PoissonNLLLoss(log_input=True, full=False) + input = torch.ones([5, 2]).to(dtype=torch.float32) + label = torch.ones([5, 2]).to(dtype=torch.float32) + result = loss(input, label) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_3(): + pytorch_code = textwrap.dedent( + """ + import torch + loss = torch.nn.PoissonNLLLoss(log_input=True, eps=1e-08, + size_average=None, full=False) + input = torch.full([5, 2], 1).to(dtype=torch.float32) + label = torch.full([5, 2], 2).to(dtype=torch.float32) + result = loss(input, label) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_4(): + pytorch_code = textwrap.dedent( + """ + import torch + loss = torch.nn.PoissonNLLLoss(log_input=True, full=False, + size_average=None, eps=1e-08, + reduce=None, reduction='mean') + input = torch.full([5, 2], 1).to(dtype=torch.float32) + label = torch.full([5, 2], 2).to(dtype=torch.float32) + result = loss(input, label) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_5(): + pytorch_code = textwrap.dedent( + """ + import torch + loss = torch.nn.PoissonNLLLoss(True, False, + None, 1e-08, + None, 'sum') + input = torch.full([5, 2], 1).to(dtype=torch.float32) + label = torch.full([5, 2], 2).to(dtype=torch.float32) + result = loss(input, label) + """ + ) + obj.run(pytorch_code, ["result"]) diff --git a/tests/test_nn_SmoothL1Loss.py b/tests/test_nn_SmoothL1Loss.py new file mode 100644 index 000000000..5baf99519 --- /dev/null +++ b/tests/test_nn_SmoothL1Loss.py @@ -0,0 +1,101 @@ +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import textwrap + +from apibase import APIBase + +obj = APIBase("torch.nn.SmoothL1Loss") + + +def test_case_1(): + pytorch_code = textwrap.dedent( + """ + import torch + loss = torch.nn.SmoothL1Loss() + input = torch.ones([3, 3]).to(dtype=torch.float32) + label = torch.full([3, 3], 2).to(dtype=torch.float32) + result = loss(input, label) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_2(): + pytorch_code = textwrap.dedent( + """ + import torch + loss = torch.nn.SmoothL1Loss(reduction='sum') + input = torch.ones([3, 3]).to(dtype=torch.float32) + label = torch.full([3, 3], 2).to(dtype=torch.float32) + result = loss(input, label) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_3(): + pytorch_code = textwrap.dedent( + """ + import torch + loss = torch.nn.SmoothL1Loss(beta=1.0, reduction='none') + input = torch.ones([3, 3]).to(dtype=torch.float32) + label = torch.full([3, 3], 2).to(dtype=torch.float32) + result = loss(input, label) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_4(): + pytorch_code = textwrap.dedent( + """ + import torch + loss = torch.nn.SmoothL1Loss(size_average=None, + reduce=None, reduction='mean', beta=1.0) + input = torch.ones([3, 3]).to(dtype=torch.float32) + label = torch.full([3, 3], 2).to(dtype=torch.float32) + result = loss(input, label) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_5(): + pytorch_code = textwrap.dedent( + """ + import torch + loss = torch.nn.SmoothL1Loss(None, + None, 'mean', 1.0) + input = torch.ones([3, 3]).to(dtype=torch.float32) + label = torch.full([3, 3], 2).to(dtype=torch.float32) + result = loss(input, label) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_6(): + pytorch_code = textwrap.dedent( + """ + import torch + loss = torch.nn.SmoothL1Loss(beta=1.5, reduction='none') + input = torch.ones([3, 3]).to(dtype=torch.float32) + label = torch.full([3, 3], 2).to(dtype=torch.float32) + result = loss(input, label) + """ + ) + obj.run( + pytorch_code, ["result"], unsupport=True, reason="beta !=1.0 is not supported." + ) diff --git a/tests/test_nn_Transformer.py b/tests/test_nn_Transformer.py new file mode 100644 index 000000000..fab88af7e --- /dev/null +++ b/tests/test_nn_Transformer.py @@ -0,0 +1,150 @@ +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import textwrap + +from apibase import APIBase + +obj = APIBase("torch.nn.Transformer") + + +def test_case_1(): + pytorch_code = textwrap.dedent( + """ + import torch + transformer_model = torch.nn.Transformer() + src = torch.rand((10, 32, 512)) + tgt = torch.rand((10, 32, 512)) + result = transformer_model(src, tgt) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_2(): + pytorch_code = textwrap.dedent( + """ + import torch + transformer_model = torch.nn.Transformer(d_model=512, + nhead=8, num_encoder_layers=6, + num_decoder_layers=6, dim_feedforward=2048, + dropout=0.1, activation='relu' + ) + src = torch.rand((10, 32, 512)) + tgt = torch.rand((10, 32, 512)) + result = transformer_model(src, tgt) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_3(): + pytorch_code = textwrap.dedent( + """ + import torch + transformer_model = torch.nn.Transformer(d_model=512, + num_decoder_layers=6, dim_feedforward=2048, + dropout=0.1, activation='relu', + custom_encoder=None, custom_decoder=None, + nhead=8, num_encoder_layers=6) + src = torch.rand((10, 32, 512)) + tgt = torch.rand((10, 32, 512)) + result = transformer_model(src, tgt) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_4(): + pytorch_code = textwrap.dedent( + """ + import torch + transformer_model = torch.nn.Transformer(d_model=512, + nhead=8, num_encoder_layers=6, + num_decoder_layers=6, dim_feedforward=2048, + dropout=0.1, activation='relu', + custom_encoder=None, custom_decoder=None, + norm_first=False, device=None, dtype=None) + src = torch.rand((10, 32, 512)) + tgt = torch.rand((10, 32, 512)) + result = transformer_model(src, tgt) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_5(): + pytorch_code = textwrap.dedent( + """ + import torch + transformer_model = torch.nn.Transformer(512, + 8, 6, + 6, 2048, + 0.1, 'relu', + None, None) + src = torch.rand((10, 32, 512)) + tgt = torch.rand((10, 32, 512)) + result = transformer_model(src, tgt) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_6(): + pytorch_code = textwrap.dedent( + """ + import torch + transformer_model = torch.nn.Transformer(512, + 8, 6, + 6, 2048, + 0.1, 'relu', + None, None, + layer_norm_eps=1e-05) + src = torch.rand((10, 32, 512)) + tgt = torch.rand((10, 32, 512)) + result = transformer_model(src, tgt) + """ + ) + obj.run( + pytorch_code, + ["result"], + check_value=False, + unsupport=True, + reason="paddle unsupport layer_norm_eps args", + ) + + +def test_case_7(): + pytorch_code = textwrap.dedent( + """ + import torch + transformer_model = torch.nn.Transformer(512, + 8, 6, + 6, 2048, + 0.1, 'relu', + None, None, + batch_first=False + ) + src = torch.rand((10, 32, 512)) + tgt = torch.rand((10, 32, 512)) + result = transformer_model(src, tgt) + """ + ) + obj.run( + pytorch_code, + ["result"], + check_value=False, + unsupport=True, + reason="paddle unsupport batch_first args", + ) diff --git a/tests/test_nn_TransformerEncoder.py b/tests/test_nn_TransformerEncoder.py new file mode 100644 index 000000000..ef903150b --- /dev/null +++ b/tests/test_nn_TransformerEncoder.py @@ -0,0 +1,117 @@ +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import textwrap + +from apibase import APIBase + +obj = APIBase("torch.nn.TransformerEncoder") + + +def test_case_1(): + pytorch_code = textwrap.dedent( + """ + import torch + import torch.nn as nn + layer = nn.TransformerEncoderLayer(d_model=512, nhead=8) + transformer = nn.TransformerEncoder(layer, num_layers=6) + tgt = torch.rand(20, 32, 512) + result = transformer(tgt) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_2(): + pytorch_code = textwrap.dedent( + """ + import torch + import torch.nn as nn + layer = nn.TransformerEncoderLayer(d_model=512, nhead=8) + transformer = nn.TransformerEncoder(layer, 6) + tgt = torch.rand(20, 32, 512) + result = transformer(tgt) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_3(): + pytorch_code = textwrap.dedent( + """ + import torch + import torch.nn as nn + layer = nn.TransformerEncoderLayer(d_model=512, nhead=8) + transformer = nn.TransformerEncoder(encoder_layer=layer, num_layers=6) + tgt = torch.rand(20, 32, 512) + result = transformer(tgt) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_4(): + pytorch_code = textwrap.dedent( + """ + import torch + import torch.nn as nn + layer = nn.TransformerEncoderLayer(d_model=512, nhead=8) + transformer = nn.TransformerEncoder(encoder_layer=layer, num_layers=6, norm=None) + tgt = torch.rand(20, 32, 512) + result = transformer(tgt) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_5(): + pytorch_code = textwrap.dedent( + """ + import torch + import torch.nn as nn + layer = nn.TransformerEncoderLayer(d_model=512, nhead=8) + transformer = nn.TransformerEncoder(layer, 6, None) + tgt = torch.rand(20, 32, 512) + result = transformer(tgt) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_6(): + pytorch_code = textwrap.dedent( + """ + import torch + import torch.nn as nn + layer = nn.TransformerEncoderLayer(d_model=512, nhead=8) + transformer = nn.TransformerEncoder(layer, 6, None, enable_nested_tensor=True) + tgt = torch.rand(20, 32, 512) + result = transformer(tgt) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_7(): + pytorch_code = textwrap.dedent( + """ + import torch + import torch.nn as nn + layer = nn.TransformerEncoderLayer(d_model=512, nhead=8) + transformer = nn.TransformerEncoder(layer, 6, None, mask_check=True) + tgt = torch.rand(20, 32, 512) + result = transformer(tgt) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) diff --git a/tests/test_nn_TransformerEncoderLayer.py b/tests/test_nn_TransformerEncoderLayer.py new file mode 100644 index 000000000..561e94d9b --- /dev/null +++ b/tests/test_nn_TransformerEncoderLayer.py @@ -0,0 +1,130 @@ +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import textwrap + +from apibase import APIBase + +obj = APIBase("torch.nn.TransformerEncoderLayer") + + +def test_case_1(): + pytorch_code = textwrap.dedent( + """ + import torch + import torch.nn as nn + tgt = torch.ones(10, 32, 512) + model = nn.TransformerEncoderLayer(d_model=512, nhead=8) + result = model(tgt) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_2(): + pytorch_code = textwrap.dedent( + """ + import torch + import torch.nn as nn + tgt = torch.ones(10, 32, 512) + model = nn.TransformerEncoderLayer(d_model=512, nhead=8,norm_first=True) + result = model(tgt) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_3(): + pytorch_code = textwrap.dedent( + """ + import torch + import torch.nn as nn + tgt = torch.ones(10, 32, 512) + model = nn.TransformerEncoderLayer(d_model=512, nhead=8,dtype=torch.float32) + result = model(tgt) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_4(): + pytorch_code = textwrap.dedent( + """ + import torch + import torch.nn as nn + tgt = torch.ones(10, 32, 512) + model = nn.TransformerEncoderLayer(d_model=512, nhead=8,batch_first=True) + result = model(tgt) + """ + ) + obj.run( + pytorch_code, + ["result"], + unsupport=True, + reason="paddle unsupport batch_first args", + ) + + +def test_case_5(): + pytorch_code = textwrap.dedent( + """ + import torch + import torch.nn as nn + tgt = torch.ones(10, 32, 512) + model = nn.TransformerEncoderLayer(d_model=512, nhead=8,layer_norm_eps=1e-05) + result = model(tgt) + """ + ) + obj.run( + pytorch_code, + ["result"], + unsupport=True, + reason="paddle unsupport layer_norm_eps args", + ) + + +def test_case_6(): + pytorch_code = textwrap.dedent( + """ + import torch + import torch.nn as nn + tgt = torch.ones(10, 32, 512) + model = nn.TransformerEncoderLayer(d_model=512, nhead=8, + dim_feedforward=2048, + dropout=0.1, activation='relu', + norm_first=False, + device=None, + dtype=torch.float32) + result = model(tgt) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_7(): + pytorch_code = textwrap.dedent( + """ + import torch + import torch.nn as nn + tgt = torch.ones(10, 32, 512) + model = nn.TransformerEncoderLayer(512, 8, + 2048, + 0.1, 'relu', + norm_first=False, + device=None, + dtype=torch.float32) + result = model(tgt) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) diff --git a/tests/test_nn_functional_ctc_loss.py b/tests/test_nn_functional_ctc_loss.py new file mode 100644 index 000000000..1e4fc63d0 --- /dev/null +++ b/tests/test_nn_functional_ctc_loss.py @@ -0,0 +1,51 @@ +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import textwrap + +from apibase import APIBase + +obj = APIBase("torch.nn.functional.ctc_loss") + + +def test_case_1(): + pytorch_code = textwrap.dedent( + """ + import torch + log_probs = torch.tensor([[[4.17021990e-01, 7.20324516e-01, 1.14374816e-04], + [3.02332580e-01, 1.46755889e-01, 9.23385918e-02]], + [[1.86260208e-01, 3.45560730e-01, 3.96767467e-01], + [5.38816750e-01, 4.19194520e-01, 6.85219526e-01]], + [[2.04452246e-01, 8.78117442e-01, 2.73875929e-02], + [6.70467496e-01, 4.17304814e-01, 5.58689833e-01]], + [[1.40386939e-01, 1.98101491e-01, 8.00744593e-01], + [9.68261600e-01, 3.13424170e-01, 6.92322612e-01]], + [[8.76389146e-01, 8.94606650e-01, 8.50442126e-02], + [3.90547849e-02, 1.69830427e-01, 8.78142476e-01]]], dtype=torch.float32) + labels = torch.tensor([[1, 2, 2], [1, 2, 2]], dtype=torch.int32) + input_lengths = torch.tensor([5, 5], dtype=torch.int64) + label_lengths = torch.tensor([3, 3], dtype=torch.int64) + result = torch.nn.functional.ctc_loss(log_probs, labels, + input_lengths, + label_lengths, + blank=0, + reduction='mean') + """ + ) + obj.run( + pytorch_code, + ["result"], + unsupport=True, + reason="pytorch and paddle return different results", + ) diff --git a/tests/test_nn_functional_multilabel_margin_loss.py b/tests/test_nn_functional_multilabel_margin_loss.py new file mode 100644 index 000000000..9f7e6d456 --- /dev/null +++ b/tests/test_nn_functional_multilabel_margin_loss.py @@ -0,0 +1,34 @@ +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import textwrap + +from apibase import APIBase + +obj = APIBase("torch.nn.functional.multilabel_margin_loss") + + +def test_case_1(): + pytorch_code = textwrap.dedent( + """ + import torch + loss = torch.nn.functional.multilabel_margin_loss + input = torch.tensor([[0.1, 0.2, 0.4, 0.8]]).to(dtype=torch.float32) + label = torch.LongTensor([[3, 0, -1, 1]]) + result = loss(input, label) + """ + ) + obj.run( + pytorch_code, ["result"], unsupport=True, reason="The API is not supported." + ) diff --git a/tests/test_nn_functional_threshold.py b/tests/test_nn_functional_threshold.py new file mode 100644 index 000000000..fc794cd4e --- /dev/null +++ b/tests/test_nn_functional_threshold.py @@ -0,0 +1,109 @@ +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import textwrap + +from apibase import APIBase + +obj = APIBase("torch.nn.functional.threshold") + + +def test_case_1(): + pytorch_code = textwrap.dedent( + """ + import torch + import torch.nn as nn + x = torch.tensor([[[-1.3020, -0.1005, 0.5766, 0.6351, -0.8893, 0.0253, -0.1756, 1.2913], + [-0.8833, -0.1369, -0.0168, -0.5409, -0.1511, -0.1240, -1.1870, -1.8816]]]) + result = nn.functional.threshold(x, 0.5, 0.0) + """ + ) + obj.run( + pytorch_code, + ["result"], + unsupport=True, + reason="paddle does not support value != 0.0 and value is mandatory in torch", + ) + + +def test_case_2(): + pytorch_code = textwrap.dedent( + """ + import torch + import torch.nn as nn + x = torch.tensor([[[-1.3020, -0.1005, 0.5766, 0.6351, -0.8893, 0.0253, -0.1756, 1.2913], + [-0.8833, -0.1369, -0.0168, -0.5409, -0.1511, -0.1240, -1.1870, -1.8816]]]) + result = nn.functional.threshold(x, threshold=0.5, value=0.0) + """ + ) + obj.run( + pytorch_code, + ["result"], + unsupport=True, + reason="paddle does not support value != 0.0 and value is mandatory in torch", + ) + + +def test_case_3(): + pytorch_code = textwrap.dedent( + """ + import torch + import torch.nn as nn + x = torch.tensor([[[-1.3020, -0.1005, 0.5766, 0.6351, -0.8893, 0.0253, -0.1756, 1.2913], + [-0.8833, -0.1369, -0.0168, -0.5409, -0.1511, -0.1240, -1.1870, -1.8816]]]) + result = nn.functional.threshold(x, value=0.0, threshold=0.5) + """ + ) + obj.run( + pytorch_code, + ["result"], + unsupport=True, + reason="paddle does not support value != 0.0 and value is mandatory in torch", + ) + + +def test_case_4(): + pytorch_code = textwrap.dedent( + """ + import torch + import torch.nn as nn + x = torch.tensor([[[-1.3020, -0.1005, 0.5766, 0.6351, -0.8893, 0.0253, -0.1756, 1.2913], + [-0.8833, -0.1369, -0.0168, -0.5409, -0.1511, -0.1240, -1.1870, -1.8816]]]) + result = nn.functional.threshold(x, 0.5, 0.0, False) + """ + ) + obj.run( + pytorch_code, + ["result"], + unsupport=True, + reason="paddle does not support value != 0.0 and value is mandatory in torch", + ) + + +def test_case_5(): + pytorch_code = textwrap.dedent( + """ + import torch + import torch.nn as nn + x = torch.tensor([[[-1.3020, -0.1005, 0.5766, 0.6351, -0.8893, 0.0253, -0.1756, 1.2913], + [-0.8833, -0.1369, -0.0168, -0.5409, -0.1511, -0.1240, -1.1870, -1.8816]]]) + result = nn.functional.threshold(x, threshold=0.5, value=0.1, inplace=False) + """ + ) + obj.run( + pytorch_code, + ["result"], + unsupport=True, + reason="paddle does not support value != 0.0 and value is mandatory in torch", + ) diff --git a/tests/test_nn_functional_triplet_margin_loss.py b/tests/test_nn_functional_triplet_margin_loss.py new file mode 100644 index 000000000..70ecabc8f --- /dev/null +++ b/tests/test_nn_functional_triplet_margin_loss.py @@ -0,0 +1,100 @@ +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import textwrap + +from apibase import APIBase + +obj = APIBase("torch.nn.functional.triplet_margin_loss") + + +def test_case_1(): + pytorch_code = textwrap.dedent( + """ + import torch + import torch.nn.functional as F + input = torch.tensor([[1, 5, 3], [0, 3, 2], [1, 4, 1]]).to(dtype=torch.float32) + positive = torch.tensor([[5, 1, 2], [3, 2, 1], [3, -1, 1]]).to(dtype=torch.float32) + negative = torch.tensor([[2, 1, -3], [1, 1, -1], [4, -2, 1]]).to(dtype=torch.float32) + result = F.triplet_margin_loss(input, positive, negative, margin=1.0, reduction='none') + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_2(): + pytorch_code = textwrap.dedent( + """ + import torch + import torch.nn.functional as F + input = torch.tensor([[1, 5, 3], [0, 3, 2], [1, 4, 1]]).to(dtype=torch.float32) + positive = torch.tensor([[5, 1, 2], [3, 2, 1], [3, -1, 1]]).to(dtype=torch.float32) + negative = torch.tensor([[2, 1, -3], [1, 1, -1], [4, -2, 1]]).to(dtype=torch.float32) + result = F.triplet_margin_loss(input, positive, negative, 1.0, 2) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_3(): + pytorch_code = textwrap.dedent( + """ + import torch + import torch.nn.functional as F + input = torch.tensor([[1, 5, 3], [0, 3, 2], [1, 4, 1]]).to(dtype=torch.float32) + positive = torch.tensor([[5, 1, 2], [3, 2, 1], [3, -1, 1]]).to(dtype=torch.float32) + negative = torch.tensor([[2, 1, -3], [1, 1, -1], [4, -2, 1]]).to(dtype=torch.float32) + result = F.triplet_margin_loss(margin=1.0, reduction='none', + anchor=input, positive=positive, negative=negative) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_4(): + pytorch_code = textwrap.dedent( + """ + import torch + import torch.nn.functional as F + input = torch.tensor([[1, 5, 3], [0, 3, 2], [1, 4, 1]]).to(dtype=torch.float32) + positive = torch.tensor([[5, 1, 2], [3, 2, 1], [3, -1, 1]]).to(dtype=torch.float32) + negative = torch.tensor([[2, 1, -3], [1, 1, -1], [4, -2, 1]]).to(dtype=torch.float32) + result = F.triplet_margin_loss(anchor=input, + positive = positive, + negative = negative, + margin=1.0, p=2, eps=1e-06, + swap=False, size_average=None, + reduce=None, reduction='mean') + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_5(): + pytorch_code = textwrap.dedent( + """ + import torch + import torch.nn.functional as F + input = torch.tensor([[1, 5, 3], [0, 3, 2], [1, 4, 1]]).to(dtype=torch.float32) + positive = torch.tensor([[5, 1, 2], [3, 2, 1], [3, -1, 1]]).to(dtype=torch.float32) + negative = torch.tensor([[2, 1, -3], [1, 1, -1], [4, -2, 1]]).to(dtype=torch.float32) + result = F.triplet_margin_loss(input, + positive, + negative, + 1.0, 2, 1e-06, + False, None, + None, 'mean') + """ + ) + obj.run(pytorch_code, ["result"]) diff --git a/tests/test_utils_data_DataLoader.py b/tests/test_utils_data_DataLoader.py new file mode 100644 index 000000000..0a1829a60 --- /dev/null +++ b/tests/test_utils_data_DataLoader.py @@ -0,0 +1,181 @@ +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import textwrap + +import paddle +from apibase import APIBase + + +class DataLoaderAPIBase(APIBase): + def compare( + self, + name, + pytorch_result, + paddle_result, + check_value=True, + check_dtype=True, + check_stop_gradient=True, + rtol=1.0e-6, + atol=0.0, + ): + assert isinstance(paddle_result, paddle.io.DataLoader) + + +obj = DataLoaderAPIBase("torch.utils.data.DataLoader") + + +def test_case_1(): + pytorch_code = textwrap.dedent( + """ + from torch.utils.data import Dataset + import torch + class Data(Dataset): + def __init__(self): + self.x = [1,2,3,4] + + def __getitem__(self, idx): + return self.x[idx] + + def __len__(self): + return len(self.x) + + + data = Data() + result = torch.utils.data.DataLoader(data) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_2(): + pytorch_code = textwrap.dedent( + """ + from torch.utils.data import Dataset + import torch + class Data(Dataset): + def __init__(self): + self.x = [1,2,3,4] + + def __getitem__(self, idx): + return self.x[idx] + + def __len__(self): + return len(self.x) + + + data = Data() + result = torch.utils.data.DataLoader(dataset=data) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_3(): + pytorch_code = textwrap.dedent( + """ + from torch.utils.data import Dataset + import torch + class Data(Dataset): + def __init__(self): + self.x = [1,2,3,4] + + def __getitem__(self, idx): + return self.x[idx] + + def __len__(self): + return len(self.x) + + + data = Data() + result = torch.utils.data.DataLoader(dataset=data, + shuffle=False, + batch_sampler=None, num_workers=0, batch_size=1) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_4(): + pytorch_code = textwrap.dedent( + """ + from torch.utils.data import Dataset + import torch + class Data(Dataset): + def __init__(self): + self.x = [1,2,3,4] + + def __getitem__(self, idx): + return self.x[idx] + + def __len__(self): + return len(self.x) + + + data = Data() + result = torch.utils.data.DataLoader(dataset=data, + batch_size=1, shuffle=False, sampler=None, + batch_sampler=None, num_workers=0, collate_fn=None, + pin_memory=False, drop_last=False, timeout=0, + worker_init_fn=None, + multiprocessing_context=None, + generator=None, + prefetch_factor=None, + persistent_workers=False, + pin_memory_device='') + """ + ) + obj.run( + pytorch_code, + ["result"], + unsupport=True, + reason="The parameter sampler not support.", + ) + + +def test_case_5(): + pytorch_code = textwrap.dedent( + """ + from torch.utils.data import Dataset + import torch + class Data(Dataset): + def __init__(self): + self.x = [1,2,3,4] + + def __getitem__(self, idx): + return self.x[idx] + + def __len__(self): + return len(self.x) + + + data = Data() + result = torch.utils.data.DataLoader(data, + 1, False, None, + None, 0, None, + False, False, 0, + None, + None, + None, + prefetch_factor=None, + persistent_workers=False, + pin_memory_device='') + """ + ) + obj.run( + pytorch_code, + ["result"], + unsupport=True, + reason="The 
parameter sampler is not supported.", + )
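
For reviewers, a minimal hand-written sketch of the Paddle code that the new matchers above are expected to emit for a few of the mapped torch APIs. This is illustrative only and not taken from paconvert's actual output; "paddle_aux" below refers to the auxiliary helper module whose body is produced by IInfoMatcher.generate_aux_code in this patch.

# Sketch of the converted Paddle code (assumed equivalents, not generated output).
import paddle

# torch.broadcast_shapes((2,), (3, 1), (1, 1, 1)) is rewritten by
# BroadcastShapesMatcher into chained paddle.broadcast_shape calls:
shape = paddle.broadcast_shape(paddle.broadcast_shape((2,), (3, 1)), (1, 1, 1))

# torch.broadcast_tensors(x, y) is rewritten by BroadcastTensorsMatcher into a
# single call that receives the inputs as a list:
x = paddle.to_tensor([[0, 1, 2]])
y = paddle.to_tensor([[0], [1]])
out_x, out_y = paddle.broadcast_tensors([x, y])

# torch.iinfo(torch.int32).max is rewritten by IInfoMatcher into
# paddle.iinfo(paddle_aux._STR_2_PADDLE_DTYPE('int32')).max, where the helper
# maps the dtype string back to a paddle dtype; the direct equivalent is:
max_int32 = paddle.iinfo(paddle.int32).max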