Commit
【Hackathon 5th No.43】API conversion 21-41 (#320)
* Add tests

* Fix

* Fix

* Fix

* Fix

* Fix
co63oc authored Nov 6, 2023
1 parent 2dd61d9 commit 204aadd
Showing 23 changed files with 2,028 additions and 1 deletion.
209 changes: 209 additions & 0 deletions paconvert/api_mapping.json
@@ -4029,6 +4029,14 @@
"Matcher": "GenericMatcher",
"paddle_api": "'bool'"
},
"torch.broadcast_shapes": {
"Matcher": "BroadcastShapesMatcher",
"paddle_api": "paddle.broadcast_shape"
},
"torch.broadcast_tensors": {
"Matcher": "BroadcastTensorsMatcher",
"paddle_api": "paddle.broadcast_tensors"
},
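
The two new broadcast entries need more than a rename: `paddle.broadcast_tensors` takes a list rather than varargs, and `paddle.broadcast_shape` is binary, so the custom matchers added in api_matcher.py below restructure the call. A rough before/after sketch, not verified tool output:

```python
import paddle

# torch.broadcast_tensors(x, y)  is expected to become:
x, y = paddle.ones([3, 1]), paddle.ones([1, 4])
a, b = paddle.broadcast_tensors([x, y])  # both results have shape [3, 4]

# torch.broadcast_shapes(s1, s2, s3)  is expected to become nested calls,
# because paddle.broadcast_shape only accepts two shapes at a time:
shape = paddle.broadcast_shape(paddle.broadcast_shape([3, 1], [1, 4]), [2, 1, 1])
print(shape)  # [2, 3, 4]
```
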
"torch.broadcast_to": {
"Matcher": "GenericMatcher",
"paddle_api": "paddle.broadcast_to",
@@ -5928,6 +5936,13 @@
"dim": "axes"
}
},
"torch.finfo": {
"Matcher": "IInfoMatcher",
"paddle_api": "paddle.finfo",
"args_list": [
"type"
]
},
"torch.fix": {
"Matcher": "GenericMatcher",
"paddle_api": "paddle.trunc",
@@ -6350,6 +6365,13 @@
"out"
]
},
"torch.iinfo": {
"Matcher": "IInfoMatcher",
"paddle_api": "paddle.iinfo",
"args_list": [
"type"
]
},
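
Both `torch.finfo` and `torch.iinfo` go through the new `IInfoMatcher` (added in api_matcher.py below), which routes the dtype through a `paddle_aux._STR_2_PADDLE_DTYPE` helper. Assuming the converter has already rewritten the torch dtype into its string form, the result is expected to behave like this illustrative sketch:

```python
import paddle

# torch.iinfo(torch.int32).max   ->  roughly  paddle.iinfo(paddle_aux._STR_2_PADDLE_DTYPE('int32')).max
# torch.finfo(torch.float32).eps ->  roughly  paddle.finfo(paddle_aux._STR_2_PADDLE_DTYPE('float32')).eps
print(paddle.iinfo(paddle.int32).max)    # 2147483647
print(paddle.finfo(paddle.float32).eps)  # ~1.19e-07
```
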
"torch.imag": {
"Matcher": "GenericMatcher",
"paddle_api": "paddle.imag",
@@ -8233,6 +8255,18 @@
"dtype": ""
}
},
"torch.nn.GaussianNLLLoss": {
"Matcher": "GenericMatcher",
"paddle_api": "paddle.nn.GaussianNLLLoss",
"args_list": [
"full",
"eps",
"reduction"
],
"kwargs_change": {
"eps": "epsilon"
}
},
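
Here only the `eps` keyword needs renaming. A minimal sketch of the equivalent Paddle call (values are placeholders):

```python
import paddle

# torch.nn.GaussianNLLLoss(full=True, eps=1e-6, reduction='mean')
# is expected to map to:
loss_fn = paddle.nn.GaussianNLLLoss(full=True, epsilon=1e-6, reduction='mean')

x = paddle.rand([4, 3])
label = paddle.rand([4, 3])
var = paddle.ones([4, 3])
out = loss_fn(x, label, var)
```
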
"torch.nn.GroupNorm": {
"Matcher": "GenericMatcher",
"paddle_api": "paddle.nn.GroupNorm",
@@ -8864,6 +8898,28 @@
"modules": "sublayers"
}
},
"torch.nn.MultiLabelSoftMarginLoss": {
"Matcher": "SizeAverageMatcher",
"paddle_api": "paddle.nn.MultiLabelSoftMarginLoss",
"args_list": [
"weight",
"size_average",
"reduce",
"reduction"
]
},
"torch.nn.MultiMarginLoss": {
"Matcher": "SizeAverageMatcher",
"paddle_api": "paddle.nn.MultiMarginLoss",
"args_list": [
"p",
"margin",
"weight",
"size_average",
"reduce",
"reduction"
]
},
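
These two losses use `SizeAverageMatcher`, which folds the deprecated `size_average`/`reduce` flags into an explicit `reduction` string, following the usual PyTorch deprecation rules. A hedged sketch of one such rewrite:

```python
import paddle

# torch.nn.MultiMarginLoss(p=1, margin=1.0, size_average=False)
# size_average=False with reduce left at its default implies reduction='sum',
# so the converted call is expected to be roughly:
loss_fn = paddle.nn.MultiMarginLoss(p=1, margin=1.0, reduction='sum')

input = paddle.rand([4, 5])
label = paddle.randint(0, 5, [4])
out = loss_fn(input, label)
```
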
"torch.nn.MultiheadAttention": {
"paddle_api": "paddle.nn.MultiHeadAttention",
"args_list": [
@@ -8961,6 +9017,21 @@
"downscale_factor"
]
},
"torch.nn.PoissonNLLLoss": {
"Matcher": "SizeAverageMatcher",
"paddle_api": "paddle.nn.PoissonNLLLoss",
"args_list": [
"log_input",
"full",
"size_average",
"eps",
"reduce",
"reduction"
],
"kwargs_change": {
"eps": "epsilon"
}
},
"torch.nn.RNN": {
"Matcher": "RNNMatcher",
"paddle_api": "paddle.nn.SimpleRNN",
@@ -9138,6 +9209,19 @@
"Matcher": "GenericMatcher",
"paddle_api": "paddle.nn.Sigmoid"
},
"torch.nn.SmoothL1Loss": {
"Matcher": "SmoothL1LossMatcher",
"paddle_api": "paddle.nn.SmoothL1Loss",
"args_list": [
"size_average",
"reduce",
"reduction",
"beta"
],
"kwargs_change": {
"beta": "delta"
}
},
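
The `beta` → `delta` rename is guarded by the new `SmoothL1LossMatcher` (see api_matcher.py below), since torch's beta-scaled loss and paddle's Huber-style delta appear to coincide only at 1.0. A sketch of the supported case:

```python
import paddle

# torch.nn.SmoothL1Loss(beta=1.0, reduction='mean')  ->  expected to map to:
loss_fn = paddle.nn.SmoothL1Loss(reduction='mean', delta=1.0)

pred = paddle.rand([4, 3])
label = paddle.rand([4, 3])
out = loss_fn(pred, label)
```
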
"torch.nn.SoftMarginLoss": {
"Matcher": "SizeAverageMatcher",
"paddle_api": "paddle.nn.SoftMarginLoss",
@@ -9233,6 +9317,35 @@
"Matcher": "GenericMatcher",
"paddle_api": "paddle.nn.Tanhshrink"
},
"torch.nn.Transformer": {
"Matcher": "GenericMatcher",
"paddle_api": "paddle.nn.Transformer",
"args_list": [
"d_model",
"nhead",
"num_encoder_layers",
"num_decoder_layers",
"dim_feedforward",
"dropout",
"activation",
"custom_encoder",
"custom_decoder",
"layer_norm_eps",
"batch_first",
"norm_first",
"device",
"dtype"
],
"kwargs_change": {
"norm_first": "normalize_before",
"device": "",
"dtype": ""
},
"unsupport_args": [
"layer_norm_eps",
"batch_first"
]
},
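
For `torch.nn.Transformer`, `norm_first` maps to `normalize_before` and the `device`/`dtype` keywords are dropped, while `layer_norm_eps` and `batch_first` are declared unsupported, so calls passing them are left for manual conversion. A minimal sketch of a convertible call:

```python
import paddle

# torch.nn.Transformer(d_model=128, nhead=4, num_encoder_layers=2,
#                      num_decoder_layers=2, norm_first=True, dtype=torch.float32)
# is expected to map to:
model = paddle.nn.Transformer(
    d_model=128, nhead=4, num_encoder_layers=2, num_decoder_layers=2,
    normalize_before=True,
)

src = paddle.rand([2, 10, 128])  # [batch, src_len, d_model]
tgt = paddle.rand([2, 6, 128])   # [batch, tgt_len, d_model]
out = model(src, tgt)
```
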
"torch.nn.TransformerDecoder": {
"Matcher": "GenericMatcher",
"paddle_api": "paddle.nn.TransformerDecoder",
@@ -9270,6 +9383,49 @@
"dim_feedforward": 2048
}
},
"torch.nn.TransformerEncoder": {
"Matcher": "GenericMatcher",
"paddle_api": "paddle.nn.TransformerEncoder",
"args_list": [
"encoder_layer",
"num_layers",
"norm",
"enable_nested_tensor",
"mask_check"
],
"kwargs_change": {
"enable_nested_tensor": "",
"mask_check": ""
}
},
"torch.nn.TransformerEncoderLayer": {
"Matcher": "GenericMatcher",
"paddle_api": "paddle.nn.TransformerEncoderLayer",
"args_list": [
"d_model",
"nhead",
"dim_feedforward",
"dropout",
"activation",
"layer_norm_eps",
"batch_first",
"norm_first",
"device",
"dtype"
],
"kwargs_change": {
"norm_first": "normalize_before",
"device": "",
"dtype": ""
},
"unsupport_args": [
"layer_norm_eps",
"batch_first"
],
"paddle_default_kwargs": {
"dim_feedforward": 2048
}
},
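
Note the `paddle_default_kwargs` entry: `dim_feedforward` is optional in torch (default 2048) but, as this mapping assumes, required by `paddle.nn.TransformerEncoderLayer`, so the converter fills it in when omitted. The PyTorch-only `enable_nested_tensor`/`mask_check` flags on the encoder are simply dropped. A rough sketch of the converted pair:

```python
import paddle

# torch.nn.TransformerEncoderLayer(d_model=256, nhead=4)
# torch.nn.TransformerEncoder(layer, num_layers=2, enable_nested_tensor=True)
# are expected to map to:
layer = paddle.nn.TransformerEncoderLayer(d_model=256, nhead=4, dim_feedforward=2048)
encoder = paddle.nn.TransformerEncoder(layer, num_layers=2)

src = paddle.rand([2, 16, 256])  # [batch, seq_len, d_model]
out = encoder(src)
```
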
"torch.nn.TripletMarginLoss": {
"Matcher": "SizeAverageMatcher",
"paddle_api": "paddle.nn.TripletMarginLoss",
@@ -10645,6 +10801,26 @@
"input": "x"
}
},
"torch.nn.functional.triplet_margin_loss": {
"Matcher": "SizeAverageMatcher",
"paddle_api": "paddle.nn.functional.triplet_margin_loss",
"args_list": [
"anchor",
"positive",
"negative",
"margin",
"p",
"eps",
"swap",
"size_average",
"reduce",
"reduction"
],
"kwargs_change": {
"anchor": "input",
"eps": "epsilon"
}
},
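
Besides the usual `size_average`/`reduce` folding, this entry renames `anchor` → `input` and `eps` → `epsilon`. A minimal sketch of the converted call:

```python
import paddle
import paddle.nn.functional as F

# torch.nn.functional.triplet_margin_loss(anchor=a, positive=p, negative=n, eps=1e-6)
# is expected to map to:
a = paddle.rand([4, 16])
p = paddle.rand([4, 16])
n = paddle.rand([4, 16])
out = F.triplet_margin_loss(input=a, positive=p, negative=n, epsilon=1e-6)
```
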
"torch.nn.functional.triplet_margin_with_distance_loss": {
"Matcher": "GenericMatcher",
"paddle_api": "paddle.nn.functional.triplet_margin_with_distance_loss",
@@ -13229,6 +13405,39 @@
"datasets"
]
},
"torch.utils.data.DataLoader": {
"Matcher": "GenericMatcher",
"paddle_api": "paddle.io.DataLoader",
"args_list": [
"dataset",
"batch_size",
"shuffle",
"sampler",
"batch_sampler",
"num_workers",
"collate_fn",
"pin_memory",
"drop_last",
"timeout",
"worker_init_fn",
"multiprocessing_context",
"generator",
"prefetch_factor",
"persistent_workers",
"pin_memory_device"
],
"kwargs_change": {
"pin_memory": "",
"multiprocessing_context": "",
"generator": "",
"persistent_workers": "",
"pin_memory_device": ""
},
"unsupport_args": [
"sampler",
"prefetch_factor"
]
},
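
For `torch.utils.data.DataLoader`, keywords with no Paddle counterpart (`pin_memory`, `generator`, `persistent_workers`, ...) are silently dropped, while `sampler` and `prefetch_factor` are flagged as unsupported. A hedged sketch of a call that should convert cleanly:

```python
import paddle

class SquaresDataset(paddle.io.Dataset):
    def __init__(self, n):
        self.n = n

    def __getitem__(self, idx):
        return paddle.to_tensor([idx * idx], dtype="float32")

    def __len__(self):
        return self.n

# torch.utils.data.DataLoader(ds, batch_size=4, shuffle=True,
#                             pin_memory=True, num_workers=0)
# is expected to map to:
loader = paddle.io.DataLoader(SquaresDataset(8), batch_size=4, shuffle=True, num_workers=0)
for batch in loader:
    pass  # each batch stacks batch_size samples into shape [4, 1]
```
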
"torch.utils.data.Dataset": {
"Matcher": "GenericMatcher",
"paddle_api": "paddle.io.Dataset"
69 changes: 69 additions & 0 deletions paconvert/api_matcher.py
@@ -443,6 +443,75 @@ def generate_code(self, kwargs):
        return code


class BroadcastTensorsMatcher(BaseMatcher):
    def get_paddle_nodes(self, args, kwargs):
        if len(args) == 1 and isinstance(args[0], ast.Starred):
            star_var = astor.to_source(args[0].value).strip("\n")
            code = "{}({})".format(self.get_paddle_api(), star_var)
        else:
            new_args = self.parse_args(args)
            code = "{}([{}])".format(self.get_paddle_api(), ",".join(new_args))
        return ast.parse(code).body


class BroadcastShapesMatcher(BaseMatcher):
    def get_paddle_nodes(self, args, kwargs):
        if len(args) == 1 and isinstance(args[0], ast.Starred):
            return None
        new_args = self.parse_args(args)
        code = new_args[0]
        # paddle.broadcast_shape only takes two shapes, so chain the calls
        for i in range(1, len(new_args)):
            code = "{}({}, {})".format(self.get_paddle_api(), code, new_args[i])
        return ast.parse(code).body
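
The star-argument handling above is the subtle part; roughly, the rewrites these two matchers emit look like this (illustrative, not verified tool output):

```python
# torch.broadcast_tensors(x, y)     ->  paddle.broadcast_tensors([x, y])
# torch.broadcast_tensors(*tensors) ->  paddle.broadcast_tensors(tensors)
#     (the star is dropped because paddle already expects a sequence)
# torch.broadcast_shapes(s1, s2)    ->  paddle.broadcast_shape(s1, s2)
# torch.broadcast_shapes(*shapes)   ->  left unconverted (matcher returns None)
```
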


class IInfoMatcher(BaseMatcher):
    def generate_aux_code(self):
        CODE_TEMPLATE = textwrap.dedent(
            """
            def _STR_2_PADDLE_DTYPE(type):
                type_map = {
                    "uint8": paddle.uint8,
                    "int8": paddle.int8,
                    "int16": paddle.int16,
                    "int32": paddle.int32,
                    "int64": paddle.int64,
                    "float16": paddle.float16,
                    "float32": paddle.float32,
                    "float64": paddle.float64,
                    "bfloat16": paddle.bfloat16,
                }
                return type_map.get(type)
            """
        )
        return CODE_TEMPLATE

    def generate_code(self, kwargs):
        self.write_aux_code()
        type = kwargs.pop("type")
        return "{}(paddle_aux._STR_2_PADDLE_DTYPE({}))".format(
            self.get_paddle_api(), type
        )


class SmoothL1LossMatcher(BaseMatcher):
    def get_paddle_nodes(self, args, kwargs):
        kwargs = self.parse_kwargs(kwargs)
        beta = kwargs.get("beta", None)
        if beta is not None:
            beta = beta.replace("(", "").replace(")", "")
            try:
                if float(beta) != 1.0:
                    return None
            except (TypeError, ValueError):
                return None
        code = SizeAverageMatcher.generate_code(self, kwargs)
        return ast.parse(code).body
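
The beta guard is presumably there because torch scales the quadratic branch by 1/beta while paddle's `delta` follows the plain Huber form, so the two losses only agree at `beta == delta == 1.0`. In rough terms:

```python
# torch.nn.SmoothL1Loss()               ->  paddle.nn.SmoothL1Loss()
# torch.nn.SmoothL1Loss(beta=1.0)       ->  paddle.nn.SmoothL1Loss(delta=1.0)
# torch.nn.SmoothL1Loss(beta=0.5)       ->  left unconverted (matcher returns None)
# torch.nn.SmoothL1Loss(beta=some_var)  ->  left unconverted (value not statically known)
```
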


class SwapAxesMatcher(BaseMatcher):
    def generate_code(self, kwargs):
        if "input" not in kwargs:
2 changes: 1 addition & 1 deletion tests/apibase.py
@@ -174,7 +174,7 @@ def compare(
            )
            return

-        if isinstance(pytorch_result, (bool, np.number, int, str, type(None))):
+        if isinstance(pytorch_result, (bool, np.number, int, float, str, type(None))):
            assert type(paddle_result) == type(
                pytorch_result
            ), "paddle result's type [{}] should be the same with pytorch's type [{}]".format(
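
The test-framework tweak adds `float` to the scalar types whose Python type must match between frameworks; presumably this is needed because some of the newly mapped APIs (e.g. `torch.finfo(...).eps`) return plain Python floats. A small self-contained illustration of the check:

```python
import numpy as np

pytorch_result = 1.1920928955078125e-07  # e.g. torch.finfo(torch.float32).eps
paddle_result = 1.1920928955078125e-07   # e.g. paddle.finfo(paddle.float32).eps

# With float in the tuple, scalar float results are now type-checked too.
if isinstance(pytorch_result, (bool, np.number, int, float, str, type(None))):
    assert type(paddle_result) == type(pytorch_result)
```
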