From b4f9c72cfb0940e0461d4c40b0ef4c21b4c9c791 Mon Sep 17 00:00:00 2001
From: xinhe
Date: Tue, 30 May 2023 16:11:07 +0800
Subject: [PATCH] fix bug in ipex fallback (#918)

Signed-off-by: Xin He
---
 neural_compressor/adaptor/pytorch_ipex.yaml  | 18 +++---
 .../strategy/utils/tuning_sampler.py         |  2 +-
 .../strategy/utils/tuning_space.py           |  9 ++-
 test/strategy/test_tuning_space_v2.py        | 60 ++++++++++++++++++-
 4 files changed, 74 insertions(+), 15 deletions(-)

diff --git a/neural_compressor/adaptor/pytorch_ipex.yaml b/neural_compressor/adaptor/pytorch_ipex.yaml
index ba76d506500..640609ac296 100644
--- a/neural_compressor/adaptor/pytorch_ipex.yaml
+++ b/neural_compressor/adaptor/pytorch_ipex.yaml
@@ -22,7 +22,7 @@
     fp32: ['*'] # '*' means all op types
     int8: &1_12_capabilities {
       'static': &cap_1_12_s8 {
-        'conv2d': &cap_s8_1_12_Conv2d {
+        'Conv2d': &cap_s8_1_12_Conv2d {
           'weight': {
             'dtype': ['int8'],
             'scheme': ['sym'],
@@ -36,9 +36,9 @@
             'algorithm': ['minmax', 'kl']
             },
           },
-        'conv1d': *cap_s8_1_12_Conv2d,
-        'conv3d': *cap_s8_1_12_Conv2d,
-        'linear': *cap_s8_1_12_Conv2d,
+        'Conv1d': *cap_s8_1_12_Conv2d,
+        'Conv3d': *cap_s8_1_12_Conv2d,
+        'Linear': *cap_s8_1_12_Conv2d,
         'default': {
           'weight': {
             'dtype': ['int8'],
@@ -67,7 +67,7 @@
     fp32: ['*'] # '*' means all op types
     int8: &1_10_capabilities {
      'static': &cap_1_10_s8 {
-        'conv2d': &cap_s8_1_10_Conv2d {
+        'Conv2d': &cap_s8_1_10_Conv2d {
           'weight': {
             'dtype': ['int8'],
             'scheme': ['sym'],
@@ -81,9 +81,9 @@
             'algorithm': ['minmax']
             },
           },
-        'conv1d': *cap_s8_1_10_Conv2d,
-        'conv3d': *cap_s8_1_10_Conv2d,
-        'linear': *cap_s8_1_10_Conv2d,
+        'Conv1d': *cap_s8_1_10_Conv2d,
+        'Conv3d': *cap_s8_1_10_Conv2d,
+        'Linear': *cap_s8_1_10_Conv2d,
         'default': &cap_s8_1_10_default {
           'weight': {
             'dtype': ['int8'],
@@ -98,8 +98,6 @@
             'algorithm': ['minmax']
             }
           },
-        'add': *cap_s8_1_10_default,
-        'matmul': *cap_s8_1_10_default,
         },
       'dynamic': {},
       'quant_aware': {}
diff --git a/neural_compressor/strategy/utils/tuning_sampler.py b/neural_compressor/strategy/utils/tuning_sampler.py
index 4380aafda34..7bf241b5343 100644
--- a/neural_compressor/strategy/utils/tuning_sampler.py
+++ b/neural_compressor/strategy/utils/tuning_sampler.py
@@ -428,7 +428,7 @@ def __iter__(self):
             if self.accumulate and skip_first:  # skip the first one
                 skip_first = False
                 continue
-            logger.debug(f"fallback {op_name_type} to {target_dtype}")
+            logger.info(f"fallback {op_name_type} to {target_dtype}")
             yield new_tune_cfg  # need to skip the first one

 class LowerBitsSampler(TuningSampler):
diff --git a/neural_compressor/strategy/utils/tuning_space.py b/neural_compressor/strategy/utils/tuning_space.py
index 1b80f7cb8fe..bf322ee80b7 100644
--- a/neural_compressor/strategy/utils/tuning_space.py
+++ b/neural_compressor/strategy/utils/tuning_space.py
@@ -268,9 +268,14 @@ def _merge_model_wise_cfg(self, cap: Dict, model_wise_usr_cfg: Dict, fw_cap: Dic
     def _merge_op_wise_cfg(self, cap: Dict, op_wise_usr_cfg: Dict, fw_cap: Dict):
         op_name_types = {key[0]: key for key in cap['op'].keys()}
         for op_name_pattern, op_user_cfg in op_wise_usr_cfg.items():
-            op_name_pattern = re.compile(op_name_pattern)
+            if isinstance(op_name_pattern, str):
+                op_name_pattern = re.compile(op_name_pattern)
+                str_flag=True
+            else:
+                str_flag=False
             for op_name in op_name_types:
-                if op_name_pattern.fullmatch(op_name):
+                if str_flag and op_name_pattern.fullmatch(str(op_name)) \
+                    or op_name_pattern == op_name:
                     op_name_type = op_name_types[op_name]
                     cap['op'][op_name_type] = self._merge_op_cfg(cap['op'][op_name_type],
                                                                  op_user_cfg,
diff --git a/test/strategy/test_tuning_space_v2.py b/test/strategy/test_tuning_space_v2.py
index 19edb7ef892..6cd4b86dc10 100644
--- a/test/strategy/test_tuning_space_v2.py
+++ b/test/strategy/test_tuning_space_v2.py
@@ -166,6 +166,51 @@
                 }
             },
     ],
+    # op4 have tuple name as IPEX
+    (('op_name4', 0), 'op_type4'): [
+        {
+            'activation':
+                {
+                    'dtype': ['int8'],
+                    'quant_mode': 'static',
+                    'scheme': ['sym'],
+                    'granularity': ['per_channel', 'per_tensor'],
+                    'algorithm': ['minmax', 'kl']
+                },
+            'weight':
+                {
+                    'dtype': ['int8'],
+                    'scheme': ['sym'],
+                    'granularity': ['per_channel', 'per_tensor']
+                }
+        },
+        {
+            'activation':
+                {
+                    'dtype': ['int8'],
+                    'quant_mode': 'dynamic',
+                    'scheme': ['sym'],
+                    'granularity': ['per_channel', 'per_tensor'],
+                    'algorithm': ['minmax', 'kl']
+                },
+            'weight':
+                {
+                    'dtype': ['int8'],
+                    'scheme': ['sym'],
+                    'granularity': ['per_channel', 'per_tensor']
+                }
+        },
+        {
+            'activation':
+                {
+                    'dtype': 'fp32'
+                },
+            'weight':
+                {
+                    'dtype': 'fp32'
+                }
+        },
+    ],

     # op5, weight only
     ('op_name5', 'op_type5'): [
@@ -227,6 +272,14 @@ def setUp(self) -> None:
                     'dtype': ['fp32']
                 }
             },
+            ('op_name4', 0): {
+                'activation': {
+                    'dtype': ['fp32']
+                },
+                'weight': {
+                    'dtype': ['fp32']
+                }
+            },
         }

@@ -311,19 +364,22 @@ def initial_op_quant_mode(items_lst, target_quant_mode, op_item_dtype_dict):
     def test_tuning_space_merge_op_wise(self):
         # op-wise
         conf = {
-            'op_name_dict': self.op_wise_user_cfg_for_fallback,
-
+            'op_name_dict': self.op_wise_user_cfg_for_fallback,
         }
         conf = DotDict(conf)
         # test fallback
         tuning_space2 = TuningSpace(deepcopy(self.capability), deepcopy(conf))
         logger.debug(tuning_space2.root_item.get_details())
         op_name1_only_fp32 = True
+        op_name4_only_fp32 = True
         for quant_mode in ['static', 'dynamic']:
             for item in tuning_space2.query_items_by_quant_mode(quant_mode):
                 if item.name[0] == 'op_name1':
                     op_name1_only_fp32 = False
+                if item.name[0] == ('op_name4', 0):
+                    op_name4_only_fp32 = False
         self.assertTrue(op_name1_only_fp32)
+        self.assertTrue(op_name4_only_fp32)
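
Note on the fix (not part of the patch): the old _merge_op_wise_cfg unconditionally ran
re.compile() on every key of the user's op_name_dict and matched the resulting pattern
against the raw op name, which raises TypeError as soon as either side is a tuple, and the
IPEX adaptor reports op names as tuples such as ('op_name4', 0). The patched code compiles
only string keys (matching them against str(op_name)) and compares non-string keys by
equality. The snippet below is a minimal, self-contained sketch of that matching rule; the
helper name `matches` is invented here for illustration and does not exist in
neural_compressor.

    import re
    from typing import Tuple, Union

    def matches(user_key: Union[str, Tuple], op_name: Union[str, Tuple]) -> bool:
        """Return True when a user op_name_dict key selects the given op name."""
        if isinstance(user_key, str):
            # String keys are regex patterns; matching against str(op_name) keeps
            # tuple op names from raising TypeError inside fullmatch().
            return re.compile(user_key).fullmatch(str(op_name)) is not None
        # Non-string keys (e.g. IPEX tuple names) must equal the op name exactly.
        return user_key == op_name

    # Plain string op names match as before.
    assert matches('op_name1', 'op_name1')
    # IPEX tuple op names can now be targeted with a tuple key, as the new test does.
    assert matches(('op_name4', 0), ('op_name4', 0))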