Skip to content

Commit

Permalink
fix(strategy): rename optimization_level to quant_level. (#313)
Browse files Browse the repository at this point in the history
Signed-off-by: intel-zhangyi <[email protected]>
  • Loading branch information
intel-zhangyi authored Dec 20, 2022
1 parent 4f7b6de commit 3e9c291
Show file tree
Hide file tree
Showing 6 changed files with 26 additions and 26 deletions.
10 changes: 5 additions & 5 deletions neural_compressor/conf/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -806,7 +806,7 @@ def percent_to_float(data):
'pre_post_process_quantization': True},
'model_wise': {'weight': {'bit': [7.0]},
'activation': {}},
'optimization_level': 1,
'quant_level': 1,
}): {
Optional('approach', default='post_training_static_quant'): And(
str,
Expand Down Expand Up @@ -900,10 +900,10 @@ def percent_to_float(data):
Optional('op_wise', default=None): {
str: ops_schema
},
Optional('optimization_level', default=1): And(int, lambda level: level in [0, 1]),
Optional('quant_level', default=1): And(int, lambda level: level in [0, 1]),
},
Optional('use_bf16', default=True): bool,
Optional('optimization_level', default=1): And(int, lambda level: level in [0, 1]),
Optional('quant_level', default=1): And(int, lambda level: level in [0, 1]),
Optional('graph_optimization'): graph_optimization_schema,
Optional('mixed_precision'): mixed_precision_schema,

Expand Down Expand Up @@ -1178,7 +1178,7 @@ def percent_to_float(data):
'activation': {}},
}): dict,
Optional('use_bf16', default=False): bool,
Optional('optimization_level', default=1): int,
Optional('quant_level', default=1): int,
Optional('tuning', default={
'strategy': {'name': 'basic'},
'accuracy_criterion': {'relative': 0.01, 'higher_is_better': True},
Expand Down Expand Up @@ -1415,7 +1415,7 @@ def map_pyconfig_to_cfg(self, pythonic_config):
'tuning.exit_policy.max_trials': pythonic_config.quantization.max_trials,
'tuning.exit_policy.performance_only': pythonic_config.quantization.performance_only,
'use_bf16': pythonic_config.quantization.use_bf16,
'quantization.optimization_level': pythonic_config.quantization.optimization_level,
'quantization.quant_level': pythonic_config.quantization.quant_level,
'reduce_range': pythonic_config.quantization.reduce_range
})
if pythonic_config.quantization.strategy_kwargs:
Expand Down
4 changes: 2 additions & 2 deletions neural_compressor/conf/pythonic_config.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ def __init__(self,
performance_only=False,
reduce_range=None,
use_bf16=True,
optimization_level=1,
quant_level=1,
accuracy_criterion=accuracy_criterion):
excluded_precisions = ["bf16"] if not use_bf16 else []
super().__init__(
Expand All @@ -61,7 +61,7 @@ def __init__(self,
reduce_range=reduce_range,
excluded_precisions=excluded_precisions,
accuracy_criterion=accuracy_criterion,
optimization_level=optimization_level
quant_level=quant_level
)
self._approach = approach

Expand Down
22 changes: 11 additions & 11 deletions neural_compressor/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -317,7 +317,7 @@ def __init__(self,
performance_only=False,
reduce_range=None,
excluded_precisions=[],
optimization_level=1,
quant_level=1,
accuracy_criterion=accuracy_criterion):
self.inputs = inputs
self.outputs = outputs
Expand All @@ -337,7 +337,7 @@ def __init__(self,
self.use_bf16 = "bf16" not in self.excluded_precisions
self.accuracy_criterion = accuracy_criterion
self.calibration_sampling_size = calibration_sampling_size
self.optimization_level = optimization_level
self.quant_level = quant_level

@property
def accuracy_criterion(self):
Expand All @@ -359,12 +359,12 @@ def excluded_precisions(self, excluded_precisions):
self._use_bf16 = "bf16" not in excluded_precisions

@property
def optimization_level(self):
return self._optimization_level
def quant_level(self):
return self._quant_level

@optimization_level.setter
def optimization_level(self, optimization_level):
self._optimization_level = optimization_level
@quant_level.setter
def quant_level(self, quant_level):
self._quant_level = quant_level

@property
def reduce_range(self):
Expand Down Expand Up @@ -591,7 +591,7 @@ def __init__(self,
op_name_list=None,
reduce_range=None,
excluded_precisions=[],
optimization_level=1,
quant_level=1,
tuning_criterion=tuning_criterion,
accuracy_criterion=accuracy_criterion,
):
Expand All @@ -611,7 +611,7 @@ def __init__(self,
max_trials=tuning_criterion.max_trials,
reduce_range=reduce_range,
excluded_precisions=excluded_precisions,
optimization_level=optimization_level,
quant_level=quant_level,
accuracy_criterion=accuracy_criterion)
self.approach = approach

Expand Down Expand Up @@ -644,7 +644,7 @@ def __init__(self,
op_name_list=None,
reduce_range=None,
excluded_precisions=[],
optimization_level=1):
quant_level=1):
super().__init__(inputs=inputs,
outputs=outputs,
device=device,
Expand All @@ -653,7 +653,7 @@ def __init__(self,
op_name_list=op_name_list,
reduce_range=reduce_range,
excluded_precisions=excluded_precisions,
optimization_level=optimization_level)
quant_level=quant_level)
self._approach = 'quant_aware_training'

@property
Expand Down
2 changes: 1 addition & 1 deletion neural_compressor/experimental/quantization.py
Original file line number Diff line number Diff line change
Expand Up @@ -135,7 +135,7 @@ def pre_process(self):
self._create_eval_dataloader(cfg)
self._create_calib_dataloader(cfg)
strategy = cfg.tuning.strategy.name.lower()
if cfg.quantization.optimization_level == 0:
if cfg.quantization.quant_level == 0:
strategy = "conservative"
logger.info(f"On the premise that the accuracy meets the conditions, improve the performance.")

Expand Down
6 changes: 3 additions & 3 deletions test/strategy/test_mse_v2_2.x.py
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,7 @@ def fake_eval_func(_):

conf = PostTrainingQuantConfig(
approach="static",
optimization_level=1,
quant_level=1,
tuning_criterion=TuningCriterion(strategy="mse_v2"))

q_model = fit(
Expand All @@ -96,7 +96,7 @@ def fake_eval_func(_):

conf = PostTrainingQuantConfig(
approach="static",
optimization_level=1,
quant_level=1,
tuning_criterion=TuningCriterion(
strategy="mse_v2",
strategy_kwargs={
Expand Down Expand Up @@ -126,7 +126,7 @@ def fake_eval_func(model):

conf = PostTrainingQuantConfig(
approach="static",
optimization_level=1,
quant_level=1,
tuning_criterion=TuningCriterion(strategy="mse_v2"))

q_model = fit(
Expand Down
8 changes: 4 additions & 4 deletions test/strategy/test_optimization_level_2.x.py
Original file line number Diff line number Diff line change
Expand Up @@ -72,8 +72,8 @@ def _fake_eval(model):
dataloader = DATALOADERS["tensorflow"](dataset)

# tuning and accuracy criterion
optimization_level = 0
conf = PostTrainingQuantConfig(optimization_level=0)
quant_level = 0
conf = PostTrainingQuantConfig(quant_level=0)

# fit
q_model = fit(model=self.constant_graph,
Expand Down Expand Up @@ -135,8 +135,8 @@ def _fake_eval(model):
dataloader = DATALOADERS["pytorch"](dataset)

# tuning and accuracy criterion
optimization_level = 0
conf = PostTrainingQuantConfig(optimization_level=optimization_level)
quant_level = 0
conf = PostTrainingQuantConfig(quant_level=quant_level)

# fit
q_model = fit(model=resnet18,
Expand Down

0 comments on commit 3e9c291

Please sign in to comment.