
Set default value for use_bf16 and fix random seed setting error (#186)

Signed-off-by: Cheng, Penghui <[email protected]>
PenghuiCheng authored Dec 1, 2022
1 parent 79db56f commit 83825af
Showing 3 changed files with 63 additions and 31 deletions.
6 changes: 5 additions & 1 deletion neural_compressor/conf/config.py
@@ -840,7 +840,7 @@ def percent_to_float(data):
str: ops_schema
},
},
-Optional('use_bf16', default=False): bool,
+Optional('use_bf16', default=True): bool,
Optional('graph_optimization'): graph_optimization_schema,
Optional('mixed_precision'): mixed_precision_schema,

@@ -1371,6 +1371,10 @@ def map_pyconfig_to_cfg(self, pythonic_config):
'tuning.tensorboard': pythonic_config.options.tensorboard,
})
if pythonic_config.benchmark is not None:
+if pythonic_config.benchmark.inputs != []:
+mapping.update({'model.inputs': pythonic_config.benchmark.inputs})
+if pythonic_config.benchmark.outputs != []:
+mapping.update({'model.outputs': pythonic_config.benchmark.outputs})
mapping.update({
'evaluation.performance.warmup': pythonic_config.benchmark.warmup,
'evaluation.performance.iteration': pythonic_config.benchmark.iteration,
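A minimal usage sketch of the new benchmark mapping (Python; the tensor names are illustrative, not from this commit): once BenchmarkConfig carries inputs/outputs, map_pyconfig_to_cfg copies any non-empty values into 'model.inputs' and 'model.outputs'.

    from neural_compressor.config import BenchmarkConfig

    # inputs/outputs are the new arguments added in this commit; empty lists are skipped
    conf = BenchmarkConfig(inputs=['input_ids'], outputs=['logits'],
                           warmup=10, iteration=100)
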
5 changes: 3 additions & 2 deletions neural_compressor/conf/pythonic_config.py
@@ -39,11 +39,12 @@ def __init__(self,
max_trials=100,
performance_only=False,
reduce_range=None,
-use_bf16=False,
+use_bf16=True,
accuracy_criterion=accuracy_criterion):
extra_precisions = ["bf16"] if use_bf16 else []
super().__init__(inputs, outputs, backend, device, calibration_sampling_size, op_type_list,
op_name_list, strategy, objective, timeout, max_trials, performance_only,
-reduce_range, use_bf16, accuracy_criterion)
+reduce_range, extra_precisions, accuracy_criterion)
self._approach = approach

@property
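Sketched effect of the new default, restated from the constructor logic above: use_bf16 now defaults to True, so BF16 is forwarded as an extra precision unless the caller explicitly disables it.

    # minimal restatement of the constructor logic shown above
    use_bf16 = True                                   # the new default
    extra_precisions = ["bf16"] if use_bf16 else []   # -> ["bf16"]; [] when use_bf16=False
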
83 changes: 55 additions & 28 deletions neural_compressor/config.py
@@ -136,15 +136,42 @@ def tensorboard(self, tensorboard):


class BenchmarkConfig:
-def __init__(self, warmup=5, iteration=-1, cores_per_instance=None, num_of_instance=None,
-inter_num_of_threads=None, intra_num_of_threads=None):
+def __init__(self,
+inputs=[],
+outputs=[],
+warmup=5,
+iteration=-1,
+cores_per_instance=None,
+num_of_instance=None,
+inter_num_of_threads=None,
+intra_num_of_threads=None):
+self._inputs = inputs
+self._outputs = outputs
self._warmup = warmup
self._iteration = iteration
self._cores_per_instance = cores_per_instance
self._num_of_instance = num_of_instance
self._inter_num_of_threads = inter_num_of_threads
self._intra_num_of_threads = intra_num_of_threads

+@property
+def outputs(self):
+return self._outputs
+
+@outputs.setter
+def outputs(self, outputs):
+if check_value('outputs', outputs, str):
+self._outputs = outputs
+
+@property
+def inputs(self):
+return self._inputs
+
+@inputs.setter
+def inputs(self, inputs):
+if check_value('inputs', inputs, str):
+self._inputs = inputs
+
@property
def warmup(self):
return self._warmup
@@ -285,7 +312,7 @@ def __init__(self,
max_trials=100,
performance_only=False,
reduce_range=None,
-extra_precisions=[],
+extra_precisions=["bf16"],
accuracy_criterion=accuracy_criterion):
self._inputs = inputs
self._outputs = outputs
@@ -503,16 +530,16 @@ def strategy(self, strategy):

class PostTrainingQuantConfig(_BaseQuantizationConfig):
def __init__(self,
-device='cpu',
+device="cpu",
backend="NA",
inputs=[],
outputs=[],
-approach='auto',
+approach="auto",
calibration_sampling_size=[100],
op_type_list=None,
op_name_list=None,
reduce_range=None,
-extra_precisions = [],
+extra_precisions = ["bf16"],
tuning_criterion=tuning_criterion,
accuracy_criterion=accuracy_criterion,
):
@@ -551,7 +578,7 @@ def __init__(self,
op_type_list=None,
op_name_list=None,
reduce_range=None,
-extra_precisions=[]):
+extra_precisions=["bf16"]):
super().__init__(inputs=inputs, outputs=outputs, device=device, backend=backend,
op_type_list=op_type_list, op_name_list=op_name_list,
reduce_range=reduce_range, extra_precisions=extra_precisions)
@@ -789,16 +816,16 @@ def dynamic_axes(self, dynamic_axes):


class Torch2ONNXConfig(ExportConfig):
-def __init__(
-self,
-dtype="int8",
-opset_version=14,
-quant_format="QDQ",
-example_inputs=None,
-input_names=None,
-output_names=None,
-dynamic_axes=None,
-**kwargs,
+def __init__(
+self,
+dtype="int8",
+opset_version=14,
+quant_format="QDQ",
+example_inputs=None,
+input_names=None,
+output_names=None,
+dynamic_axes=None,
+**kwargs,
):
super().__init__(
dtype=dtype,
@@ -813,16 +840,16 @@ def __init__(


class TF2ONNXConfig(ExportConfig):
-def __init__(
-self,
-dtype="int8",
-opset_version=14,
-quant_format="QDQ",
-example_inputs=None,
-input_names=None,
-output_names=None,
-dynamic_axes=None,
-**kwargs,
+def __init__(
+self,
+dtype="int8",
+opset_version=14,
+quant_format="QDQ",
+example_inputs=None,
+input_names=None,
+output_names=None,
+dynamic_axes=None,
+**kwargs,
):
super().__init__(
dtype=dtype,
@@ -837,7 +864,7 @@ def __init__(


def set_random_seed(seed: int):
-options.random_seed
+options.random_seed = seed


def set_workspace(workspace: str):
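A short check of the fixed helper (a sketch assuming options is exposed from neural_compressor.config alongside set_random_seed; the seed value is arbitrary): the old body only evaluated options.random_seed without assigning, so the requested seed was silently dropped.

    from neural_compressor.config import options, set_random_seed

    set_random_seed(9527)
    print(options.random_seed)  # 9527 after this fix; before it, the seed never changed
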
