Improve UT Coverage for TF 3x (#1852)
Signed-off-by: zehao-intel <[email protected]>
Signed-off-by: chensuyue <[email protected]>
zehao-intel authored Jun 14, 2024
1 parent 794b276 commit 4e45f8f
Showing 42 changed files with 556 additions and 4,011 deletions.
3 changes: 3 additions & 0 deletions .azure-pipelines/scripts/ut/3x/coverage.3x_pt
@@ -5,6 +5,9 @@ branch = True
include =
*/neural_compressor/common/*
*/neural_compressor/torch/*
omit =
*/neural_compressor/torch/algorithms/habana_fp8/*
*/neural_compressor/torch/amp/*
exclude_lines =
pragma: no cover
raise NotImplementedError
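As context for how this rcfile is consumed: coverage.py scopes measurement to the include patterns and then drops the omit patterns, so the Habana FP8 and AMP modules no longer count against the 3x PyTorch coverage figure. A minimal sketch of a run under this config (the rcfile path is from this PR; the imported module is only illustrative):

# Sketch: measure coverage under the rcfile shown above.
import coverage

cov = coverage.Coverage(config_file=".azure-pipelines/scripts/ut/3x/coverage.3x_pt")
cov.start()
import neural_compressor.common  # any measured module; habana_fp8/* and amp/* are omitted
cov.stop()
cov.report()
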
22 changes: 19 additions & 3 deletions .azure-pipelines/scripts/ut/3x/run_3x_tf.sh
@@ -16,20 +16,36 @@ inc_path=$(python -c 'import neural_compressor; print(neural_compressor.__path__
cd /neural-compressor/test/3x || exit 1
rm -rf torch
rm -rf onnxrt
rm -rf tensorflow/quantization/ptq/newapi
mv tensorflow/keras ../3x_keras
mv tensorflow/quantization/itex ../3x_itex
mv tensorflow/quantization/ptq/newapi ../3x_newapi

LOG_DIR=/neural-compressor/log_dir
mkdir -p ${LOG_DIR}
ut_log_name=${LOG_DIR}/ut_3x_tf.log

# test for tensorflow ut
pytest --cov="${inc_path}" -vs --disable-warnings --html=report_tf_quant.html --self-contained-html ./tensorflow/quantization 2>&1 | tee -a ${ut_log_name}
rm -rf tensorflow/quantization
pytest --cov="${inc_path}" --cov-append -vs --disable-warnings --html=report_tf.html --self-contained-html . 2>&1 | tee -a ${ut_log_name}

# test for tensorflow new api ut
pip uninstall tensorflow -y
pip install /tf_dataset/tf_binary/230928/tensorflow*.whl
pip install cmake
pip install protobuf==3.20.3
pip install horovod==0.27.0
pip list
rm -rf tensorflow/*
mkdir -p tensorflow/quantization/ptq
mv ../3x_newapi tensorflow/quantization/ptq/newapi
find . -name "test*.py" | sed "s,\.\/,python -m pytest --cov=${inc_path} --cov-append -vs --disable-warnings ,g" > run.sh
cat run.sh
bash run.sh 2>&1 | tee -a ${ut_log_name}

# test for itex ut
rm -rf tensorflow/*
mv ../3x_keras tensorflow/keras
mv ../3x_itex tensorflow/quantization/itex
pip uninstall tensorflow -y
pip install intel-extension-for-tensorflow[cpu]
pytest --cov="${inc_path}" --cov-append -vs --disable-warnings --html=report_keras.html --self-contained-html ./tensorflow 2>&1 | tee -a ${ut_log_name}

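The new-API block above rewrites every discovered test file into its own pytest invocation (the sed replaces each leading "./" with the pytest command prefix), so each suite runs in a fresh process while --cov-append accumulates one combined coverage result. A rough Python equivalent of that generation step (a sketch; inc_path is a stand-in for ${inc_path}):

# Sketch of the find/sed pipeline: emit one pytest command per test file.
import pathlib

inc_path = "/usr/lib/python3/site-packages/neural_compressor"  # stand-in for ${inc_path}
for test_file in sorted(pathlib.Path(".").rglob("test*.py")):
    print(f"python -m pytest --cov={inc_path} --cov-append -vs --disable-warnings {test_file}")
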
46 changes: 0 additions & 46 deletions .azure-pipelines/scripts/ut/3x/run_3x_tf_new_api.sh

This file was deleted.

14 changes: 0 additions & 14 deletions .azure-pipelines/ut-3x-tf.yml
@@ -41,20 +41,6 @@ stages:
uploadPath: $(UPLOAD_PATH)
utArtifact: "ut_3x"

- stage: NewTF
displayName: Unit Test 3x New TF API
dependsOn: []
jobs:
- job:
displayName: Unit Test 3x New TF API
steps:
- template: template/ut-template.yml
parameters:
dockerConfigName: "commonDockerConfig"
utScriptFileName: "3x/run_3x_tf_new_api"
uploadPath: $(UPLOAD_PATH)
utArtifact: "ut_3x_tf_new_api"

- stage: TensorFlow_baseline
displayName: Unit Test 3x TensorFlow baseline
dependsOn: []
4 changes: 0 additions & 4 deletions .github/checkgroup.yml
@@ -53,11 +53,7 @@ subprojects:
- "Model-Test (Run ONNX Model resnet50-v1-12)"
- "Model-Test (Run PyTorch Model resnet18)"
- "Model-Test (Run PyTorch Model resnet18_fx)"
- "Model-Test (Run TensorFlow Model darknet19)"
- "Model-Test (Run TensorFlow Model inception_v1)"
- "Model-Test (Run TensorFlow Model resnet-101)"
- "Model-Test (Run TensorFlow Model resnet50v1.5)"
- "Model-Test (Run TensorFlow Model ssd_mobilenet_v1_ckpt)"
- "Model-Test (Run TensorFlow Model ssd_resnet50_v1)"

- id: "Model Tests 3x workflow"
131 changes: 6 additions & 125 deletions neural_compressor/tensorflow/algorithms/static_quant/keras.py
@@ -90,46 +90,13 @@ def __init__(self, framework_specific_info):
os.mkdir(DEFAULT_WORKSPACE)
self.tmp_dir = (DEFAULT_WORKSPACE + "tmp_model.keras") if self.keras3 else (DEFAULT_WORKSPACE + "tmp_model")

def _check_itex(self):
"""Check if the Intel® Extension for TensorFlow has been installed."""
try:
import intel_extension_for_tensorflow
except:
raise ImportError(
"The Intel® Extension for TensorFlow is not installed. "
"Please install it to run models on ITEX backend"
)

def convert_bf16(self):
"""Execute the BF16 conversion."""
tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")
model = self.pre_optimized_model

for layer in model.layers:
if layer.name in self.bf16_ops:
layer.dtype = "mixed_bfloat16"

model.save(self.tmp_dir)
converted_model = tf.keras.models.load_model(self.tmp_dir)
tf.keras.mixed_precision.set_global_policy("float32")

return converted_model

# (TODO) choose the proper quantize mode
def _check_quantize_mode(self, model):
"""Check what quantize mode to use."""
for layer in model.layers:
if "ReLU" in layer.__class__.__name__:
return "MIN_FIRST"
return "SCALED"

def _set_weights(self, qmodel, layer_weights):
"""Set fp32 weights to qmodel."""
for qlayer in qmodel.layers:
if qlayer.get_weights():
if qlayer.name in layer_weights:
qlayer.set_weights(layer_weights[qlayer.name])
- else:
+ else:  # pragma: no cover
hit_layer = False
for sub_layer in qlayer.submodules:
if sub_layer.name in layer_weights:
@@ -164,7 +131,7 @@ def _check_quantize_format(self, model):
self.conv_format[layer.name] = "u8"
break

- def _fuse_bn_keras3(self, fuse_conv_bn, fp32_layers):
+ def _fuse_bn_keras3(self, fuse_conv_bn, fp32_layers):  # pragma: no cover
fuse_layers = []
fused_bn_name = ""
for idx, layer in enumerate(fp32_layers):
@@ -211,7 +178,7 @@ def _fuse_bn_keras3(self, fuse_conv_bn, fp32_layers):

return fuse_layers

- def _fuse_bn_keras2(self, fuse_conv_bn, fp32_layers):
+ def _fuse_bn_keras2(self, fuse_conv_bn, fp32_layers):  # pragma: no cover
fuse_layers = []
for idx, layer in enumerate(fp32_layers):
if hasattr(layer, "_inbound_nodes"):
@@ -272,7 +239,7 @@ def _fuse_bn_keras2(self, fuse_conv_bn, fp32_layers):

return fuse_layers

- def _fuse_bn(self, model):
+ def _fuse_bn(self, model):  # pragma: no cover
"""Fusing Batch Normalization."""
model.save(self.tmp_dir)
fuse_bn_model = tf.keras.models.load_model(self.tmp_dir)
@@ -362,14 +329,6 @@ def quantize(self, quant_config, model, dataloader, iteration, q_func=None):
tune_cfg = converter.parse_to_tune_cfg()
self.tuning_cfg_to_fw(tune_cfg)

# just convert the input model to mixed_bfloat16
if self.bf16_ops and not self.quantize_config["op_wise_config"]:
converted_model = self.convert_bf16()
return converted_model

# if self.backend == "itex":
# self._check_itex()

logger.debug("Dump quantization configurations:")
logger.debug(self.quantize_config)
calib_sampling_size = tune_cfg.get("calib_sampling_size", 1)
@@ -469,59 +428,6 @@ def _calibrate(self, model, dataloader, calib_interation):

return quantized_model

@dump_elapsed_time(customized_msg="Model inference")
def evaluate(
self,
model,
dataloader,
postprocess=None,
metrics=None,
measurer=None,
iteration=-1,
fp32_baseline=False,
):
"""The function is used to run evaluation on validation dataset.
Args:
model (object): The model to do calibration.
dataloader (generator): generate the data and labels.
postprocess (object, optional): process the result from the model
metric (object, optional): Depends on model category. Defaults to None.
measurer (object, optional): for precise benchmark measurement.
iteration(int, optional): control steps of mini-batch
fp32_baseline (boolean, optional): only for compare_label=False pipeline
"""
# use keras object
keras_model = model.model
logger.info("Start to evaluate the Keras model.")
results = []
for idx, (inputs, labels) in enumerate(dataloader):
# use predict on batch
if measurer is not None:
measurer.start()
predictions = keras_model.predict_on_batch(inputs)
measurer.end()
else:
predictions = keras_model.predict_on_batch(inputs)

if self.fp32_preds_as_label:
self.fp32_results.append(predictions) if fp32_baseline else results.append(predictions)

if postprocess is not None:
predictions, labels = postprocess((predictions, labels))
if metrics:
for metric in metrics:
if not hasattr(metric, "compare_label") or (
hasattr(metric, "compare_label") and metric.compare_label
):
metric.update(predictions, labels)
if idx + 1 == iteration:
break

acc = 0 if metrics is None else [metric.result() for metric in metrics]

return acc if not isinstance(acc, list) or len(acc) > 1 else acc[0]

def query_fw_capability(self, model):
"""The function is used to return framework tuning capability.
@@ -621,7 +527,7 @@ def tuning_cfg_to_fw(self, tuning_cfg):
for each_op_info in tuning_cfg["op"]:
op_name = each_op_info[0]

- if tuning_cfg["op"][each_op_info]["activation"]["dtype"] == "bf16":
+ if tuning_cfg["op"][each_op_info]["activation"]["dtype"] == "bf16":  # pragma: no cover
if each_op_info[1] in bf16_type:
bf16_ops.append(op_name)
continue
@@ -693,31 +599,6 @@ def _get_specified_version_cfg(self, data):

return default_config

def get_version(self):
"""Get the current backend version information.
Returns:
[string]: version string.
"""
return self.cur_config["version"]["name"]

def get_precisions(self):
"""Get supported precisions for current backend.
Returns:
[string list]: the precisions' name.
"""
return self.cur_config["precisions"]["names"]

def get_op_types(self):
"""Get the supported op types by all precisions.
Returns:
[dictionary list]: A list composed of dictionary which key is precision
and value is the op types.
"""
return self.cur_config["ops"]

def get_quantization_capability(self):
"""Get the supported op types' quantization capability.
@@ -846,7 +727,7 @@ def _parse_inputs(self, BN_fused_layers=None, conv_names=None):

try:
model_input = self.model.input
- except ValueError:
+ except ValueError:  # pragma: no cover
model_input = self.model.inputs[0]

return input_layer_dict, model_input
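Tying the keras.py changes back to the coverage config: branches that cannot be exercised in CI are tagged with # pragma: no cover, matching the exclude_lines entry in the rcfile shown earlier, while dead helpers (evaluate, convert_bf16, the query-API getters) are deleted outright. A minimal, hypothetical illustration of the pragma's effect:

# Hypothetical example: the tagged branch is excluded from coverage accounting,
# so an untestable fallback does not lower the reported percentage.
def load_backend(name):
    if name == "default":
        return "tensorflow"
    else:  # pragma: no cover
        raise NotImplementedError(f"backend {name} is not supported")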