From 82fc480726f8884a78d59e8fe5fbbadd0b280d57 Mon Sep 17 00:00:00 2001 From: zehao-intel Date: Wed, 5 Jun 2024 21:29:53 +0800 Subject: [PATCH] Remove Deprecated TF2ONNX UT Case (#1843) Signed-off-by: zehao-intel --- ...test_tensorflow_qdq_convert_to_onnx_qdq.py | 59 ------------------- 1 file changed, 59 deletions(-) diff --git a/test/itex/test_tensorflow_qdq_convert_to_onnx_qdq.py b/test/itex/test_tensorflow_qdq_convert_to_onnx_qdq.py index 81c9960a8b8..d9e35afc356 100644 --- a/test/itex/test_tensorflow_qdq_convert_to_onnx_qdq.py +++ b/test/itex/test_tensorflow_qdq_convert_to_onnx_qdq.py @@ -57,65 +57,6 @@ def tearDownClass(self): if version1_gte_version2(tf.version.VERSION, "2.8.0"): shutil.rmtree("workspace") - @disable_random() - @unittest.skipIf(version1_lt_version2(tf.version.VERSION, "2.8.0"), "Only supports tf greater 2.7.0") - def test_convert_tf_qdq_to_onnx_qdq(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(top_relu, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed = tf.compat.v1.layers.batch_normalization(conv) - - conv_weights2 = tf.compat.v1.get_variable( - "weight2", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv2 = tf.nn.conv2d(top_relu, conv_weights2, strides=[1, 2, 2, 1], padding="SAME") - add = tf.raw_ops.Add(x=normed, y=conv2, name="addv2") - relu = tf.nn.relu(add) - relu6 = tf.nn.relu6(relu, name="op_to_store") - - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - 
quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - from neural_compressor.config import TF2ONNXConfig - - config = TF2ONNXConfig() - output_graph.export("workspace/tf_qdq_to_onnx_qdq.onnx", config) - - import onnx - - onnx_model = onnx.load("workspace/tf_qdq_to_onnx_qdq.onnx") - onnx.checker.check_model(onnx_model) - - import onnxruntime as ort - - from neural_compressor.data import DATALOADERS, Datasets - - ort_session = ort.InferenceSession("workspace/tf_qdq_to_onnx_qdq.onnx") - dataset = Datasets("tensorflow")["dummy"]((100, 56, 56, 16)) - dataloader = DATALOADERS["tensorflow"](dataset) - it = iter(dataloader) - input = next(it) - input_dict = {"input:0": input[0]} - outputs = ort_session.run(None, input_dict) - self.assertNotEqual(outputs, None) - @disable_random() @unittest.skipIf(version1_lt_version2(tf.version.VERSION, "2.8.0"), "Only supports tf greater 2.7.0") def test_convert_tf_fp32_to_onnx_fp32(self):