Skip to content

Commit

Permalink
Conv fusion workaround (w/a) rollback: unit-test update (intel#1038)
Browse files Browse the repository at this point in the history
  • Loading branch information
lvliang-intel authored and chensuyue committed Jul 2, 2022
1 parent 2f8f4db commit bbdc037
Show file tree
Hide file tree
Showing 3 changed files with 136 additions and 41 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -286,7 +286,7 @@ def apply_conv3d_add_addn_relu_fusion(self, match_node_name):
new_node.CopyFrom(node)
self.add_output_graph_node(new_node)

def apply_conv3d_add_addn_fusion(self, match_node_name):
def apply_conv3d_add_addn_fusion(self, match_node_name): # pragma: no cover
    """Workaround: fall back from Conv3D+Add+AddN fusion to Conv3D+Add.

    Instead of fusing the full matched pattern, only the first two matched
    nodes (Conv3D + Add) are fused via ``apply_conv3d_add_fusion``.
    Excluded from coverage (``pragma: no cover``) because it is a
    temporary TF-bug workaround, not a code path under test.

    Args:
        match_node_name: list of matched node names; only the first two
            entries are forwarded to the reduced fusion.

    Returns:
        Whatever ``apply_conv3d_add_fusion`` returns for the reduced match.
    """
    # Remove this WA once TF bug is fixed
    return self.apply_conv3d_add_fusion(match_node_name[:2])

Expand Down Expand Up @@ -1651,7 +1651,7 @@ def apply_newly_conv_biasadd_swishf32_fusion(self, match_node_name):
new_node.CopyFrom(node)
self.add_output_graph_node(new_node)

def apply_newly_conv_biasadd_addn_fusion(self, match_node_name):
def apply_newly_conv_biasadd_addn_fusion(self, match_node_name): # pragma: no cover
    """Workaround: fall back from Conv+BiasAdd+AddN fusion to Conv+BiasAdd.

    Delegates to ``apply_newly_conv_biasadd_fusion`` with only the first two
    matched nodes, dropping the AddN from the fused pattern.
    Excluded from coverage (``pragma: no cover``) because it is a
    temporary TF-bug workaround, not a code path under test.

    Args:
        match_node_name: list of matched node names; only the first two
            entries are forwarded to the reduced fusion.

    Returns:
        Whatever ``apply_newly_conv_biasadd_fusion`` returns for the
        reduced match.
    """
    # Remove this WA once TF bug is fixed
    return self.apply_newly_conv_biasadd_fusion(match_node_name[:2])

Expand Down
13 changes: 6 additions & 7 deletions test/tfnewapi/test_tensorflow_graph_conv_fusion.py
Original file line number Diff line number Diff line change
Expand Up @@ -214,8 +214,7 @@ def test_conv_addv2_fusion(self):
conv2_weights = tf.compat.v1.get_variable("weight_conv2", [3, 3, 16, 16],
initializer=tf.compat.v1.random_normal_initializer())
conv2 = tf.nn.conv2d(x, conv2_weights, strides=[1, 2, 2, 1], padding="SAME")
leaky_relu = tf.nn.leaky_relu(conv2)
sumadd = tf.raw_ops.AddV2(x=conv1, y=leaky_relu, name='addv2')
sumadd = tf.raw_ops.AddV2(x=conv1, y=conv2, name='addv2')

out_name = sumadd.name.split(':')[0]
with tf.compat.v1.Session() as sess:
Expand All @@ -232,13 +231,13 @@ def test_conv_addv2_fusion(self):
quantizer.model = output_graph_def
output_graph = quantizer.fit()

found_conv_sumadd_fusion = False
found_conv_fusion = False
for i in output_graph.graph_def.node:
if i.op == '_QuantizedConv2D' and \
i.attr['fused_ops'].list.s == [b'BiasAdd', b'Sum']:
found_conv_sumadd_fusion = True
if i.op.find('QuantizedConv2D') != -1:
found_conv_fusion = True
break

self.assertEqual(found_conv_sumadd_fusion, True)
self.assertEqual(found_conv_fusion, True)

@disable_random()
def test_conv_biasadd_add_relu_fusion(self):
Expand Down
160 changes: 128 additions & 32 deletions test/tfnewapi/test_tensorflow_graph_conv_requantize_fusion.py
Original file line number Diff line number Diff line change
Expand Up @@ -197,6 +197,7 @@ def test_single_conv3d_fusion(self):
break
self.assertEqual(found_conv_fusion, True)


@disable_random()
def test_conv3d_biasadd_fusion(self):
x = tf.compat.v1.placeholder(tf.float32, [1,64,64,64,1], name="input")
Expand All @@ -205,9 +206,8 @@ def test_conv3d_biasadd_fusion(self):
conv_weights = tf.compat.v1.get_variable("weight2", [4, 4, 4, 1, 64],
initializer=tf.compat.v1.random_normal_initializer())
conv = tf.nn.conv3d(x_pad, conv_weights, strides=[1,2,2,2,1], padding="VALID")
normed = tf.compat.v1.layers.batch_normalization(conv)

out_name = normed.name.split(':')[0]
relu6 = tf.nn.relu6(conv, name='op_to_store')
out_name = relu6.name.split(':')[0]
with tf.compat.v1.Session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
output_graph_def = graph_util.convert_variables_to_constants(
Expand All @@ -221,14 +221,15 @@ def test_conv3d_biasadd_fusion(self):
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.model = output_graph_def
output_graph = quantizer.fit()
found_conv_fusion = True
found_conv_fusion = False

for i in output_graph.graph_def.node:
if i.op == 'batch_normalization/FusedBatchNormV3':
found_conv_fusion = False
if i.op == '_QuantizedConv3D':
found_conv_fusion = True
break
self.assertEqual(found_conv_fusion, True)

"""
@disable_random()
def test_conv3d_biasadd_add_fusion(self):
x = tf.compat.v1.placeholder(tf.float32, [1,64,64,64,1], name="input")
Expand All @@ -237,8 +238,7 @@ def test_conv3d_biasadd_add_fusion(self):
conv_weights = tf.compat.v1.get_variable("weight3", [4, 4, 4, 1, 64],
initializer=tf.compat.v1.random_normal_initializer())
conv = tf.nn.conv3d(x_pad, conv_weights, strides=[1,2,2,2,1], padding="VALID")
normed = tf.compat.v1.layers.batch_normalization(conv)
add = normed + tf.constant([3.0])
add = tf.raw_ops.AddV2(x=conv, y=tf.constant([3.0]), name="normed_addv2")
out_name = add.name.split(':')[0]
with tf.compat.v1.Session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
Expand All @@ -260,6 +260,7 @@ def test_conv3d_biasadd_add_fusion(self):
found_conv_fusion = True
break
self.assertEqual(found_conv_fusion, True)
"""

@disable_random()
def test_conv3d_add_relu_fusion(self):
Expand Down Expand Up @@ -514,16 +515,11 @@ def test_conv3d_add_addn_fusion(self):
quantizer.model = output_graph_def
output_graph = quantizer.fit()

found_conv_sumadd_fusion = False
found_conv_biasadd_fusion = False
found_conv_fusion = False
for i in output_graph.graph_def.node:
if i.op == '_QuantizedConv3D':
if str(b'Sum') in str(i.attr['fused_ops'].list.s):
found_conv_sumadd_fusion = True
if str(i.attr['fused_ops'].list.s) == str([b'BiasAdd', b'Sum']):
found_conv_biasadd_fusion = True
self.assertEqual(found_conv_sumadd_fusion, True)
self.assertEqual(found_conv_biasadd_fusion, True)
found_conv_fusion = True
self.assertEqual(found_conv_fusion, True)

@disable_random()
def test_conv3d_add_addn_relu_fusion(self):
Expand Down Expand Up @@ -624,16 +620,11 @@ def test_conv3d_add_fusion(self):
quantizer.model = output_graph_def
output_graph = quantizer.fit()

found_conv_sumadd_fusion = False
found_conv_biasadd_fusion = False
found_conv_fusion = False
for i in output_graph.graph_def.node:
if i.op == '_QuantizedConv3D':
if str(b'Sum') in str(i.attr['fused_ops'].list.s):
found_conv_sumadd_fusion = True
if str(i.attr['fused_ops'].list.s) == str([b'BiasAdd', b'Sum']):
found_conv_biasadd_fusion = True
self.assertEqual(found_conv_sumadd_fusion, True)
self.assertEqual(found_conv_biasadd_fusion, True)
found_conv_fusion = True
self.assertEqual(found_conv_fusion, True)

@disable_random()
def test_conv3d_add_const_addn_relu_fusion(self):
Expand Down Expand Up @@ -707,16 +698,11 @@ def test_conv3d_add_const_addn_fusion(self):
quantizer.model = output_graph_def
output_graph = quantizer.fit()

found_conv_sumadd_fusion = False
found_conv_biasadd_fusion = False
found_conv_fusion = False
for i in output_graph.graph_def.node:
if i.op == '_QuantizedConv3D':
if str(b'Sum') in str(i.attr['fused_ops'].list.s):
found_conv_sumadd_fusion = True
if str(i.attr['fused_ops'].list.s) == str([b'BiasAdd', b'Sum']):
found_conv_biasadd_fusion = True
self.assertEqual(found_conv_sumadd_fusion, True)
self.assertEqual(found_conv_biasadd_fusion, True)
found_conv_fusion = True
self.assertEqual(found_conv_fusion, True)

@disable_random()
def test_conv3d_add_no_relu_fusion(self):
Expand Down Expand Up @@ -783,6 +769,116 @@ def test_conv3d_add_const_relu_fusion(self):
found_conv_fusion = True
break
self.assertEqual(found_conv_fusion, True)

@disable_random()
def test_conv2d_add_const_leakyrelu_add_fusion(self):
    """Conv2D + AddV2(const) + LeakyRelu + AddV2(conv) must still quantize.

    Builds pad -> relu -> conv2d_1 -> AddV2(const) -> leaky_relu, then adds
    the leaky_relu output to a second conv branch, quantizes the frozen
    graph, and asserts a ``_QuantizedConv2D`` node appears in the result.
    """
    x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input")
    paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]])
    x_pad = tf.pad(x, paddings, "CONSTANT")
    top_relu = tf.nn.relu(x_pad)
    conv2d_1_weights = tf.compat.v1.get_variable("weight29", [3, 3, 16, 16],
                                                 initializer=tf.compat.v1.random_normal_initializer())
    conv2d_1 = tf.nn.conv2d(top_relu, conv2d_1_weights, strides=[1, 2, 2, 1], padding="SAME")
    # Per-channel constant bias added through raw AddV2 (not tf.nn.bias_add).
    y_const = tf.constant(np.random.randn(16), dtype=tf.float32)
    add_1 = tf.raw_ops.AddV2(x=conv2d_1, y=y_const, name='addv2_11')
    relu = tf.nn.leaky_relu(add_1)
    conv2d_2_weights = tf.compat.v1.get_variable("weight30", [3, 3, 16, 16],
                                                 initializer=tf.compat.v1.random_normal_initializer())
    conv2d_2 = tf.nn.conv2d(top_relu, conv2d_2_weights, strides=[1, 2, 2, 1], padding="SAME")
    # Second AddV2 takes a non-const (conv) input: the sum-fusion case.
    add_2 = tf.raw_ops.AddV2(x=relu, y=conv2d_2, name='addv2_12')
    out_name = add_2.name.split(':')[0]
    with tf.compat.v1.Session() as sess:
        sess.run(tf.compat.v1.global_variables_initializer())
        # Freeze variables so the quantizer receives a constant graph_def.
        output_graph_def = graph_util.convert_variables_to_constants(
            sess=sess,
            input_graph_def=sess.graph_def,
            output_node_names=[out_name])
        from neural_compressor.experimental import Quantization, common
        quantizer = Quantization('inteltensorflow_yaml.yaml')
        dataset = quantizer.dataset('dummy', shape=(100, 56, 56, 16), label=True)
        quantizer.eval_dataloader = common.DataLoader(dataset)
        quantizer.calib_dataloader = common.DataLoader(dataset)
        quantizer.model = output_graph_def
        output_graph = quantizer.fit()
        found_conv_fusion = False
        for i in output_graph.graph_def.node:
            if i.op == '_QuantizedConv2D':
                found_conv_fusion = True
        self.assertEqual(found_conv_fusion, True)

@disable_random()
def test_conv3d_add_const_leakyrelu_add_fusion(self):
    """Conv3D + AddV2(const) + LeakyRelu + AddV2(conv) must still quantize.

    3D counterpart of the conv2d case above-in-spirit: pad -> relu ->
    conv3d_1 -> AddV2(const) -> leaky_relu, summed with a second conv3d
    branch; asserts a ``_QuantizedConv3D`` node survives quantization.
    """
    x = tf.compat.v1.placeholder(tf.float32, [1, 128, 64, 64, 16], name="input")
    paddings = tf.constant([[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]])
    x_pad = tf.pad(x, paddings, "CONSTANT")
    top_relu = tf.nn.relu(x_pad)
    conv3d_1_weights = tf.compat.v1.get_variable("weight31", [3, 3, 3, 16, 32],
                                                 initializer=tf.compat.v1.random_normal_initializer())
    conv3d_1 = tf.nn.conv3d(top_relu, conv3d_1_weights, strides=[1, 2, 2, 2, 1], padding="SAME")
    # Broadcastable (1,1,1,1,32) constant added through raw AddV2.
    y_const = tf.constant(np.random.randn(1,1,1,1,32), dtype=tf.float32)
    add_1 = tf.raw_ops.AddV2(x=conv3d_1, y=y_const, name='addv2_13')
    relu = tf.nn.leaky_relu(add_1)
    conv3d_2_weights = tf.compat.v1.get_variable("weight32", [3, 3, 3, 16, 32],
                                                 initializer=tf.compat.v1.random_normal_initializer())
    conv3d_2 = tf.nn.conv3d(top_relu, conv3d_2_weights, strides=[1, 2, 2, 2, 1], padding="SAME")
    # Second AddV2 takes a non-const (conv) input: the sum-fusion case.
    add_2 = tf.raw_ops.AddV2(x=relu, y=conv3d_2, name='addv2_14')
    out_name = add_2.name.split(':')[0]
    with tf.compat.v1.Session() as sess:
        sess.run(tf.compat.v1.global_variables_initializer())
        # Freeze variables so the quantizer receives a constant graph_def.
        output_graph_def = graph_util.convert_variables_to_constants(
            sess=sess,
            input_graph_def=sess.graph_def,
            output_node_names=[out_name])
        from neural_compressor.experimental import Quantization, common
        quantizer = Quantization('inteltensorflow_yaml.yaml')
        dataset = quantizer.dataset('dummy', shape=(100, 128, 64, 64, 16), label=True)
        quantizer.eval_dataloader = common.DataLoader(dataset)
        quantizer.calib_dataloader = common.DataLoader(dataset)
        quantizer.model = output_graph_def
        output_graph = quantizer.fit()
        found_conv_fusion = False
        for i in output_graph.graph_def.node:
            if i.op == '_QuantizedConv3D':
                found_conv_fusion = True
        self.assertEqual(found_conv_fusion, True)

@disable_random()
def test_conv3d_add_addn_non_const_fusion(self):
    """Conv3D + AddV2 + AddV2 chain with all-non-const inputs must quantize.

    Three parallel conv3d branches are summed pairwise (conv1+conv2, then
    +conv3) — no constant operand anywhere — and the test asserts a
    ``_QuantizedConv3D`` node appears after quantization.
    """
    x = tf.compat.v1.placeholder(tf.float32, [1, 128, 64, 64, 16], name="input")
    paddings = tf.constant([[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]])
    x_pad = tf.pad(x, paddings, "CONSTANT")
    top_relu = tf.nn.relu(x_pad)
    conv3d_1_weights = tf.compat.v1.get_variable("weight33", [3, 3, 3, 16, 32],
                                                 initializer=tf.compat.v1.random_normal_initializer())
    conv3d_1 = tf.nn.conv3d(top_relu, conv3d_1_weights, strides=[1, 2, 2, 2, 1], padding="SAME")
    conv3d_2_weights = tf.compat.v1.get_variable("weight34", [3, 3, 3, 16, 32],
                                                 initializer=tf.compat.v1.random_normal_initializer())
    conv3d_2 = tf.nn.conv3d(top_relu, conv3d_2_weights, strides=[1, 2, 2, 2, 1], padding="SAME")
    # First sum: conv + conv (both non-const).
    add_1 = tf.raw_ops.AddV2(x=conv3d_1, y=conv3d_2, name='addv2_15')
    conv3d_3_weights = tf.compat.v1.get_variable("weight35", [3, 3, 3, 16, 32],
                                                 initializer=tf.compat.v1.random_normal_initializer())
    conv3d_3 = tf.nn.conv3d(top_relu, conv3d_3_weights, strides=[1, 2, 2, 2, 1], padding="SAME")
    # Second sum stacks a third conv branch on the previous sum.
    add = tf.raw_ops.AddV2(x=add_1, y=conv3d_3, name='addv2_16')
    out_name = add.name.split(':')[0]
    with tf.compat.v1.Session() as sess:
        sess.run(tf.compat.v1.global_variables_initializer())
        # Freeze variables so the quantizer receives a constant graph_def.
        output_graph_def = graph_util.convert_variables_to_constants(
            sess=sess,
            input_graph_def=sess.graph_def,
            output_node_names=[out_name])
        from neural_compressor.experimental import Quantization, common
        quantizer = Quantization('inteltensorflow_yaml.yaml')
        dataset = quantizer.dataset('dummy', shape=(100, 128, 64, 64, 16), label=True)
        quantizer.eval_dataloader = common.DataLoader(dataset)
        quantizer.calib_dataloader = common.DataLoader(dataset)
        quantizer.model = output_graph_def
        output_graph = quantizer.fit()

        found_conv_fusion = False
        for i in output_graph.graph_def.node:
            if i.op == '_QuantizedConv3D':
                found_conv_fusion = True
        self.assertEqual(found_conv_fusion, True)

if __name__ == '__main__':
unittest.main()

0 comments on commit bbdc037

Please sign in to comment.