Add verbose and optimization args for parity tests (Gelu, Layernorm, …) #14739
Changes from 7 commits. Commits in this PR: 3153d9c, 4c375f6, 1b87be5, 209794d, 31e6da9, f8d57b3, c79ca88, 83de99d, 8525f3f, 96e8de9
```diff
@@ -85,6 +85,7 @@ def run(
     formula=0,
     sequence_length=2,
     fp32_gelu_op=True,
+    verbose=False,
 ):
     test_name = f"device={device}, float16={float16}, optimized={optimized}, batch_size={batch_size}, sequence_length={sequence_length}, hidden_size={hidden_size}, formula={formula}, fp32_gelu_op={fp32_gelu_op}"
     print(f"\nTesting: {test_name}")
@@ -108,6 +109,7 @@ def run(
             Gelu.get_fused_op(formula),
             use_gpu=use_gpu,
             opt_level=2 if use_gpu else None,
+            verbose=verbose,
         )
         onnx_path = optimized_onnx_path
     else:
@@ -123,7 +125,7 @@ def run(
         device,
         optimized,
         test_cases,
-        verbose=False,
+        verbose,
     )
 
     # clean up onnx file
@@ -135,8 +137,10 @@ def run(
 
 
 class TestGeluParity(unittest.TestCase):
+    verbose = False
+    optimized = True
+
     def setUp(self):
         self.optimized = True  # Change it to False if you want to test parity of non optimized ONNX
         self.test_cases = 100  # Number of test cases per test run
         self.sequence_length = 2
         self.hidden_size = 768
@@ -159,6 +163,7 @@ def run_test(
         formula,
         enable_assert=True,
         fp32_gelu_op=True,
+        verbose=False,
     ):
         if float16 and device.type == "cpu":  # CPU does not support FP16
             return
@@ -172,11 +177,12 @@ def run_test(
             formula,
             self.sequence_length,
             fp32_gelu_op,
+            verbose,
         )
         if enable_assert:
             self.assertTrue(num_failure == 0, "Failed: " + test_name)
 
-    def run_one(self, optimized, device, hidden_size=768, formula=0):
+    def run_one(self, optimized, device, hidden_size=768, formula=0, verbose=False):
         for batch_size in [4]:
             self.run_test(
                 batch_size,
@@ -186,6 +192,7 @@ def run_one(self, optimized, device, hidden_size=768, formula=0):
                 device=device,
                 formula=formula,
                 enable_assert=formula in self.formula_must_pass,
+                verbose=verbose,
             )
 
             self.run_test(
@@ -197,6 +204,7 @@ def run_one(self, optimized, device, hidden_size=768, formula=0):
                 formula=formula,
                 enable_assert=formula in self.formula_must_pass,
                 fp32_gelu_op=True,
+                verbose=verbose,
             )
 
             self.run_test(
@@ -208,12 +216,13 @@ def run_one(self, optimized, device, hidden_size=768, formula=0):
                 formula=formula,
                 enable_assert=formula in self.formula_must_pass,
                 fp32_gelu_op=False,
+                verbose=verbose,
             )
 
     def test_cpu(self):
         cpu = torch.device("cpu")
         for i in self.formula_to_test:
-            self.run_one(self.optimized, cpu, hidden_size=self.hidden_size, formula=i)
+            self.run_one(self.optimized, cpu, hidden_size=self.hidden_size, formula=i, verbose=self.verbose)
 
     def test_cuda(self):
         if not torch.cuda.is_available():
@@ -223,8 +232,13 @@ def test_cuda(self):
         else:
             gpu = torch.device("cuda")
             for i in self.formula_to_test:
-                self.run_one(self.optimized, gpu, hidden_size=self.hidden_size, formula=i)
+                self.run_one(self.optimized, gpu, hidden_size=self.hidden_size, formula=i, verbose=self.verbose)
 
 
 if __name__ == "__main__":
-    unittest.main()
+    args, remaining_args = parse_arguments(namespace_filter=unittest)
+
+    TestGeluParity.verbose = args.log_verbose
+    TestGeluParity.optimized = args.no_optimize
+
+    unittest.main(argv=remaining_args)
```
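Since unittest constructs the `TestGeluParity` instances itself, the `__main__` block can only hand CLI options to the tests by assigning them to class attributes before `unittest.main()` runs, which is why `verbose` and `optimized` were promoted to class level in the diff. A minimal, self-contained sketch of that pattern, using a hypothetical test class (only the `--log_verbose` flag name and the attribute-assignment idea come from the diff; the rest is illustrative):

```python
import argparse
import sys
import unittest


class TestExample(unittest.TestCase):
    # Class-level default; __main__ overrides it before the tests run.
    verbose = False

    def test_something(self):
        if self.verbose:
            print("running with verbose output")
        self.assertTrue(True)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--log_verbose", action="store_true")
    # parse_known_args leaves unittest's own flags (e.g. -v, -k) untouched.
    args, remaining = parser.parse_known_args()

    TestExample.verbose = args.log_verbose

    # unittest expects argv[0] to be the program name.
    unittest.main(argv=[sys.argv[0]] + remaining)
```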
Review comment on `TestGeluParity.optimized = args.no_optimize`:

Reviewer: The logic seems incorrect. Should it be `optimized = not no_optimize`?

Author: Changed this so `no_optimize` comes out as `optimize`; it was just the variable name argparse produced for the `--no_optimize` flag.
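The `parse_arguments` helper under discussion is not part of this diff, so its exact behavior (including the `namespace_filter` parameter) is unknown here. A plausible sketch, assuming it wraps `argparse.parse_known_args` and applying the reviewer's `not no_optimize` fix; the flag names `--log_verbose` and `--no_optimize` are inferred from `args.log_verbose` and `args.no_optimize` in the diff, and everything else is an assumption:

```python
import argparse
import sys


def parse_arguments(namespace_filter=None):
    """Hypothetical reconstruction of the helper called in __main__.

    Splits the test script's own flags from anything unittest should
    handle, returning (args, remaining_args). namespace_filter is
    accepted only to mirror the call site; a real implementation might
    use it to decide which unrecognized flags belong to unittest.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--log_verbose", action="store_true",
                        help="print per-case details during parity runs")
    parser.add_argument("--no_optimize", action="store_true",
                        help="test parity of the non-optimized ONNX model")
    args, remaining = parser.parse_known_args()

    # Per the review comment above: an opt-out flag has to be inverted
    # before it can drive an "optimized" switch.
    args.optimized = not args.no_optimize

    # Keep argv[0] so unittest.main(argv=...) sees a normal argv.
    return args, [sys.argv[0]] + remaining
```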
A second review comment:

Reviewer: Typo: `optimize`.

Author: Sure, done.
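Assuming a helper of that shape, the new options compose with unittest's own flags, e.g. `python test_parity_gelu.py --log_verbose --no_optimize -v` (the file name is an assumption; it is not shown in this excerpt): `--log_verbose` and `--no_optimize` are consumed by `parse_arguments`, while `-v` travels through `remaining_args` into `unittest.main`.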