Fix precision tuning bug for ONNX CUDA EP (#1133)
Signed-off-by: yuwenzho <[email protected]>
yuwenzho authored Aug 3, 2023
1 parent 7fbcf54 commit d1f315f
Showing 2 changed files with 7 additions and 1 deletion.
neural_compressor/strategy/utils/constant.py (1 addition, 1 deletion)
@@ -25,7 +25,7 @@
('weight','scheme'), ('weight','algorithm'), ('weight','granularity'),
('weight','bits'), ('weight','group_size'), 'sampling_size']

-PRECISION_SET_V2_0 = {'fp32', 'bf16'}
+PRECISION_SET_V2_0 = {'fp32', 'bf16', 'fp16'}

auto_query_order = ['static', 'dynamic', 'bf16', 'fp16', 'fp32']
static_query_order = ['static', 'bf16', 'fp16', 'fp32']
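
With 'fp16' included in PRECISION_SET_V2_0, fp16 becomes a valid fallback precision during tuning, matching the query orders above that already list it. Below is a minimal sketch of the user-facing setup this targets on the ONNX Runtime CUDA EP, mirroring the test added in the second file; the import path is assumed from the public API, and the model path, calibration dataloader, and eval function are placeholders.

from neural_compressor import PostTrainingQuantConfig, quantization

# Static post-training quantization with the ONNX Runtime CUDA execution provider,
# using the same configuration values as the test added below.
config = PostTrainingQuantConfig(
    approach='static',
    backend='onnxrt_cuda_ep',  # ONNX Runtime CUDA EP backend
    device='gpu',
    quant_level=1,
)

# 'model.onnx', calib_dataloader, and eval_func are placeholders for a real ONNX
# model (path or onnx.ModelProto), a calibration dataloader, and an accuracy
# evaluation callable.
q_model = quantization.fit(
    'model.onnx',
    config,
    calib_dataloader=calib_dataloader,
    eval_func=eval_func,
)
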
test/adaptor/onnxrt_adaptor/test_adaptor_onnxrt.py (6 additions, 0 deletions)
@@ -1231,6 +1231,12 @@ def eval(model):
calib_dataloader=self.matmul_dataloader, eval_func=eval)
self.assertTrue('QLinearMatMul' not in [i.op_type for i in q_model.nodes()])

+config = PostTrainingQuantConfig(approach='static', backend='onnxrt_cuda_ep', device='gpu', quant_level=1)
+q_model = quantization.fit(self.distilbert_model, config,
+    calib_dataloader=DummyNLPDataloader_dict("distilbert-base-uncased-finetuned-sst-2-english"),
+    eval_func=eval)
+self.assertTrue('QLinearMatMul' in [i.op_type for i in q_model.nodes()])

config = PostTrainingQuantConfig(approach='static', recipes={'optypes_to_exclude_output_quant': ['MatMul']})
q_model = quantization.fit(self.matmul_model, config,
calib_dataloader=self.matmul_dataloader, eval_func=eval)
