diff --git a/examples/onnxrt/nlp/huggingface_model/token_classification/layoutlmv2/quantization/ptq_dynamic/main.py b/examples/onnxrt/nlp/huggingface_model/token_classification/layoutlmv2/quantization/ptq_dynamic/main.py
index 95a49ce37ab..c7cf936270d 100644
--- a/examples/onnxrt/nlp/huggingface_model/token_classification/layoutlmv2/quantization/ptq_dynamic/main.py
+++ b/examples/onnxrt/nlp/huggingface_model/token_classification/layoutlmv2/quantization/ptq_dynamic/main.py
@@ -216,8 +216,6 @@ def _process_dataset(self):
         self.label = []
         self.onnx_inputs = []
         for inputs in self.dataset:
-            # import pdb;
-            # pdb.set_trace()
             onnx_inputs = []
             has_labels = all(inputs.get(k) is not None for k in self.label_names)
             if has_labels:
@@ -237,8 +235,6 @@ def _process_dataset(self):
                 }
                 """
             for key in self.onnx_input_names:
-                # import pdb;
-                # pdb.set_trace()
                 if key in inputs:
                     # onnx_inputs[key] = np.array([inputs[key]])
                     onnx_inputs.append(np.array(inputs[key]))
diff --git a/examples/onnxrt/nlp/huggingface_model/token_classification/layoutlmv2/quantization/ptq_static/main.py b/examples/onnxrt/nlp/huggingface_model/token_classification/layoutlmv2/quantization/ptq_static/main.py
index b3de22ac766..5540f4c002d 100644
--- a/examples/onnxrt/nlp/huggingface_model/token_classification/layoutlmv2/quantization/ptq_static/main.py
+++ b/examples/onnxrt/nlp/huggingface_model/token_classification/layoutlmv2/quantization/ptq_static/main.py
@@ -216,8 +216,6 @@ def _process_dataset(self):
         self.label = []
         self.onnx_inputs = []
         for inputs in self.dataset:
-            # import pdb;
-            # pdb.set_trace()
             onnx_inputs = []
             has_labels = all(inputs.get(k) is not None for k in self.label_names)
             if has_labels:
@@ -237,8 +235,6 @@ def _process_dataset(self):
                 }
                 """
             for key in self.onnx_input_names:
-                # import pdb;
-                # pdb.set_trace()
                 if key in inputs:
                     # onnx_inputs[key] = np.array([inputs[key]])
                     onnx_inputs.append(np.array(inputs[key]))
diff --git a/examples/pytorch/object_detection/ssd_resnet34/quantization/ptq/fx/python/models/utils.py b/examples/pytorch/object_detection/ssd_resnet34/quantization/ptq/fx/python/models/utils.py
index 940722075ab..e4d5db6b8ee 100644
--- a/examples/pytorch/object_detection/ssd_resnet34/quantization/ptq/fx/python/models/utils.py
+++ b/examples/pytorch/object_detection/ssd_resnet34/quantization/ptq/fx/python/models/utils.py
@@ -59,7 +59,6 @@ def _compute_padding(self, input, dim):
         return additional_padding, total_padding
 
     def forward(self, input):
-        #import pdb; pdb.set_trace()
         if self.padding == "VALID":
             return F.conv2d(
                 input,
@@ -180,7 +179,6 @@ def decode_boxes(rel_codes, boxes, weights):
     dh = dh / wh
 
     pred_ctr_x = dx * widths + ctr_x
-    #import pdb; pdb.set_trace()
     pred_ctr_y = dy * heights + ctr_y
     pred_w = torch.exp(dw) * widths
     pred_h = torch.exp(dh) * heights
@@ -194,5 +192,4 @@ def decode_boxes(rel_codes, boxes, weights):
         ],
         dim=2,
     )
-    #import pdb; pdb.set_trace()
     return pred_boxes
diff --git a/neural_coder/coders/tensorflow/amp.py b/neural_coder/coders/tensorflow/amp.py
index 70302d78d4a..77f349ef084 100644
--- a/neural_coder/coders/tensorflow/amp.py
+++ b/neural_coder/coders/tensorflow/amp.py
@@ -22,8 +22,6 @@ def __init__(self, file) -> None:
         self.keras_edited_flag = False
 
     def transform(self):
-        # import pdb
-        # pdb.set_trace()
         lines = self.file.split("\n")
         for line in lines:
             if self.is_modify(line):
diff --git a/neural_coder/coders/tensorflow/inc.py b/neural_coder/coders/tensorflow/inc.py
index 837dff143fb..30455bc27c8 100644
--- a/neural_coder/coders/tensorflow/inc.py
+++ b/neural_coder/coders/tensorflow/inc.py
@@ -21,8 +21,6 @@ def __init__(self, file) -> None:
         self.result = []
 
     def transform(self):
-        # import pdb
-        # pdb.set_trace()
         lines = self.file.split("\n")
         for line in lines:
             if self.is_modify(line):
diff --git a/neural_compressor/strategy/strategy.py b/neural_compressor/strategy/strategy.py
index 60101104e3c..06c5b0d0783 100644
--- a/neural_compressor/strategy/strategy.py
+++ b/neural_compressor/strategy/strategy.py
@@ -485,7 +485,6 @@ def traverse(self):
             return self.distributed_traverse()
         self._setup_pre_tuning_algo_scheduler()
         self._prepare_tuning()
-        # import pdb;pdb.set_trace()
         traverse_start_time = time()
         for op_tuning_cfg in self.next_tune_cfg():
             tuning_start_time = time()
diff --git a/neural_compressor/torch/algorithms/habana_fp8/fp8_quant.py b/neural_compressor/torch/algorithms/habana_fp8/fp8_quant.py
index 0330bd475ad..c80cc443531 100644
--- a/neural_compressor/torch/algorithms/habana_fp8/fp8_quant.py
+++ b/neural_compressor/torch/algorithms/habana_fp8/fp8_quant.py
@@ -131,7 +131,6 @@ def input_observer_forward_pre_hook(self, input):
 
     ### Insert input observer into model, only for fp8_e4m3 static quantization ###
     observer_cls = observer_mapping[act_observer]
-    # import pdb;pdb.set_trace()
 
     if isinstance(module, white_list):
         observer_obj = observer_cls(dtype=dtype_mapping[qconfig.act_dtype])
diff --git a/test/pruning_with_pt/pruning_2.x/test_auto_excluding_classifier.py b/test/pruning_with_pt/pruning_2.x/test_auto_excluding_classifier.py
index 0eb2d04005a..641525fd26d 100644
--- a/test/pruning_with_pt/pruning_2.x/test_auto_excluding_classifier.py
+++ b/test/pruning_with_pt/pruning_2.x/test_auto_excluding_classifier.py
@@ -24,7 +24,6 @@ def forward(self, x):
 
 class TestPruning(unittest.TestCase):
     def test_pruning_basic(self):
-        # import pdb;pdb.set_trace()
         hidden_size = 32
         model = NaiveMLP(hidden_size)
         # import classifier searching functions
diff --git a/test/pruning_with_pt/pruning_2.x/test_auto_slim.py b/test/pruning_with_pt/pruning_2.x/test_auto_slim.py
index 7af5cf8de20..b5f09a3c41d 100644
--- a/test/pruning_with_pt/pruning_2.x/test_auto_slim.py
+++ b/test/pruning_with_pt/pruning_2.x/test_auto_slim.py
@@ -50,7 6,6 @@ def test_pruning_basic(self):
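
Note (illustrative sketch, not part of the diff above): a small stand-alone script like the following is one way to find leftover pdb breakpoints of the kind removed here; the function name and pattern are assumptions for the example, not project tooling.

import re
from pathlib import Path

# Matches live or commented-out "import pdb" / "pdb.set_trace()" lines.
PDB_PATTERN = re.compile(r"^\s*#?\s*(import\s+pdb|pdb\.set_trace\(\))")

def find_pdb_leftovers(root: str = "."):
    """Yield (path, line_number, text) for every suspected pdb leftover under root."""
    for path in Path(root).rglob("*.py"):
        for lineno, line in enumerate(path.read_text(errors="ignore").splitlines(), 1):
            if PDB_PATTERN.search(line):
                yield path, lineno, line.strip()

if __name__ == "__main__":
    for path, lineno, text in find_pdb_leftovers():
        print(f"{path}:{lineno}: {text}")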