I'm getting an error while trying to calculate mAP for my Faster R-CNN object detection model with a custom dataset in PyTorch during testing.
To Reproduce
Error message:
Traceback (most recent call last):
  File "c:\Users\lemon\Desktop\ap_py_2\inference.py", line 409, in <module>
    print(metric.compute())
  File "C:\Users\lemon\miniconda3\envs\cnn-env-03\lib\site-packages\torchmetrics\metric.py", line 531, in wrapped_func
    value = compute(*args, **kwargs)
  File "c:\Users\lemon\Desktop\ap_py_2\mean_ap.py", line 861, in compute
    precisions, recalls = self._calculate(classes)
  File "c:\Users\lemon\Desktop\ap_py_2\mean_ap.py", line 715, in _calculate
    eval_imgs = [
  File "c:\Users\lemon\Desktop\ap_py_2\mean_ap.py", line 716, in <listcomp>
    self._evaluate_image(img_id, class_id, area, max_detections, ious)
  File "c:\Users\lemon\Desktop\ap_py_2\mean_ap.py", line 563, in _evaluate_image
    return self.__evaluate_image_preds_no_gt(det, idx, det_label_mask, max_det, area_range, nb_iou_thrs)
  File "c:\Users\lemon\Desktop\ap_py_2\mean_ap.py", line 518, in __evaluate_image_preds_no_gt
    det_ignore_area = (det_areas < area_range[0]) | (det_areas > area_range[1])
RuntimeError: value cannot be converted to type int without overflow
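I can't be sure without stepping through mean_ap.py, but the upper bound of the default COCO "all" area range is 1e5 ** 2 = 10,000,000,000, which is larger than torch.iinfo(torch.int32).max, and my predicted boxes are cast with .astype(np.int32) in the code below. A rough standalone sketch of that suspicion (the dtype and values here are assumptions, not pulled from the actual run):

import torch

# Areas derived from int32 boxes end up in an integer tensor.
det_areas = torch.tensor([1200, 4500], dtype=torch.int32)

# Default "all" area range used for COCO-style evaluation.
area_range = (0, int(1e5 ** 2))  # 10_000_000_000 does not fit in int32

# On some PyTorch versions the scalar is converted to the tensor's dtype here,
# which would raise:
# RuntimeError: value cannot be converted to type int without overflow
det_ignore_area = (det_areas < area_range[0]) | (det_areas > area_range[1])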
Code sample (inference.py)
import numpy as np
import cv2
import torch
import glob as glob
import os
import time
from copy import deepcopy
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

from model import create_model
from config import (
    NUM_CLASSES, DEVICE, CLASSES, RESIZE_TO, NUM_WORKERS
)
from datasets import (
    create_test_dataset, create_test_loader
)
from mean_ap import MeanAveragePrecision

COLORS = np.random.uniform(130, 255, size=(len(CLASSES), 3))

model = create_model(num_classes=NUM_CLASSES)
checkpoint = torch.load('C:\\Users\\lemon\\Desktop\\outputs_3\\last_model.pth', map_location=DEVICE)
model.load_state_dict(checkpoint['model_state_dict'])
model.to(DEVICE).eval()

DIR_TEST = 'C:\\Users\\lemon\\Desktop\\ap\\OPIXray\\map_test_org'
test_images = glob.glob(f"{DIR_TEST}\\*.jpg")
print(f"Test instances: {len(test_images)}")

detection_threshold = 0.2
frame_count = 0
total_fps = 0

targets = {}
targets['boxes'] = []
targets['labels'] = []
targets['image_id'] = []

test_dataset = create_test_dataset()
test_data_loader = create_test_loader(test_dataset, NUM_WORKERS)

for data in test_data_loader.dataset:
    for j in range(0, 4):
        data[1]['boxes'][0][j] = int(data[1]['boxes'][0][j])
    targets['boxes'].append(data[1]['boxes'])
    targets['labels'].append(data[1]['labels'])
    targets['image_id'].append(data[1]['image_id'])

pred_scores = []
pred_boxes = []
pred_classes = []

for i in range(len(test_images)):
    # get the image file name for saving output later on
    image_name = test_images[i].split(os.path.sep)[-1].split('.')[0]
    image = cv2.imread(test_images[i])
    orig_image = image.copy()
    # BGR to RGB
    image = cv2.cvtColor(orig_image, cv2.COLOR_BGR2RGB).astype(np.float32)
    image_resized = cv2.resize(image, (RESIZE_TO, RESIZE_TO))
    orig_image = image_resized.copy()
    # make the pixel range between 0 and 1
    image_resized /= 255.0
    # bring color channels to front
    image_resized = np.transpose(image_resized, (2, 0, 1)).astype(np.float32)
    # convert to tensor
    image_resized = torch.tensor(image_resized, dtype=torch.float).cuda()
    # add batch dimension
    image_resized = torch.unsqueeze(image_resized, 0)
    start_time = time.time()
    with torch.no_grad():
        outputs = model(image_resized.to(DEVICE))
    end_time = time.time()
    # get the current fps
    fps = 1 / (end_time - start_time)
    # add `fps` to `total_fps`
    total_fps += fps
    # increment frame count
    frame_count += 1
    # load all detections to CPU for further operations
    outputs = [{k: v.to('cpu') for k, v in t.items()} for t in outputs]
    # carry further only if there are detected boxes
    if len(outputs[0]['boxes']) != 0:
        boxes = outputs[0]['boxes'].data.numpy()
        scores = outputs[0]['scores'].data.numpy()
        classes = outputs[0]['labels'].data.numpy()
        # filter out boxes according to `detection_threshold`
        boxes = boxes[scores >= detection_threshold].astype(np.int32)
        draw_boxes = boxes.copy()
        # get all the predicted class names
        # pred_classes.append(np.array([CLASSES[i] for i in outputs[0]['labels'].cpu().numpy()]))
        classes = classes[scores >= detection_threshold]
        scores = scores[scores >= detection_threshold]
        pred_scores.append(scores)
        pred_boxes.append(boxes)
        pred_classes.append(classes)
        ground = targets['boxes']
        # print(pred)
        # print(scores)
        # print(ground[0].cpu().detach().numpy())
    else:
        pred_scores.append(np.array([0]))
        pred_boxes.append(np.array([[0, 0, 0, 0]]))
        pred_classes.append(np.array([0]))
    print(f"Image {i+1} done...")
    print('-'*50)

print('\nTEST PREDICTIONS COMPLETE')
avg_fps = total_fps / frame_count
print(f"Average FPS: {avg_fps:.3f}\n")

send_pred_map = []
send_ground_map = []
for i in range(15):
    a = torch.from_numpy(pred_scores[i])
    b = targets['labels'][i]
    c = torch.from_numpy(np.array(pred_boxes[i]))
    d = torch.from_numpy(targets['boxes'][i].numpy())
    e = torch.from_numpy(pred_classes[i])
    print(i)
    send_pred_map.append({"boxes": c, "scores": a, "labels": e})
    send_ground_map.append({"boxes": d, "labels": b})

metric = MeanAveragePrecision()
metric.update(send_pred_map, send_ground_map)
print(metric.compute())
Expected behavior
I just want to calculate mAP for my model in PyTorch.
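For reference, this is the input format I understand the upstream torchmetrics MeanAveragePrecision expects (a minimal, self-contained sketch with made-up boxes; note the box coordinates are float tensors, not the int32 arrays my script builds):

import torch
from torchmetrics.detection.mean_ap import MeanAveragePrecision

# Made-up example: boxes in xyxy format as floats, labels as integer class ids.
preds = [{
    "boxes": torch.tensor([[25.0, 16.0, 300.0, 150.0]]),
    "scores": torch.tensor([0.87]),
    "labels": torch.tensor([1]),
}]
target = [{
    "boxes": torch.tensor([[28.0, 20.0, 295.0, 145.0]]),
    "labels": torch.tensor([1]),
}]

metric = MeanAveragePrecision()
metric.update(preds, target)
print(metric.compute())  # dict with map, map_50, map_75, mar_* entries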
Environment
TorchMetrics version: 0.11.3 (installed with pip in a conda env)
Python & PyTorch version: 3.9.16 & 1.13.1
OS: Windows 11
Hi @bugramurat, thanks for reporting this issue.
Would it be possible for you to make a dump of send_pred_map and send_ground_map (using torch.save, for example) in your code and send it to us? That is the essential input torchmetrics is evaluated on.
I cannot reproduce the error, because I am unable to run your script without model + data.
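Something along these lines, placed right before the metric.update(...) call, would be enough for us to replay the compute step locally (the filename is just an example):

import torch

# Dump the exact tensors that go into the metric and attach the file to this issue.
torch.save(
    {"preds": send_pred_map, "target": send_ground_map},
    "map_inputs.pt",  # example filename
)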