From 3ec408c7dfc1738202102b0b655fc3e9343db8eb Mon Sep 17 00:00:00 2001 From: MiXaiLL76 Date: Mon, 24 Jun 2024 18:14:09 +0300 Subject: [PATCH] bump version --- faster_coco_eval/core/coco.py | 57 +++--------- faster_coco_eval/core/cocoeval.py | 114 +++++++++++++---------- faster_coco_eval/core/faster_eval_api.py | 2 +- faster_coco_eval/core/mask.py | 77 +-------------- faster_coco_eval/version.py | 2 +- 5 files changed, 80 insertions(+), 172 deletions(-) diff --git a/faster_coco_eval/core/coco.py b/faster_coco_eval/core/coco.py index e684e12..b009c62 100644 --- a/faster_coco_eval/core/coco.py +++ b/faster_coco_eval/core/coco.py @@ -1,48 +1,5 @@ -__author__ = "tylin" -__version__ = "2.0" -# Interface for accessing the Microsoft COCO dataset. - -# Microsoft COCO is a large image dataset designed for object detection, -# segmentation, and caption generation. pycocotools is a Python API that -# assists in loading, parsing and visualizing the annotations in COCO. -# Please visit http://mscoco.org/ for more information on COCO, including -# for the data, paper, and tutorials. The exact format of the annotations -# is also described on the COCO website. For example usage of the pycocotools -# please see pycocotools_demo.ipynb. In addition to this API, please download both # noqa: E501 -# the COCO images and annotations in order to run the demo. - -# An alternative to using the API is to load the annotations directly -# into Python dictionary -# Using the API provides additional utility functions. Note that this API -# supports both *instance* and *caption* annotations. In the case of -# captions not all functions are defined (e.g. categories are undefined). - -# The following API functions are defined: -# COCO - COCO api class that loads COCO annotation file and prepare data structures. # noqa: E501 -# decodeMask - Decode binary mask M encoded via run-length encoding. -# encodeMask - Encode binary mask M using run-length encoding. -# getAnnIds - Get ann ids that satisfy given filter conditions. -# getCatIds - Get cat ids that satisfy given filter conditions. -# getImgIds - Get img ids that satisfy given filter conditions. -# loadAnns - Load anns with the specified ids. -# loadCats - Load cats with the specified ids. -# loadImgs - Load imgs with the specified ids. -# annToMask - Convert segmentation in an annotation to binary mask. -# showAnns - Display the specified annotations. -# loadRes - Load algorithm results and create API for accessing them. -# download - Download COCO images from mscoco.org server. -# Throughout the API "ann"=annotation, "cat"=category, and "img"=image. -# Help on each functions can be accessed by: "help COCO>function". - -# See also COCO>decodeMask, -# COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds, -# COCO>getImgIds, COCO>loadAnns, COCO>loadCats, -# COCO>loadImgs, COCO>annToMask, COCO>showAnns - -# Microsoft COCO Toolbox. version 2.0 -# Data, paper, and tutorials available at: http://mscoco.org/ -# Code written by Piotr Dollar and Tsung-Yi Lin, 2014. -# Licensed under the Simplified BSD License [see bsd.txt] +# Original work Copyright (c) Piotr Dollar and Tsung-Yi Lin, 2014. 
+# Modified work Copyright (c) 2024 MiXaiLL76 import json import logging @@ -54,6 +11,7 @@ import faster_coco_eval.faster_eval_api_cpp as _C from faster_coco_eval.core import mask as maskUtils +from faster_coco_eval.version import __author__, __version__ logger = logging.getLogger(__name__) @@ -576,3 +534,12 @@ def img_ann_idx_map(self): @property def img_cat_ann_idx_map(self): return self.imgCatToAnnsIdx + + def __repr__(self): + s = self.__class__.__name__ + s += "(" + s += "annotation_file" + s += ") # " + s += "__author__='{}'; ".format(__author__) + s += "__version__='{}';".format(__version__) + return s diff --git a/faster_coco_eval/core/cocoeval.py b/faster_coco_eval/core/cocoeval.py index dd31fe1..1fd9da3 100644 --- a/faster_coco_eval/core/cocoeval.py +++ b/faster_coco_eval/core/cocoeval.py @@ -1,4 +1,5 @@ -__author__ = "tsungyi" +# Original work Copyright (c) Piotr Dollar and Tsung-Yi Lin, 2014. +# Modified work Copyright (c) 2024 MiXaiLL76 import logging from collections import defaultdict @@ -8,60 +9,64 @@ import faster_coco_eval.faster_eval_api_cpp as _C from faster_coco_eval.core import mask as maskUtils from faster_coco_eval.core.coco import COCO +from faster_coco_eval.version import __author__, __version__ logger = logging.getLogger(__name__) class COCOeval: - # Interface for evaluating detection on the Microsoft COCO dataset. - # - # The usage for CocoEval is as follows: - # cocoGt=..., cocoDt=... # load dataset and results - # E = CocoEval(cocoGt,cocoDt); # initialize CocoEval object - # E.params.recThrs = ...; # set parameters as desired - # E.evaluate(); # run per image evaluation - # E.accumulate(); # accumulate per image results - # E.summarize(); # display summary metrics of results - # For example usage see evalDemo.m and http://mscoco.org/. - # - # The evaluation parameters are as follows (defaults in brackets): - # imgIds - [all] N img ids to use for evaluation - # catIds - [all] K cat ids to use for evaluation - # iouThrs - [.5:.05:.95] T=10 IoU thresholds for evaluation - # recThrs - [0:.01:1] R=101 recall thresholds for evaluation - # areaRng - [...] A=4 object area ranges for evaluation - # maxDets - [1 10 100] M=3 thresholds on max detections per image - # iouType - ['segm'] set iouType to 'segm', 'bbox' or 'keypoints' - # iouType replaced the now DEPRECATED useSegm parameter. - # useCats - [1] if true use category labels for evaluation - # Note: if useCats=0 category labels are ignored as in proposal scoring. - # Note: multiple areaRngs [Ax2] and maxDets [Mx1] can be specified. - # - # evaluate(): evaluates detections on every image and every category and - # concats the results into the "evalImgs" with fields: - # dtIds - [1xD] id for each of the D detections (dt) - # gtIds - [1xG] id for each of the G ground truths (gt) - # dtMatches - [TxD] matching gt id at each IoU or 0 - # gtMatches - [TxG] matching dt id at each IoU or 0 - # dtScores - [1xD] confidence of each dt - # gtIgnore - [1xG] ignore flag for each gt - # dtIgnore - [TxD] ignore flag for each dt at each IoU - # - # accumulate(): accumulates the per-image, per-category evaluation - # results in "evalImgs" into the dictionary "eval" with fields: - # params - parameters used for evaluation - # date - date evaluation was performed - # counts - [T,R,K,A,M] parameter dimensions (see above) - # precision - [TxRxKxAxM] precision for every evaluation setting - # recall - [TxKxAxM] max recall for every evaluation setting - # Note: precision and recall==-1 for settings with no gt objects. 
- # - # See also coco, mask, pycocoDemo, pycocoEvalDemo - # - # Microsoft COCO Toolbox. version 2.0 - # Data, paper, and tutorials available at: http://mscoco.org/ - # Code written by Piotr Dollar and Tsung-Yi Lin, 2015. - # Licensed under the Simplified BSD License [see coco/license.txt] + """Interface for evaluating detection on the Microsoft COCO dataset. + + The usage for CocoEval is as follows: + cocoGt=..., cocoDt=... # load dataset and results + E = CocoEval(cocoGt,cocoDt); # initialize CocoEval object + E.params.recThrs = ...; # set parameters as desired + E.evaluate(); # run per image evaluation + E.accumulate(); # accumulate per image results + E.summarize(); # display summary metrics of results + For example usage see evalDemo.m and http://mscoco.org/. + + The evaluation parameters are as follows (defaults in brackets): + imgIds - [all] N img ids to use for evaluation + catIds - [all] K cat ids to use for evaluation + iouThrs - [.5:.05:.95] T=10 IoU thresholds for evaluation + recThrs - [0:.01:1] R=101 recall thresholds for evaluation + areaRng - [...] A=4 object area ranges for evaluation + maxDets - [1 10 100] M=3 thresholds on max detections per image + iouType - ['segm'] set iouType to 'segm', 'bbox' or 'keypoints' + iouType replaced the now DEPRECATED useSegm parameter. + useCats - [1] if true use category labels for evaluation + Note: if useCats=0 category labels are ignored as in proposal scoring. + Note: multiple areaRngs [Ax2] and maxDets [Mx1] can be specified. + + evaluate(): evaluates detections on every image and every category and + concats the results into the "evalImgs" with fields: + dtIds - [1xD] id for each of the D detections (dt) + gtIds - [1xG] id for each of the G ground truths (gt) + dtMatches - [TxD] matching gt id at each IoU or 0 + gtMatches - [TxG] matching dt id at each IoU or 0 + dtScores - [1xD] confidence of each dt + gtIgnore - [1xG] ignore flag for each gt + dtIgnore - [TxD] ignore flag for each dt at each IoU + + accumulate(): accumulates the per-image, per-category evaluation + results in "evalImgs" into the dictionary "eval" with fields: + params - parameters used for evaluation + date - date evaluation was performed + counts - [T,R,K,A,M] parameter dimensions (see above) + precision - [TxRxKxAxM] precision for every evaluation setting + recall - [TxKxAxM] max recall for every evaluation setting + Note: precision and recall==-1 for settings with no gt objects. + + See also coco, mask, pycocoDemo, pycocoEvalDemo + + Microsoft COCO Toolbox. version 2.0 + Data, paper, and tutorials available at: http://mscoco.org/ + Code written by Piotr Dollar and Tsung-Yi Lin, 2015. + Licensed under the Simplified BSD License [see coco/license.txt] + + """ + def __init__( self, cocoGt=None, @@ -525,6 +530,15 @@ def _summarizeKps(): def __str__(self): self.summarize() + def __repr__(self): + s = self.__class__.__name__ + s += "(" + s += "annotation_file" + s += ") # " + s += "__author__='{}'; ".format(__author__) + s += "__version__='{}';".format(__version__) + return s + class Params: """Params for coco evaluation api.""" diff --git a/faster_coco_eval/core/faster_eval_api.py b/faster_coco_eval/core/faster_eval_api.py index 75a8a29..51ae3c5 100644 --- a/faster_coco_eval/core/faster_eval_api.py +++ b/faster_coco_eval/core/faster_eval_api.py @@ -1,5 +1,5 @@ # Original work Copyright (c) Facebook, Inc. and its affiliates. 
-# Modified work Copyright (c) 2021 Sartorius AG +# Modified work Copyright (c) 2024 MiXaiLL76 import logging import time diff --git a/faster_coco_eval/core/mask.py b/faster_coco_eval/core/mask.py index 2854bfc..e8f9f18 100644 --- a/faster_coco_eval/core/mask.py +++ b/faster_coco_eval/core/mask.py @@ -1,80 +1,7 @@ -__author__ = "tsungyi" - -# import faster_coco_eval.mask_api_cpp as _mask -# import faster_coco_eval.mask_api_cpp as _mask_old +# Original work Copyright (c) Piotr Dollar and Tsung-Yi Lin, 2014. +# Modified work Copyright (c) 2024 MiXaiLL76 import faster_coco_eval.mask_api_new_cpp as _mask -# Interface for manipulating masks stored in RLE format. -# -# RLE is a simple yet efficient format for storing binary masks. RLE -# first divides a vector (or vectorized image) into a series of piecewise -# constant regions and then for each piece simply stores the length of -# that piece. For example, given M=[0 0 1 1 1 0 1] the RLE counts would -# be [2 3 1 1], or for M=[1 1 1 1 1 1 0] the counts would be [0 6 1] -# (note that the odd counts are always the numbers of zeros). Instead of -# storing the counts directly, additional compression is achieved with a -# variable bitrate representation based on a common scheme called LEB128. -# -# Compression is greatest given large piecewise constant regions. -# Specifically, the size of the RLE is proportional to the number of -# *boundaries* in M (or for an image the number of boundaries in the y -# direction). Assuming fairly simple shapes, the RLE representation is -# O(sqrt(n)) where n is number of pixels in the object. Hence space usage -# is substantially lower, especially for large simple objects (large n). -# -# Many common operations on masks can be computed directly using the RLE -# (without need for decoding). This includes computations such as area, -# union, intersection, etc. All of these operations are linear in the -# size of the RLE, in other words they are O(sqrt(n)) where n is the area -# of the object. Computing these operations on the original mask is O(n). -# Thus, using the RLE can result in substantial computational savings. -# -# The following API functions are defined: -# encode - Encode binary masks using RLE. -# decode - Decode binary masks encoded via RLE. -# merge - Compute union or intersection of encoded masks. -# iou - Compute intersection over union between masks. -# area - Compute area of encoded masks. -# toBbox - Get bounding boxes surrounding encoded masks. -# frPyObjects - Convert polygon, bbox, and uncompressed RLE to encoded RLE mask. # noqa: E501 -# -# Usage: -# Rs = encode( masks ) -# masks = decode( Rs ) -# R = merge( Rs, intersect=false ) -# o = iou( dt, gt, iscrowd ) -# a = area( Rs ) -# bbs = toBbox( Rs ) -# Rs = frPyObjects( [pyObjects], h, w ) -# -# In the API the following formats are used: -# Rs - [dict] Run-length encoding of binary masks -# R - dict Run-length encoding of binary mask -# masks - [hxwxn] Binary mask(s) (must have type np.ndarray(dtype=uint8) in column-major order) # noqa: E501 -# iscrowd - [nx1] list of np.ndarray. 1 indicates corresponding gt image has crowd region to ignore # noqa: E501 -# bbs - [nx4] Bounding box(es) stored as [x y w h] -# poly - Polygon stored as [[x1 y1 x2 y2...],[x1 y1 ...],...] (2D list) -# dt,gt - May be either bounding boxes or encoded masks -# Both poly and bbs are 0-indexed (bbox=[0 0 1 1] encloses first pixel). -# -# Finally, a note about the intersection over union (iou) computation. 
-# The standard iou of a ground truth (gt) and detected (dt) object is -# iou(gt,dt) = area(intersect(gt,dt)) / area(union(gt,dt)) -# For "crowd" regions, we use a modified criteria. If a gt object is -# marked as "iscrowd", we allow a dt to match any subregion of the gt. -# Choosing gt' in the crowd gt that best matches the dt can be done using -# gt'=intersect(dt,gt). Since by definition union(gt',dt)=dt, computing -# iou(gt,dt,iscrowd) = iou(gt',dt) = area(intersect(gt,dt)) / area(dt) -# For crowd gt regions we use this modified criteria above for the iou. -# -# To compile run "python setup.py build_ext --inplace" -# Please do not contact us for help with compiling. -# -# Microsoft COCO Toolbox. version 2.0 -# Data, paper, and tutorials available at: http://mscoco.org/ -# Code written by Piotr Dollar and Tsung-Yi Lin, 2015. -# Licensed under the Simplified BSD License [see coco/license.txt] - iou = _mask.iou merge = _mask.merge frPyObjects = _mask.frPyObjects diff --git a/faster_coco_eval/version.py b/faster_coco_eval/version.py index 5d154e2..b26eadd 100644 --- a/faster_coco_eval/version.py +++ b/faster_coco_eval/version.py @@ -1,2 +1,2 @@ -__version__ = "1.5.7" +__version__ = "1.6.0" __author__ = "MiXaiLL76"
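---

Usage note (not part of the patch): a minimal sketch of how the patched classes are exercised, based on the API described in the docstrings above (COCO.loadRes, COCOeval with cocoGt/cocoDt and an iouType, then evaluate/accumulate/summarize) and on the new __repr__ added in this change. The file names and the "bbox" iouType below are illustrative assumptions, not taken from the patch.

    # Hypothetical usage sketch; paths and iouType are assumptions.
    from faster_coco_eval.core.coco import COCO
    from faster_coco_eval.core.cocoeval import COCOeval

    cocoGt = COCO("instances_val.json")         # ground-truth annotations (hypothetical path)
    cocoDt = cocoGt.loadRes("detections.json")  # algorithm results (hypothetical path)

    E = COCOeval(cocoGt, cocoDt, iouType="bbox")
    E.evaluate()    # run per-image evaluation
    E.accumulate()  # accumulate per-image results
    E.summarize()   # display summary metrics

    # The new __repr__ reports the toolkit author and version, e.g.:
    # COCOeval(annotation_file) # __author__='MiXaiLL76'; __version__='1.6.0';
    print(repr(E))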