From d9dc88ae1087e0f21934c673bdc256eb196c1598 Mon Sep 17 00:00:00 2001 From: jinsheng Date: Wed, 9 Jun 2021 16:04:29 +0800 Subject: [PATCH] rebase master --- configs/_base_/datasets/interhand3d.py | 2 +- demo/body3d_two_stage_img_demo.py | 23 +++ mmpose/apis/inference_3d.py | 76 ++++++--- .../datasets/animal/animal_macaque_dataset.py | 1 + .../datasets/animal/animal_pose_dataset.py | 1 + .../datasets/body3d/body3d_h36m_dataset.py | 2 +- .../bottom_up/bottom_up_coco_wholebody.py | 122 ++++---------- .../datasets/top_down/topdown_h36m_dataset.py | 152 +++--------------- mmpose/deprecated.py | 2 +- tests/test_datasets/test_animal_dataset.py | 7 + tests/test_datasets/test_body3d_dataset.py | 2 + tests/test_datasets/test_bottom_up_dataset.py | 10 +- tests/test_datasets/test_face_dataset.py | 56 +++++++ tests/test_datasets/test_hand_dataset.py | 6 + 14 files changed, 208 insertions(+), 254 deletions(-) diff --git a/configs/_base_/datasets/interhand3d.py b/configs/_base_/datasets/interhand3d.py index fb87639490..ac9c2e427b 100644 --- a/configs/_base_/datasets/interhand3d.py +++ b/configs/_base_/datasets/interhand3d.py @@ -1,5 +1,5 @@ dataset_info = dict( - dataset_name='interhand2d', + dataset_name='interhand3d', paper_info=dict( author='Moon, Gyeongsik and Yu, Shoou-I and Wen, He and ' 'Shiratori, Takaaki and Lee, Kyoung Mu', diff --git a/demo/body3d_two_stage_img_demo.py b/demo/body3d_two_stage_img_demo.py index b1b0da28ec..05b39f53d5 100644 --- a/demo/body3d_two_stage_img_demo.py +++ b/demo/body3d_two_stage_img_demo.py @@ -1,5 +1,6 @@ import os import os.path as osp +import warnings from argparse import ArgumentParser import mmcv @@ -10,6 +11,7 @@ inference_top_down_pose_model, vis_3d_pose_result) from mmpose.apis.inference import init_pose_model from mmpose.core import SimpleCamera +from mmpose.datasets import DatasetInfo def _keypoint_camera_to_world(keypoints, @@ -150,6 +152,16 @@ def main(): 'model is supported for the 1st stage (2D pose detection)' dataset = pose_det_model.cfg.data['test']['type'] + dataset_info = pose_det_model.cfg.data['test'].get( + 'dataset_info', None) + if dataset_info is None: + warnings.warn( + 'Please set `dataset_info` in the config.' + 'Check https://github.com/open-mmlab/mmpose/pull/663 ' + 'for details.', DeprecationWarning) + else: + dataset_info = DatasetInfo(dataset_info) + img_keys = list(coco.imgs.keys()) for i in mmcv.track_iter_progress(range(len(img_keys))): @@ -174,6 +186,7 @@ def main(): bbox_thr=None, format='xywh', dataset=dataset, + dataset_info=dataset_info, return_heatmap=False, outputs=None) @@ -193,6 +206,14 @@ def main(): '"PoseLifter" model is supported for the 2nd stage ' \ '(2D-to-3D lifting)' dataset = pose_lift_model.cfg.data['test']['type'] + dataset_info = pose_lift_model.cfg.data['test'].get('dataset_info', None) + if dataset_info is None: + warnings.warn( + 'Please set `dataset_info` in the config.' 
+ 'Check https://github.com/open-mmlab/mmpose/pull/663 for details.', + DeprecationWarning) + else: + dataset_info = DatasetInfo(dataset_info) camera_params = None if args.camera_param_file is not None: @@ -207,6 +228,7 @@ def main(): pose_lift_model, pose_results_2d=[pose_det_results], dataset=dataset, + dataset_info=dataset_info, with_track_id=False) image_name = pose_det_results[0]['image_name'] @@ -255,6 +277,7 @@ def main(): pose_lift_model, result=pose_lift_results_vis, img=pose_lift_results[0]['image_name'], + dataset_info=dataset_info, out_file=out_file) diff --git a/mmpose/apis/inference_3d.py b/mmpose/apis/inference_3d.py index fdb4bfb6d5..06e3d0a6bc 100644 --- a/mmpose/apis/inference_3d.py +++ b/mmpose/apis/inference_3d.py @@ -1,3 +1,5 @@ +import warnings + import numpy as np import torch from mmcv.parallel import collate, scatter @@ -71,6 +73,7 @@ def _collate_pose_sequence(pose_results, with_track_id=True): def inference_pose_lifter_model(model, pose_results_2d, dataset, + dataset_info=None, with_track_id=True): """Inference 3D pose from 2D pose sequences using a pose lifter model. @@ -100,11 +103,19 @@ def inference_pose_lifter_model(model, cfg = model.cfg test_pipeline = Compose(cfg.test_pipeline) - flip_pairs = None - if dataset == 'Body3DH36MDataset': - flip_pairs = [[1, 4], [2, 5], [3, 6], [11, 14], [12, 15], [13, 16]] + if dataset_info is not None: + flip_pairs = dataset_info.flip_pairs else: - raise NotImplementedError() + warnings.warn( + 'dataset is deprecated.' + 'Please set `dataset_info` in the config.' + 'Check https://github.com/open-mmlab/mmpose/pull/663 for details.', + DeprecationWarning) + # TODO: These will be removed in the later versions. + if dataset == 'Body3DH36MDataset': + flip_pairs = [[1, 4], [2, 5], [3, 6], [11, 14], [12, 15], [13, 16]] + else: + raise NotImplementedError() pose_sequences_2d = _collate_pose_sequence(pose_results_2d, with_track_id) @@ -184,6 +195,7 @@ def vis_3d_pose_result(model, img=None, dataset='Body3DH36MDataset', kpt_score_thr=0.3, + dataset_info=None, show=False, out_file=None): """Visualize the 3D pose estimation results. @@ -192,30 +204,42 @@ def vis_3d_pose_result(model, model (nn.Module): The loaded model. result (list[dict]) """ - if hasattr(model, 'module'): - model = model.module - palette = np.array([[255, 128, 0], [255, 153, 51], [255, 178, 102], - [230, 230, 0], [255, 153, 255], [153, 204, 255], - [255, 102, 255], [255, 51, 255], [102, 178, 255], - [51, 153, 255], [255, 153, 153], [255, 102, 102], - [255, 51, 51], [153, 255, 153], [102, 255, 102], - [51, 255, 51], [0, 255, 0], [0, 0, 255], [255, 0, 0], - [255, 255, 255]]) - - if dataset == 'Body3DH36MDataset': - skeleton = [[0, 1], [1, 2], [2, 3], [0, 4], [4, 5], [5, 6], [0, 7], - [7, 8], [8, 9], [9, 10], [8, 11], [11, 12], [12, 13], - [8, 14], [14, 15], [15, 16]] - - pose_kpt_color = palette[[ - 9, 0, 0, 0, 16, 16, 16, 9, 9, 9, 9, 16, 16, 16, 0, 0, 0 - ]] - pose_link_color = palette[[ - 0, 0, 0, 16, 16, 16, 9, 9, 9, 9, 16, 16, 16, 0, 0, 0 - ]] + if dataset_info is not None: + skeleton = dataset_info.skeleton + pose_kpt_color = dataset_info.pose_kpt_color + pose_link_color = dataset_info.pose_link_color else: - raise NotImplementedError + warnings.warn( + 'dataset is deprecated.' + 'Please set `dataset_info` in the config.' + 'Check https://github.com/open-mmlab/mmpose/pull/663 for details.', + DeprecationWarning) + # TODO: These will be removed in the later versions. 
+ palette = np.array([[255, 128, 0], [255, 153, 51], [255, 178, 102], + [230, 230, 0], [255, 153, 255], [153, 204, 255], + [255, 102, 255], [255, 51, 255], [102, 178, 255], + [51, 153, 255], [255, 153, 153], [255, 102, 102], + [255, 51, 51], [153, 255, 153], [102, 255, 102], + [51, 255, 51], [0, 255, 0], [0, 0, 255], + [255, 0, 0], [255, 255, 255]]) + + if dataset == 'Body3DH36MDataset': + skeleton = [[0, 1], [1, 2], [2, 3], [0, 4], [4, 5], [5, 6], [0, 7], + [7, 8], [8, 9], [9, 10], [8, 11], [11, 12], [12, 13], + [8, 14], [14, 15], [15, 16]] + + pose_kpt_color = palette[[ + 9, 0, 0, 0, 16, 16, 16, 9, 9, 9, 9, 16, 16, 16, 0, 0, 0 + ]] + pose_link_color = palette[[ + 0, 0, 0, 16, 16, 16, 9, 9, 9, 9, 16, 16, 16, 0, 0, 0 + ]] + else: + raise NotImplementedError + + if hasattr(model, 'module'): + model = model.module img = model.show_result( result, diff --git a/mmpose/datasets/datasets/animal/animal_macaque_dataset.py b/mmpose/datasets/datasets/animal/animal_macaque_dataset.py index 18b2a7d950..73cb5a5865 100644 --- a/mmpose/datasets/datasets/animal/animal_macaque_dataset.py +++ b/mmpose/datasets/datasets/animal/animal_macaque_dataset.py @@ -7,6 +7,7 @@ from mmcv import Config from xtcocotools.cocoeval import COCOeval +from ....core.post_processing import oks_nms, soft_oks_nms from ...builder import DATASETS from .._base_ import Kpt2dSviewRgbImgTopDownDataset diff --git a/mmpose/datasets/datasets/animal/animal_pose_dataset.py b/mmpose/datasets/datasets/animal/animal_pose_dataset.py index a16762e340..fed60a7ef7 100644 --- a/mmpose/datasets/datasets/animal/animal_pose_dataset.py +++ b/mmpose/datasets/datasets/animal/animal_pose_dataset.py @@ -7,6 +7,7 @@ from mmcv import Config from xtcocotools.cocoeval import COCOeval +from ....core.post_processing import oks_nms, soft_oks_nms from ...builder import DATASETS from .._base_ import Kpt2dSviewRgbImgTopDownDataset diff --git a/mmpose/datasets/datasets/body3d/body3d_h36m_dataset.py b/mmpose/datasets/datasets/body3d/body3d_h36m_dataset.py index c8370f788c..19b37875eb 100644 --- a/mmpose/datasets/datasets/body3d/body3d_h36m_dataset.py +++ b/mmpose/datasets/datasets/body3d/body3d_h36m_dataset.py @@ -7,8 +7,8 @@ from mmcv import Config from mmpose.core.evaluation import keypoint_mpjpe -from ...builder import DATASETS from mmpose.datasets.datasets._base_ import Kpt3dSviewKpt2dDataset +from ...builder import DATASETS @DATASETS.register_module() diff --git a/mmpose/datasets/datasets/bottom_up/bottom_up_coco_wholebody.py b/mmpose/datasets/datasets/bottom_up/bottom_up_coco_wholebody.py index 8abccabbba..32bcdaa8eb 100644 --- a/mmpose/datasets/datasets/bottom_up/bottom_up_coco_wholebody.py +++ b/mmpose/datasets/datasets/bottom_up/bottom_up_coco_wholebody.py @@ -1,5 +1,7 @@ +import warnings + import numpy as np -from xtcocotools.coco import COCO +from mmcv import Config from xtcocotools.cocoeval import COCOeval from mmpose.datasets.builder import DATASETS @@ -31,6 +33,7 @@ class BottomUpCocoWholeBodyDataset(BottomUpCocoDataset): Default: None. data_cfg (dict): config pipeline (list[dict | callable]): A sequence of data transforms. + dataset_info (DatasetInfo): A class containing all dataset info. test_mode (bool): Store True when building test or validation dataset. Default: False. 
""" @@ -40,18 +43,26 @@ def __init__(self, img_prefix, data_cfg, pipeline, + dataset_info=None, test_mode=False): - super(BottomUpCocoDataset, - self).__init__(ann_file, img_prefix, data_cfg, pipeline, - test_mode) - self.ann_info['flip_pairs'] = self._make_flip_pairs() - self.ann_info['flip_index'] = self.get_flip_index_from_flip_pairs( - self.ann_info['flip_pairs']) + if dataset_info is None: + warnings.warn( + 'dataset_info is missing. ' + 'Check https://github.com/open-mmlab/mmpose/pull/663 ' + 'for details.', DeprecationWarning) + cfg = Config.fromfile('configs/_base_/datasets/coco_wholebody.py') + dataset_info = cfg._cfg_dict['dataset_info'] + + super(BottomUpCocoDataset, self).__init__( + ann_file, + img_prefix, + data_cfg, + pipeline, + dataset_info=dataset_info, + test_mode=test_mode) self.ann_info['use_different_joint_weights'] = False - self.ann_info['joint_weights'] = \ - np.ones((self.ann_info['num_joints'], 1), dtype=np.float32) self.body_num = 17 self.foot_num = 6 @@ -59,84 +70,8 @@ def __init__(self, self.left_hand_num = 21 self.right_hand_num = 21 - # 'https://github.com/jin-s13/COCO-WholeBody/blob/master/' - # 'evaluation/myeval_wholebody.py#L170' - self.sigmas_body = [ - 0.026, 0.025, 0.025, 0.035, 0.035, 0.079, 0.079, 0.072, 0.072, - 0.062, 0.062, 0.107, 0.107, 0.087, 0.087, 0.089, 0.089 - ] - self.sigmas_foot = [0.068, 0.066, 0.066, 0.092, 0.094, 0.094] - self.sigmas_face = [ - 0.042, 0.043, 0.044, 0.043, 0.040, 0.035, 0.031, 0.025, 0.020, - 0.023, 0.029, 0.032, 0.037, 0.038, 0.043, 0.041, 0.045, 0.013, - 0.012, 0.011, 0.011, 0.012, 0.012, 0.011, 0.011, 0.013, 0.015, - 0.009, 0.007, 0.007, 0.007, 0.012, 0.009, 0.008, 0.016, 0.010, - 0.017, 0.011, 0.009, 0.011, 0.009, 0.007, 0.013, 0.008, 0.011, - 0.012, 0.010, 0.034, 0.008, 0.008, 0.009, 0.008, 0.008, 0.007, - 0.010, 0.008, 0.009, 0.009, 0.009, 0.007, 0.007, 0.008, 0.011, - 0.008, 0.008, 0.008, 0.01, 0.008 - ] - self.sigmas_lefthand = [ - 0.029, 0.022, 0.035, 0.037, 0.047, 0.026, 0.025, 0.024, 0.035, - 0.018, 0.024, 0.022, 0.026, 0.017, 0.021, 0.021, 0.032, 0.02, - 0.019, 0.022, 0.031 - ] - self.sigmas_righthand = [ - 0.029, 0.022, 0.035, 0.037, 0.047, 0.026, 0.025, 0.024, 0.035, - 0.018, 0.024, 0.022, 0.026, 0.017, 0.021, 0.021, 0.032, 0.02, - 0.019, 0.022, 0.031 - ] - - self.sigmas_wholebody = ( - self.sigmas_body + self.sigmas_foot + self.sigmas_face + - self.sigmas_lefthand + self.sigmas_righthand) - - self.sigmas = np.array(self.sigmas_wholebody) - - self.coco = COCO(ann_file) - - cats = [ - cat['name'] for cat in self.coco.loadCats(self.coco.getCatIds()) - ] - self.classes = ['__background__'] + cats - self.num_classes = len(self.classes) - self._class_to_ind = dict(zip(self.classes, range(self.num_classes))) - self._class_to_coco_ind = dict(zip(cats, self.coco.getCatIds())) - self._coco_ind_to_class_ind = dict( - (self._class_to_coco_ind[cls], self._class_to_ind[cls]) - for cls in self.classes[1:]) - self.img_ids = self.coco.getImgIds() - if not test_mode: - self.img_ids = [ - img_id for img_id in self.img_ids - if len(self.coco.getAnnIds(imgIds=img_id, iscrowd=None)) > 0 - ] - self.num_images = len(self.img_ids) - self.id2name, self.name2id = self._get_mapping_id_name(self.coco.imgs) - self.dataset_name = 'coco_wholebody' - print(f'=> num_images: {self.num_images}') - @staticmethod - def _make_flip_pairs(): - body = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], - [15, 16]] - foot = [[17, 20], [18, 21], [19, 22]] - - face = [[23, 39], [24, 38], [25, 37], [26, 36], [27, 35], [28, 
34], - [29, 33], [30, 32], [40, 49], [41, 48], [42, 47], [43, 46], - [44, 45], [54, 58], [55, 57], [59, 68], [60, 67], [61, 66], - [62, 65], [63, 70], [64, 69], [71, 77], [72, 76], [73, 75], - [78, 82], [79, 81], [83, 87], [84, 86], [88, 90]] - - hand = [[91, 112], [92, 113], [93, 114], [94, 115], [95, 116], - [96, 117], [97, 118], [98, 119], [99, 120], [100, 121], - [101, 122], [102, 123], [103, 124], [104, 125], [105, 126], - [106, 127], [107, 128], [108, 129], [109, 130], [110, 131], - [111, 132]] - - return body + foot + face + hand - def _get_joints(self, anno): """Get joints for all people in an image.""" num_people = len(anno) @@ -220,11 +155,16 @@ def _do_python_keypoint_eval(self, res_file): """Keypoint evaluation using COCOAPI.""" coco_det = self.coco.loadRes(res_file) + cuts = np.cumsum([ + 0, self.body_num, self.foot_num, self.face_num, self.left_hand_num, + self.right_hand_num + ]) + coco_eval = COCOeval( self.coco, coco_det, 'keypoints_body', - np.array(self.sigmas_body), + self.sigmas[cuts[0]:cuts[1]], use_area=True) coco_eval.params.useSegm = None coco_eval.evaluate() @@ -235,7 +175,7 @@ def _do_python_keypoint_eval(self, res_file): self.coco, coco_det, 'keypoints_foot', - np.array(self.sigmas_foot), + self.sigmas[cuts[1]:cuts[2]], use_area=True) coco_eval.params.useSegm = None coco_eval.evaluate() @@ -246,7 +186,7 @@ def _do_python_keypoint_eval(self, res_file): self.coco, coco_det, 'keypoints_face', - np.array(self.sigmas_face), + self.sigmas[cuts[2]:cuts[3]], use_area=True) coco_eval.params.useSegm = None coco_eval.evaluate() @@ -257,7 +197,7 @@ def _do_python_keypoint_eval(self, res_file): self.coco, coco_det, 'keypoints_lefthand', - np.array(self.sigmas_lefthand), + self.sigmas[cuts[3]:cuts[4]], use_area=True) coco_eval.params.useSegm = None coco_eval.evaluate() @@ -268,7 +208,7 @@ def _do_python_keypoint_eval(self, res_file): self.coco, coco_det, 'keypoints_righthand', - np.array(self.sigmas_righthand), + self.sigmas[cuts[4]:cuts[5]], use_area=True) coco_eval.params.useSegm = None coco_eval.evaluate() @@ -279,7 +219,7 @@ def _do_python_keypoint_eval(self, res_file): self.coco, coco_det, 'keypoints_wholebody', - np.array(self.sigmas_wholebody), + self.sigmas, use_area=True) coco_eval.params.useSegm = None coco_eval.evaluate() diff --git a/mmpose/datasets/datasets/top_down/topdown_h36m_dataset.py b/mmpose/datasets/datasets/top_down/topdown_h36m_dataset.py index 38f1d2a6a1..2968186f87 100644 --- a/mmpose/datasets/datasets/top_down/topdown_h36m_dataset.py +++ b/mmpose/datasets/datasets/top_down/topdown_h36m_dataset.py @@ -1,18 +1,17 @@ import os +import warnings from collections import OrderedDict import json_tricks as json import numpy as np -from xtcocotools.coco import COCO +from mmcv import Config -from mmpose.core.evaluation.top_down_eval import (keypoint_epe, - keypoint_pck_accuracy) from ...builder import DATASETS -from .topdown_base_dataset import TopDownBaseDataset +from .._base_ import Kpt2dSviewRgbImgTopDownDataset @DATASETS.register_module() -class TopDownH36MDataset(TopDownBaseDataset): +class TopDownH36MDataset(Kpt2dSviewRgbImgTopDownDataset): """Human3.6M dataset for top-down 2D pose estimation. `Human3.6M: Large Scale Datasets and Predictive Methods for 3D Human @@ -45,6 +44,7 @@ class TopDownH36MDataset(TopDownBaseDataset): Default: None. data_cfg (dict): config pipeline (list[dict | callable]): A sequence of data transforms. + dataset_info (DatasetInfo): A class containing all dataset info. 
test_mode (bool): Store True when building test or validation dataset. Default: False. """ @@ -54,83 +54,31 @@ def __init__(self, img_prefix, data_cfg, pipeline, + dataset_info=None, test_mode=False): - super(TopDownH36MDataset, self).__init__( - ann_file, img_prefix, data_cfg, pipeline, test_mode=test_mode) - assert self.ann_info['num_joints'] == 17 + if dataset_info is None: + warnings.warn( + 'dataset_info is missing. ' + 'Check https://github.com/open-mmlab/mmpose/pull/663 ' + 'for details.', DeprecationWarning) + cfg = Config.fromfile('configs/_base_/datasets/h36m.py') + dataset_info = cfg._cfg_dict['dataset_info'] + + super().__init__( + ann_file, + img_prefix, + data_cfg, + pipeline, + dataset_info=dataset_info, + test_mode=test_mode) - self.ann_info['flip_pairs'] = [[1, 4], [2, 5], [3, 6], [11, 14], - [12, 15], [13, 16]] - self.ann_info['upper_body_ids'] = (0, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 16) - self.ann_info['lower_body_ids'] = (1, 2, 3, 4, 5, 6) self.ann_info['use_different_joint_weights'] = False - - self.ann_info['joint_weights'] = np.ones( - (self.ann_info['num_joints'], 1), dtype=np.float32) - - self.coco = COCO(ann_file) - self.img_ids = self.coco.getImgIds() - self.num_images = len(self.img_ids) - self.id2name, self.name2id = self._get_mapping_id_name(self.coco.imgs) - - self.dataset_name = 'h36m' self.db = self._get_db() print(f'=> num_images: {self.num_images}') print(f'=> load {len(self.db)} samples') - @staticmethod - def _get_mapping_id_name(imgs): - """ - Args: - imgs (dict): dict of image info. - - Returns: - tuple: Image name & id mapping dicts. - - - id2name (dict): Mapping image id to name. - - name2id (dict): Mapping image name to id. - """ - id2name = {} - name2id = {} - for image_id, image in imgs.items(): - file_name = image['file_name'] - id2name[image_id] = file_name - name2id[file_name] = image_id - - return id2name, name2id - - def _xywh2cs(self, x, y, w, h, padding=1.): - """This encodes bbox(x,y,w,h) into (center, scale) - - Args: - x, y, w, h - - Returns: - center (np.ndarray[float32](2,)): center of the bbox (x, y). - scale (np.ndarray[float32](2,)): scale of the bbox w & h. - """ - aspect_ratio = self.ann_info['image_size'][0] / self.ann_info[ - 'image_size'][1] - center = np.array([x + w * 0.5, y + h * 0.5], dtype=np.float32) - - if (not self.test_mode) and np.random.rand() < 0.3: - center += 0.4 * (np.random.rand(2) - 0.5) * [w, h] - - if w > aspect_ratio * h: - h = w * 1.0 / aspect_ratio - elif w < aspect_ratio * h: - w = h * aspect_ratio - - # pixel std is 200.0 - scale = np.array([w / 200.0, h / 200.0], dtype=np.float32) - # padding to include proper amount of context - scale = scale * padding - - return center, scale - def _get_db(self): """Load dataset.""" gt_db = [] @@ -236,64 +184,6 @@ def evaluate(self, outputs, res_folder, metric, **kwargs): return name_value - def _report_metric(self, res_file, metrics, pck_thr=0.05): - """Keypoint evaluation. - - Args: - res_file (str): Json file stored prediction results. - metrics (str | list[str]): Metric to be performed. - Options: 'PCK', 'PCKh', 'AUC', 'EPE'. - pck_thr (float): PCK threshold, default as 0.05. - auc_nor (float): AUC normalization factor, default as 30 pixel. - - Returns: - List: Evaluation results for evaluation metric. 
- """ - info_str = [] - - with open(res_file, 'r') as fin: - preds = json.load(fin) - assert len(preds) == len(self.db) - - outputs = [] - gts = [] - masks = [] - threshold_bbox = [] - - for pred, item in zip(preds, self.db): - outputs.append(np.array(pred['keypoints'])[:, :-1]) - gts.append(np.array(item['joints_3d'])[:, :-1]) - masks.append((np.array(item['joints_3d_visible'])[:, 0]) > 0) - if 'PCK' in metrics: - bbox = np.array(item['bbox']) - bbox_thr = np.max(bbox[2:]) - threshold_bbox.append(np.array([bbox_thr, bbox_thr])) - - outputs = np.array(outputs) - gts = np.array(gts) - masks = np.array(masks) - threshold_bbox = np.array(threshold_bbox) - - if 'PCK' in metrics: - _, pck, _ = keypoint_pck_accuracy(outputs, gts, masks, pck_thr, - threshold_bbox) - info_str.append(('PCK', pck)) - - if 'EPE' in metrics: - info_str.append(('EPE', keypoint_epe(outputs, gts, masks))) - - return info_str - - def _sort_and_unique_bboxes(self, kpts, key='bbox_id'): - """sort kpts and remove the repeated ones.""" - kpts = sorted(kpts, key=lambda x: x[key]) - num = len(kpts) - for i in range(num - 1, 0, -1): - if kpts[i][key] == kpts[i - 1][key]: - del kpts[i] - - return kpts - @staticmethod def _write_keypoint_results(keypoints, res_file): """Write results into a json file.""" diff --git a/mmpose/deprecated.py b/mmpose/deprecated.py index 50e6a0c111..28a0dbc641 100644 --- a/mmpose/deprecated.py +++ b/mmpose/deprecated.py @@ -1,6 +1,7 @@ import warnings from .datasets.builder import DATASETS +from .datasets.datasets._base_ import Kpt2dSviewRgbImgTopDownDataset from .models.builder import HEADS, POSENETS from .models.detectors import AssociativeEmbedding from .models.heads import (AEHigherResolutionHead, AESimpleHead, @@ -8,7 +9,6 @@ TopdownHeatmapMSMUHead, TopdownHeatmapMultiStageHead, TopdownHeatmapSimpleHead) -from .datasets.datasets._base_ import Kpt2dSviewRgbImgTopDownDataset @DATASETS.register_module() diff --git a/tests/test_datasets/test_animal_dataset.py b/tests/test_datasets/test_animal_dataset.py index 944b8ab3af..d84edaafbb 100644 --- a/tests/test_datasets/test_animal_dataset.py +++ b/tests/test_datasets/test_animal_dataset.py @@ -97,6 +97,7 @@ def test_animal_horse10_dataset(): pipeline=[], test_mode=False) + assert custom_dataset.dataset_name == 'horse10' assert custom_dataset.test_mode is False assert custom_dataset.num_images == 3 _ = custom_dataset[0] @@ -152,6 +153,7 @@ def test_animal_fly_dataset(): pipeline=[], test_mode=False) + assert custom_dataset.dataset_name == 'fly' assert custom_dataset.test_mode is False assert custom_dataset.num_images == 2 _ = custom_dataset[0] @@ -208,6 +210,7 @@ def test_animal_locust_dataset(): pipeline=[], test_mode=False) + assert custom_dataset.dataset_name == 'locust' assert custom_dataset.test_mode is False assert custom_dataset.num_images == 2 _ = custom_dataset[0] @@ -257,6 +260,7 @@ def test_animal_zebra_dataset(): pipeline=[], test_mode=False) + assert custom_dataset.dataset_name == 'zebra' assert custom_dataset.test_mode is False assert custom_dataset.num_images == 2 _ = custom_dataset[0] @@ -314,6 +318,7 @@ def test_animal_ATRW_dataset(): pipeline=[], test_mode=False) + assert custom_dataset.dataset_name == 'atrw' assert custom_dataset.test_mode is False assert custom_dataset.num_images == 2 _ = custom_dataset[0] @@ -373,6 +378,7 @@ def test_animal_Macaque_dataset(): pipeline=[], test_mode=False) + assert custom_dataset.dataset_name == 'macaque' assert custom_dataset.test_mode is False assert custom_dataset.num_images == 2 _ = custom_dataset[0] 
@@ -436,6 +442,7 @@ def test_animalpose_dataset(): pipeline=[], test_mode=False) + assert custom_dataset.dataset_name == 'animalpose' assert custom_dataset.test_mode is False assert custom_dataset.num_images == 2 _ = custom_dataset[0] diff --git a/tests/test_datasets/test_body3d_dataset.py b/tests/test_datasets/test_body3d_dataset.py index b977531cda..61fd43d0af 100644 --- a/tests/test_datasets/test_body3d_dataset.py +++ b/tests/test_datasets/test_body3d_dataset.py @@ -36,6 +36,7 @@ def test_body3d_h36m_dataset(): pipeline=[], test_mode=True) + assert custom_dataset.dataset_name == 'h36m' assert custom_dataset.test_mode is True _ = custom_dataset[0] @@ -151,6 +152,7 @@ def test_body3d_semi_supervision_dataset(): dataset_class = DATASETS.get(dataset) custom_dataset = dataset_class(labeled_dataset, unlabeled_dataset) item = custom_dataset[0] + assert custom_dataset.labeled_dataset.dataset_name == 'h36m' assert 'unlabeled_input' in item.keys() unlabeled_dataset = build_dataset(unlabeled_dataset) diff --git a/tests/test_datasets/test_bottom_up_dataset.py b/tests/test_datasets/test_bottom_up_dataset.py index c1275da15c..cc72c6a4dc 100644 --- a/tests/test_datasets/test_bottom_up_dataset.py +++ b/tests/test_datasets/test_bottom_up_dataset.py @@ -84,9 +84,9 @@ def test_bottom_up_COCO_dataset(): pipeline=[], test_mode=True) + assert custom_dataset.dataset_name == 'coco' assert custom_dataset.num_images == 4 _ = custom_dataset[0] - assert custom_dataset.dataset_name == 'coco' outputs = convert_coco_to_output(custom_dataset.coco) with tempfile.TemporaryDirectory() as tmpdir: @@ -135,11 +135,12 @@ def test_bottom_up_CrowdPose_dataset(): pipeline=[], test_mode=True) + assert custom_dataset.dataset_name == 'crowdpose' + image_id = 103319 assert image_id in custom_dataset.img_ids assert len(custom_dataset.img_ids) == 2 _ = custom_dataset[0] - assert custom_dataset.dataset_name == 'crowdpose' outputs = convert_coco_to_output(custom_dataset.coco) with tempfile.TemporaryDirectory() as tmpdir: @@ -190,11 +191,12 @@ def test_bottom_up_MHP_dataset(): pipeline=[], test_mode=True) + assert custom_dataset.dataset_name == 'mhp' + image_id = 2889 assert image_id in custom_dataset.img_ids assert len(custom_dataset.img_ids) == 2 _ = custom_dataset[0] - assert custom_dataset.dataset_name == 'mhp' outputs = convert_coco_to_output(custom_dataset.coco) with tempfile.TemporaryDirectory() as tmpdir: @@ -244,6 +246,8 @@ def test_bottom_up_AIC_dataset(): pipeline=[], test_mode=True) + assert custom_dataset.dataset_name == 'aic' + image_id = 1 assert image_id in custom_dataset.img_ids assert len(custom_dataset.img_ids) == 3 diff --git a/tests/test_datasets/test_face_dataset.py b/tests/test_datasets/test_face_dataset.py index 12a31072ca..423e98b7d9 100644 --- a/tests/test_datasets/test_face_dataset.py +++ b/tests/test_datasets/test_face_dataset.py @@ -95,6 +95,7 @@ def test_face_300W_dataset(): pipeline=[], test_mode=False) + assert custom_dataset.dataset_name == '300w' assert custom_dataset.test_mode is False assert custom_dataset.num_images == 2 _ = custom_dataset[0] @@ -146,6 +147,7 @@ def test_face_AFLW_dataset(): pipeline=[], test_mode=False) + assert custom_dataset.dataset_name == 'aflw' assert custom_dataset.test_mode is False assert custom_dataset.num_images == 2 _ = custom_dataset[0] @@ -197,6 +199,60 @@ def test_face_WFLW_dataset(): pipeline=[], test_mode=False) + assert custom_dataset.dataset_name == 'wflw' + assert custom_dataset.test_mode is False + assert custom_dataset.num_images == 2 + _ = custom_dataset[0] + + 
outputs = convert_db_to_output(custom_dataset.db) + + with tempfile.TemporaryDirectory() as tmpdir: + infos = custom_dataset.evaluate(outputs, tmpdir, ['NME']) + assert_almost_equal(infos['NME'], 0.0) + + with pytest.raises(KeyError): + _ = custom_dataset.evaluate(outputs, tmpdir, 'mAP') + + +def test_face_COFW_dataset(): + dataset = 'FaceCOFWDataset' + # test Face COFW datasets + dataset_class = DATASETS.get(dataset) + dataset_class.load_annotations = MagicMock() + dataset_class.coco = MagicMock() + + channel_cfg = dict( + num_output_channels=29, + dataset_joints=29, + dataset_channel=[ + list(range(29)), + ], + inference_channel=list(range(29))) + + data_cfg = dict( + image_size=[256, 256], + heatmap_size=[64, 64], + num_output_channels=channel_cfg['num_output_channels'], + num_joints=channel_cfg['dataset_joints'], + dataset_channel=channel_cfg['dataset_channel'], + inference_channel=channel_cfg['inference_channel']) + # Test + data_cfg_copy = copy.deepcopy(data_cfg) + _ = dataset_class( + ann_file='tests/data/cofw/test_cofw.json', + img_prefix='tests/data/cofw/', + data_cfg=data_cfg_copy, + pipeline=[], + test_mode=True) + + custom_dataset = dataset_class( + ann_file='tests/data/cofw/test_cofw.json', + img_prefix='tests/data/cofw/', + data_cfg=data_cfg_copy, + pipeline=[], + test_mode=False) + + assert custom_dataset.dataset_name == 'cofw' assert custom_dataset.test_mode is False assert custom_dataset.num_images == 2 _ = custom_dataset[0] diff --git a/tests/test_datasets/test_hand_dataset.py b/tests/test_datasets/test_hand_dataset.py index 6249a767f7..a79de152b6 100644 --- a/tests/test_datasets/test_hand_dataset.py +++ b/tests/test_datasets/test_hand_dataset.py @@ -93,6 +93,7 @@ def test_top_down_OneHand10K_dataset(): pipeline=[], test_mode=False) + assert custom_dataset.dataset_name == 'onehand10k' assert custom_dataset.test_mode is False assert custom_dataset.num_images == 4 _ = custom_dataset[0] @@ -149,6 +150,7 @@ def test_top_down_FreiHand_dataset(): pipeline=[], test_mode=False) + assert custom_dataset.dataset_name == 'freihand' assert custom_dataset.test_mode is False assert custom_dataset.num_images == 8 _ = custom_dataset[0] @@ -205,6 +207,7 @@ def test_top_down_RHD_dataset(): pipeline=[], test_mode=False) + assert custom_dataset.dataset_name == 'rhd2d' assert custom_dataset.test_mode is False assert custom_dataset.num_images == 3 _ = custom_dataset[0] @@ -261,6 +264,7 @@ def test_top_down_Panoptic_dataset(): pipeline=[], test_mode=False) + assert custom_dataset.dataset_name == 'panoptic_hand2d' assert custom_dataset.test_mode is False assert custom_dataset.num_images == 4 _ = custom_dataset[0] @@ -322,6 +326,7 @@ def test_top_down_InterHand2D_dataset(): pipeline=[], test_mode=False) + assert custom_dataset.dataset_name == 'interhand2d' assert custom_dataset.test_mode is False assert custom_dataset.num_images == 4 assert len(custom_dataset.db) == 6 @@ -390,6 +395,7 @@ def test_top_down_InterHand3D_dataset(): pipeline=[], test_mode=False) + assert custom_dataset.dataset_name == 'interhand3d' assert custom_dataset.test_mode is False assert custom_dataset.num_images == 4 assert len(custom_dataset.db) == 4
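
Usage sketch (not part of the diff above): the changes deprecate selecting skeleton and flip-pair metadata by hard-coded `dataset` name in favour of a `DatasetInfo` object read from the model config. Below is a minimal, hedged example of the intended call pattern for the 2D-to-3D lifting stage, assuming a pose-lifter config/checkpoint and 2D results `pose_det_results` produced by the first stage; the file paths are placeholders, and the demo script additionally post-processes the lifted keypoints before visualization.

    import warnings

    from mmpose.apis import inference_pose_lifter_model, vis_3d_pose_result
    from mmpose.apis.inference import init_pose_model
    from mmpose.datasets import DatasetInfo

    # Placeholder config/checkpoint paths; any "PoseLifter" model applies.
    pose_lift_model = init_pose_model(
        'path/to/pose_lifter_config.py',
        'path/to/checkpoint.pth',
        device='cuda:0')

    # Read dataset metadata from the test config, mirroring the demo script.
    dataset = pose_lift_model.cfg.data['test']['type']
    dataset_info = pose_lift_model.cfg.data['test'].get('dataset_info', None)
    if dataset_info is None:
        # Legacy path: falls back to the hard-coded dataset name and warns.
        warnings.warn(
            'Please set `dataset_info` in the config. '
            'Check https://github.com/open-mmlab/mmpose/pull/663 for details.',
            DeprecationWarning)
    else:
        dataset_info = DatasetInfo(dataset_info)

    # `pose_det_results` is the 2D keypoint output of the first stage.
    pose_lift_results = inference_pose_lifter_model(
        pose_lift_model,
        pose_results_2d=[pose_det_results],
        dataset=dataset,
        dataset_info=dataset_info,
        with_track_id=False)

    vis_3d_pose_result(
        pose_lift_model,
        result=pose_lift_results,
        img='path/to/image.jpg',
        dataset_info=dataset_info,
        out_file='vis_3d.jpg')

When `dataset_info` is None, the string-based branches kept in `inference_pose_lifter_model` and `vis_3d_pose_result` are still taken, but they now emit a DeprecationWarning and are marked for removal in a later version.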