mask2former_r50_8xb2-lsj-50e_coco.py
_base_ = ['./mask2former_r50_8xb2-lsj-50e_coco-panoptic.py']
# COCO instance segmentation uses only the 80 'thing' classes; with no 'stuff'
# classes the inherited panoptic model reduces to pure instance segmentation.
num_things_classes = 80
num_stuff_classes = 0
num_classes = num_things_classes + num_stuff_classes
image_size = (1024, 1024)
batch_augments = [
    dict(
        type='BatchFixedSizePad',
        size=image_size,
        img_pad_value=0,
        pad_mask=True,
        mask_pad_value=0,
        pad_seg=False)
]
data_preprocessor = dict(
    type='DetDataPreprocessor',
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    bgr_to_rgb=True,
    pad_size_divisor=32,
    pad_mask=True,
    mask_pad_value=0,
    pad_seg=False,
    batch_augments=batch_augments)
model = dict(
    data_preprocessor=data_preprocessor,
    panoptic_head=dict(
        num_things_classes=num_things_classes,
        num_stuff_classes=num_stuff_classes,
        # the extra 0.1 entry is the weight of the 'no object' class
        loss_cls=dict(class_weight=[1.0] * num_classes + [0.1])),
    panoptic_fusion_head=dict(
        num_things_classes=num_things_classes,
        num_stuff_classes=num_stuff_classes),
    # instance segmentation only: skip panoptic post-processing at test time
    test_cfg=dict(panoptic_on=False))

# dataset settings
train_pipeline = [
    dict(
        type='LoadImageFromFile',
        to_float32=True,
        backend_args={{_base_.backend_args}}),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='RandomFlip', prob=0.5),
    # large scale jittering
    dict(
        type='RandomResize',
        scale=image_size,
        ratio_range=(0.1, 2.0),
        resize_type='Resize',
        keep_ratio=True),
    dict(
        type='RandomCrop',
        crop_size=image_size,
        crop_type='absolute',
        recompute_bbox=True,
        allow_negative_crop=True),
    dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-5, 1e-5), by_mask=True),
    dict(type='PackDetInputs')
]
test_pipeline = [
    dict(
        type='LoadImageFromFile',
        to_float32=True,
        backend_args={{_base_.backend_args}}),
    dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    # If there are no ground-truth annotations, remove this step from the pipeline
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
train_dataloader = dict(
    dataset=dict(
        type=dataset_type,
        ann_file='annotations/instances_train2017.json',
        data_prefix=dict(img='train2017/'),
        pipeline=train_pipeline))
val_dataloader = dict(
    dataset=dict(
        type=dataset_type,
        ann_file='annotations/instances_val2017.json',
        data_prefix=dict(img='val2017/'),
        pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
    _delete_=True,
    type='CocoMetric',
    ann_file=data_root + 'annotations/instances_val2017.json',
    metric=['bbox', 'segm'],
    format_only=False,
    backend_args={{_base_.backend_args}})
test_evaluator = val_evaluator
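
A minimal usage sketch, not part of the config file itself: it assumes a standard
MMDetection 3.x checkout where this file sits at
configs/mask2former/mask2former_r50_8xb2-lsj-50e_coco.py, and uses the stock
MMEngine config loader and MMDetection launch scripts; adjust the paths to your setup.

# Load the merged config (base panoptic config plus the overrides above) and
# inspect the test-time behaviour. The config path is an assumption, see note above.
from mmengine.config import Config

cfg = Config.fromfile(
    'configs/mask2former/mask2former_r50_8xb2-lsj-50e_coco.py')
print(cfg.model.test_cfg)  # panoptic_on=False -> instance segmentation only

# Typical launch commands with the stock MMDetection entry points:
#   python tools/train.py configs/mask2former/mask2former_r50_8xb2-lsj-50e_coco.py
#   bash tools/dist_train.sh configs/mask2former/mask2former_r50_8xb2-lsj-50e_coco.py 8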