# deit_unept_pcontext.py
# dataset settings
dataset_type = 'PascalContextDataset'
data_root = '/home/ubuntu/dataset/PASCAL_Context/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
img_scale = (520, 520)
crop_size = (480, 480)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations'),
dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg', 'distance_map', 'angle_map']),
]
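# In addition to the usual 'img' / 'gt_semantic_seg', the training pipeline
# collects 'distance_map' and 'angle_map'; these appear to be the pre-computed
# distance-transform offset maps read from `dt_dir` in the data section below
# and used as auxiliary boundary supervision. A fetched training sample should
# then look roughly like this (illustrative, key set assumed from Collect):
#   sample = dataset[0]
#   sample.keys()  # img_metas, img, gt_semantic_seg, distance_map, angle_map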
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=img_scale,
img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=True,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
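# With 6 img_ratios and flip=True, MultiScaleFlipAug yields 12 augmented views
# per test image (each ratio, with and without horizontal flip); mmseg's aug
# test typically averages the per-view logits, each view itself being run with
# the sliding-window test_cfg defined further below.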
data = dict(
samples_per_gpu=2,
workers_per_gpu=4,
train=dict(
type=dataset_type,
data_root=data_root,
img_dir='train/image',
ann_dir='train/label',
dt_dir='train/dt_offset',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
data_root=data_root,
img_dir='val/image',
ann_dir='val/label',
dt_dir='val/dt_offset',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
data_root=data_root,
img_dir='val/image',
ann_dir='val/label',
dt_dir='val/dt_offset',
pipeline=test_pipeline))
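# A minimal sketch (assuming the standard mmseg/mmcv build APIs this config is
# written against, and that the repo's PascalContextDataset variant accepts the
# extra `dt_dir` field) of how the dicts above are consumed:
#
#   from mmcv import Config
#   from mmseg.datasets import build_dataset, build_dataloader
#
#   cfg = Config.fromfile('deit_unept_pcontext.py')
#   train_set = build_dataset(cfg.data.train)
#   train_loader = build_dataloader(
#       train_set,
#       samples_per_gpu=cfg.data.samples_per_gpu,
#       workers_per_gpu=cfg.data.workers_per_gpu,
#       dist=False,
#       shuffle=True)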
# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='UN_EPT',
feat_dim=256,
k=16,
L=3,
dropout=0.1,
heads=8,
hidden_dim=2048,
depth=2,
pretrained='deit_base_distilled_patch16_384-d0272ac0.pth',
backbone_cfg=dict(
type='DeiT',
img_size=480,
patch_size=16,
embed_dim=768,
bb_depth=12,
num_heads=12,
mlp_ratio=4),
loss_decode=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0))
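# `pretrained` points at the DeiT-Base distilled patch16 384x384 checkpoint
# released with the DeiT paper. A hedged sketch of instantiating the model
# (assumes `UN_EPT` and `DeiT` are registered with mmseg's registries, as the
# config implies):
#
#   from mmseg.models import build_segmentor
#   segmentor = build_segmentor(
#       cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)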
# model training and testing settings
train_cfg = dict()
test_cfg = dict(
    mode='slide', num_classes=60, stride=(160, 160), crop_size=(480, 480),
    num_queries=3600)
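# With mode='slide', crop_size=(480, 480) and stride=(160, 160), standard mmseg
# slide inference tiles each test image into overlapping 480x480 crops, e.g.
# for a 520x520 input at scale 1.0:
#   h_grids = (520 - 480 + 160 - 1) // 160 + 1 = 2   (likewise w_grids = 2)
# so the image is covered by 2 x 2 = 4 crops whose logits are summed and
# normalized by the per-pixel crop count in the overlap regions.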
# optimizer
optimizer = dict(
    type='AdamW', lr=0.0001, weight_decay=0.0001, betas=(0.9, 0.999), eps=1e-8,
    paramwise_cfg=dict(custom_keys={'backbone': dict(lr_mult=0.1)}))
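# Effect of paramwise_cfg (standard mmcv DefaultOptimizerConstructor behaviour):
# parameters whose names contain 'backbone' get lr = 0.0001 * 0.1 = 1e-5, while
# the rest of the model trains at the base 1e-4. Hedged sketch:
#
#   from mmcv.runner import build_optimizer
#   opt = build_optimizer(segmentor, cfg.optimizer)
#   # backbone param groups use lr=1e-5, all other groups lr=1e-4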
optimizer_config = dict()
# learning policy
lr_config = dict(policy='step', step=126000, by_epoch=False)
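# Step policy with a single step and mmcv's default gamma=0.1: the learning
# rate is held constant until iteration 126000 and is then multiplied by 0.1
# for the remaining 34k of the 160k training iterations.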
# runtime settings
runner = dict(type='IterBasedRunner', max_iters=160000)
checkpoint_config = dict(by_epoch=False, interval=10000)
evaluation = dict(interval=10000, metric='mIoU')
# yapf:disable
log_config = dict(
interval=200,
hooks=[
dict(type='TextLoggerHook', by_epoch=False),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
cudnn_benchmark = True
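# Typical usage with mmsegmentation-style tooling (hedged: script names and the
# config path are assumed to follow the standard mmseg layout used by this repo):
#
#   # single GPU
#   python tools/train.py configs/deit_unept_pcontext.py
#   # distributed training on 8 GPUs (dist_params above selects the NCCL backend)
#   bash tools/dist_train.sh configs/deit_unept_pcontext.py 8
#   # evaluation with the slide test_cfg and mIoU metric defined above
#   python tools/test.py configs/deit_unept_pcontext.py <checkpoint>.pth --eval mIoU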