-
Notifications
You must be signed in to change notification settings - Fork 7
/
Copy pathmethod.py
147 lines (137 loc) · 8.62 KB
/
method.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
"""
This is the main module for training and testing the Im2RBTE method
"""
import os
import shutil
import sys
import argparse
import warnings
import utils as ut
import network as nw
import cv2 as cv
import torchvision as tv
import torch
#import pdb
def train_test():
    """Train and test the Im2RBTE model.

    Reads a YAML run configuration (``--run``), optionally overrides the GPU
    id (``--gpu``), then for each of ``run['no_trainings']`` repetitions:
    seeds all RNGs, snapshots the code and hyperparameters into the run
    directory, builds datasets/loaders, trains the network, and evaluates it
    on every (test dataset x test transform) combination. When more than one
    repetition is requested, the per-run test results are averaged and saved.

    Side effects: creates directories and files under
    ``run['io_var']['save_dir']``; allocates and frees GPU memory.
    """
    # OpenCV's own threading can fight with DataLoader workers; disable it.
    cv.setNumThreads(0)
    # Read the terminal variables
    parser = argparse.ArgumentParser(description='Training the Model')
    parser.add_argument('--run', default='', type=str, metavar='TrRun',
                        help='Input training file')
    parser.add_argument('--gpu', type=int, metavar='gpu', help='gpu id')
    args = parser.parse_args()
    # Silence warnings unless the user explicitly passed -W options.
    if not sys.warnoptions:
        warnings.simplefilter('ignore')
    run = ut.read_yaml('./runs/' + args.run)
    # A --gpu flag on the command line overrides the YAML setting.
    if args.gpu is not None:
        run['train']['gpu'] = args.gpu
    test_results = []
    for idx in range(1, run['no_trainings'] + 1):
        # One distinct, reproducible seed per repetition.
        ut.set_all_seeds(idx)
        direct = run['io_var']['save_dir'] + run['io_var']['save_folder_name'] + '/' \
                 + run['io_var']['save_subfolder_name'] + '_' + str(idx)
        os.makedirs(direct, exist_ok=True)
        # Structured-edge model used for non-maximum suppression inside the
        # image transforms.
        nms_model = cv.ximgproc.createStructuredEdgeDetection(run['io_var']['nms_model'])
        # Save Training Info: snapshot the code and hyperparameters so the
        # run is reproducible even if the working tree changes later.
        os.makedirs(direct + '/Data', exist_ok=True)
        os.makedirs(direct + '/Code', exist_ok=True)
        if not os.path.isfile(direct + '/Code/' + os.path.basename(__file__)):
            shutil.copy(__file__, direct + '/Code/' + os.path.basename(__file__))
        for code_file in ('augmentations.py', 'utils.py', 'network.py'):
            if not os.path.isfile(direct + '/Code/' + code_file):
                shutil.copy('./' + code_file, direct + '/Code/' + code_file)
        if not os.path.isfile(direct + '/Hyperparameters.yaml'):
            ut.save_yaml({'run': 'python ' + ' '.join(sys.argv),
                          'io_var': run['io_var'],
                          'train': run['train'],
                          'extra_options': run['extra_options'],
                          'train_transforms': run['train_transforms'],
                          'val_transforms': run['val_transforms']},
                         direct + '/Hyperparameters.yaml')
        # Datasets, Network, Optimizer, Scheduler and Loss
        train_dataset_list, train_list_class = ut.dirlist(run['io_var']['train_dataset_list'],
                                                          'training_data', direct + '/Data')
        val_dataset_list, val_list_class = ut.dirlist(run['io_var']['val_dataset_list'],
                                                      'validation_data', direct + '/Data')
        train_tr = ut.create_transform(input_list=run['train_transforms'], nms_model=nms_model)
        val_tr = ut.create_transform(input_list=run['val_transforms'], nms_model=nms_model)
        train_dataset = ut.ImageFolderSubsetPath(image_list=train_dataset_list,
                                                 class_list=train_list_class,
                                                 transform=tv.transforms.Compose(train_tr))
        val_dataset = ut.ImageFolderSubsetPath(image_list=val_dataset_list,
                                               class_list=val_list_class,
                                               transform=tv.transforms.Compose(val_tr))
        train_data_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                                        batch_size=run['train']['batch'],
                                                        num_workers=run['train']['workers'],
                                                        shuffle=True)
        val_data_loader = torch.utils.data.DataLoader(dataset=val_dataset,
                                                      batch_size=run['train']['batch'],
                                                      num_workers=run['train']['workers'],
                                                      shuffle=False)
        net = nw.initizalize_network(architecture=run['train']['architecture'],
                                     no_classes=len(train_dataset.classes),
                                     load_model=run['io_var']['load_model'],
                                     pretrained=run['train']['pretrained'])
        optimizer = torch.optim.Adam(net.parameters(), lr=run['train']['learning_rate'],
                                     betas=(0.9, 0.999),
                                     weight_decay=run['train']['weight_decay'])
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                    step_size=run['train']['schedule'],
                                                    gamma=0.1)
        criterion = torch.nn.CrossEntropyLoss()
        # Learning Rate Search: only makes sense once, on the first run.
        if run['extra_options']['lr_mode'] and idx == 1:
            ut.lr_tool(net=net, criterion=criterion, original_optimizer=optimizer,
                       train_data_loader=train_data_loader, gpu_id=run['train']['gpu'],
                       direct=direct, start_lr=0.0000001, end_lr=10, step_size=50, gamma=10)
        # Training
        last_model_name = ut.training(net=net, criterion=criterion, optimizer=optimizer,
                                      scheduler=scheduler, train_data_loader=train_data_loader,
                                      val_data_loader=val_data_loader,
                                      total_epochs=run['train']['epochs'],
                                      gpu_id=run['train']['gpu'], direct=direct)
        # Testing: evaluate on every (dataset, transform) combination.
        # NOTE: testing_name is rebuilt each repetition; the combinations are
        # identical across repetitions, so the last copy is used for averaging.
        testing_name = []
        for testing_data in run['io_var']['test_dataset_list']:
            for testing_transforms in run['test_transforms']:
                print('Testing ' + testing_data['name'] + ' ' + testing_transforms['name'])
                test_dataset_list, test_list_class = ut.dirlist(testing_data['directory'],
                                                                'test_data_' + testing_data['name'],
                                                                direct + '/Data')
                test_tr = ut.create_transform(input_list=testing_transforms['transform'],
                                              nms_model=nms_model)
                test_dataset = ut.ImageFolderSubsetPath(image_list=test_dataset_list,
                                                        class_list=test_list_class,
                                                        transform=tv.transforms.Compose(test_tr))
                test_data_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                                               batch_size=run['train']['batch'],
                                                               num_workers=run['train']['workers'],
                                                               shuffle=False)
                performance_dict, _, _ = ut.testing(net=net, test_data_loader=test_data_loader,
                                                    direct=direct, model_name=last_model_name,
                                                    gpu_id=run['train']['gpu'],
                                                    train_class_list=train_dataset.classes,
                                                    dataset_list=testing_data['directory'],
                                                    transform=testing_transforms['transform'],
                                                    name=testing_data['name'] + '_'
                                                    + testing_transforms['name'])
                if performance_dict['name'] not in testing_name:
                    testing_name.append(performance_dict['name'])
                test_results.append(performance_dict)
        # Free GPU memory before the next repetition.
        del net, optimizer, scheduler, criterion
        with torch.cuda.device('cuda:' + str(run['train']['gpu'])):
            torch.cuda.empty_cache()
    # Average the per-repetition test results when training more than once.
    if run['no_trainings'] > 1:
        ut.save_average_testing(testing_name=testing_name, test_results=test_results,
                                save_dir=run['io_var']['save_dir']
                                + run['io_var']['save_folder_name'] + '/'
                                + run['io_var']['save_subfolder_name']
                                + '_' + str(run['no_trainings']))
# Run the full train/test pipeline only when executed as a script, not on import.
if __name__ == '__main__':
    train_test()