test.py
r""" VAT testing code """
import argparse
import os

import torch.nn.functional as F
import torch.nn as nn
import torch

from config.config import get_cfg_defaults
from model.vat import VAT
from common.logger import Logger, AverageMeter
from common.vis import Visualizer
from common.evaluation import Evaluator
from common import utils
from data.dataset import FSSDataset


def test(model, dataloader, nshot):
    r""" Runs VAT evaluation over the test dataloader and returns (mean IoU, foreground-background IoU) """

    # Freeze randomness during testing for reproducibility
    utils.fix_randseed(0)
    average_meter = AverageMeter(dataloader.dataset)

    for idx, batch in enumerate(dataloader):

        # 1. VAT forward pass (model is wrapped in nn.DataParallel, hence the .module indirection)
        batch = utils.to_cuda(batch)
        pred_mask = model.module.predict_mask_nshot(batch, nshot=nshot)
        assert pred_mask.size() == batch['query_mask'].size()

        # 2. Evaluate prediction
        area_inter, area_union = Evaluator.classify_prediction(pred_mask.clone(), batch)
        average_meter.update(area_inter, area_union, batch['class_id'], loss=None)
        average_meter.write_process(idx, len(dataloader), epoch=-1, write_batch_idx=1)

        # Visualize predictions
        if Visualizer.visualize:
            Visualizer.visualize_prediction_batch(batch['support_imgs'], batch['support_masks'],
                                                  batch['query_img'], batch['query_mask'],
                                                  pred_mask, batch['class_id'], idx,
                                                  area_inter[1].float() / area_union[1].float())

    # Write evaluation results
    average_meter.write_result('Test', 0)
    miou, fb_iou = average_meter.compute_iou()

    return miou, fb_iou


if __name__ == '__main__':

    # Arguments parsing
    parser = argparse.ArgumentParser(description='VAT Pytorch Implementation')
    parser.add_argument('--datapath', type=str, default='../Datasets_VAT')
    parser.add_argument('--logpath', type=str, default='')
    parser.add_argument('--bsz', type=int, default=1)
    parser.add_argument('--nworker', type=int, default=0)
    parser.add_argument('--load', type=str, default='')
    parser.add_argument('--nshot', type=int, default=1)
    parser.add_argument('--visualize', action='store_true')
    args = parser.parse_args()

    # A trained checkpoint directory is required: it supplies both config.yaml and best_model.pt
    if args.load == '':
        raise Exception('Pretrained model not specified.')

    # Restore the training-time configuration stored alongside the checkpoint
    cfg = get_cfg_defaults()
    cfg.merge_from_file(os.path.join(args.load, 'config.yaml'))
    cfg.freeze()

    Logger.initialize(args, training=False, cfg=cfg, benchmark=cfg.TRAIN.BENCHMARK, logpath=args.logpath)

    # Model initialization
    model = VAT(cfg, False)
    model.eval()
    Logger.log_params(model)

    # Device setup
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    Logger.info('# available GPUs: %d' % torch.cuda.device_count())
    model = nn.DataParallel(model)
    model.to(device)

    # Load trained model weights
    model.load_state_dict(torch.load(os.path.join(args.load, 'best_model.pt'))['state_dict'])

    # Helper classes (for testing) initialization
    Evaluator.initialize()
    Visualizer.initialize(args.visualize)

    # Dataset initialization
    FSSDataset.initialize(benchmark=cfg.TRAIN.BENCHMARK, img_size=cfg.TRAIN.IMG_SIZE,
                          datapath=args.datapath, use_original_imgsize=False)
    dataloader_test = FSSDataset.build_dataloader(cfg.TRAIN.BENCHMARK, args.bsz, args.nworker,
                                                  cfg.TRAIN.FOLD, 'test', args.nshot)

    # Test VAT
    with torch.no_grad():
        test_miou, test_fb_iou = test(model, dataloader_test, args.nshot)
    Logger.info('Fold %d mIoU: %5.2f \t FB-IoU: %5.2f' % (cfg.TRAIN.FOLD, test_miou.item(), test_fb_iou.item()))
    Logger.info('==================== Finished Testing ====================')
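

# ----------------------------------------------------------------------
# Example invocation (a sketch; the checkpoint directory below is
# illustrative). '--load' must point at a training run folder that
# contains both 'config.yaml' and 'best_model.pt', since the script
# reads its configuration and weights from there:
#
#   python test.py --datapath ../Datasets_VAT \
#                  --load logs/vat_pascal_fold0 \
#                  --nshot 1 --visualize
# ----------------------------------------------------------------------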