eval_cls_conv.py
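"""Evaluate a trained PointConv classifier on the ModelNet40 test split."""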
import argparse
import os
import sys
import datetime
import logging
from pathlib import Path

import numpy as np
import torch
import torch.utils.data
from tqdm import tqdm

from data_utils.ModelNetDataLoader import ModelNetDataLoader
from model.pointconv import PointConvDensityClsSsg as PointConvClsSsg

def parse_args():
    '''PARAMETERS'''
    parser = argparse.ArgumentParser('PointConv')
    parser.add_argument('--batchsize', type=int, default=32, help='batch size')
    parser.add_argument('--gpu', type=str, default='0', help='specify gpu device')
    parser.add_argument('--checkpoint', type=str, default=None, help='checkpoint')
    parser.add_argument('--num_point', type=int, default=1024, help='Point Number [default: 1024]')
    parser.add_argument('--num_workers', type=int, default=16, help='Worker Number [default: 16]')
    parser.add_argument('--model_name', default='pointconv', help='model name')
    parser.add_argument('--normal', action='store_true', default=False, help='Whether to use normal information [default: False]')
    return parser.parse_args()
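
# Note: --normal assumes the resampled ModelNet40 data provides per-point normals;
# when set, each point carries 6 channels (xyz + normal) instead of 3, and the
# extra channels are passed to the network as per-point features in main().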

def main(args):
    '''HYPER PARAMETER'''
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    '''CREATE DIR'''
    experiment_dir = Path('./eval_experiment/')
    experiment_dir.mkdir(exist_ok=True)
    file_dir = Path(str(experiment_dir) + '/%s_ModelNet40-' % args.model_name + str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M')))
    file_dir.mkdir(exist_ok=True)
    checkpoints_dir = file_dir.joinpath('checkpoints/')
    checkpoints_dir.mkdir(exist_ok=True)
    if args.checkpoint is not None:
        # Keep a copy of the evaluated checkpoint alongside the logs.
        os.system('cp %s %s' % (args.checkpoint, checkpoints_dir))
    log_dir = file_dir.joinpath('logs/')
    log_dir.mkdir(exist_ok=True)

    '''LOG'''
    logger = logging.getLogger(args.model_name)
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    # Join with the log directory; bare string concatenation would drop the path separator.
    file_handler = logging.FileHandler(str(log_dir.joinpath('eval_%s_cls.txt' % args.model_name)))
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    logger.info('---------------------------------------------------EVAL---------------------------------------------------')
    logger.info('PARAMETER ...')
    logger.info(args)

    '''DATA LOADING'''
    logger.info('Load dataset ...')
    DATA_PATH = './data/modelnet40_normal_resampled/'
    TEST_DATASET = ModelNetDataLoader(root=DATA_PATH, npoint=args.num_point, split='test', normal_channel=args.normal)
    testDataLoader = torch.utils.data.DataLoader(TEST_DATASET, batch_size=args.batchsize, shuffle=False, num_workers=args.num_workers)
    logger.info("The number of test data is: %d", len(TEST_DATASET))

    seed = 3
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)

    '''MODEL LOADING'''
    num_class = 40
    classifier = PointConvClsSsg(num_class).cuda()
    if args.checkpoint is not None:
        print('Load CheckPoint...')
        logger.info('Load CheckPoint')
        checkpoint = torch.load(args.checkpoint)
        start_epoch = checkpoint['epoch']  # epoch at which the checkpoint was saved (informational)
        classifier.load_state_dict(checkpoint['model_state_dict'])
    else:
        print('Please load Checkpoint to eval...')
        sys.exit(0)
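
    # The checkpoint loaded above is expected to be a dict with at least the keys
    # 'epoch' and 'model_state_dict', e.g. one saved during training along the lines of:
    #   torch.save({'epoch': epoch, 'model_state_dict': classifier.state_dict()}, path)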

    '''EVAL'''
    logger.info('Start evaluating...')
    print('Start evaluating...')

    classifier = classifier.eval()
    mean_correct = []
    for batch_id, data in tqdm(enumerate(testDataLoader, 0), total=len(testDataLoader), smoothing=0.9):
        pointcloud, target = data
        target = target[:, 0]
        # (B, N, C) -> (B, C, N): the network expects channels-first point clouds.
        points = pointcloud.permute(0, 2, 1)
        points, target = points.cuda(), target.cuda()
        with torch.no_grad():
            # The first 3 channels are xyz coordinates; any remaining channels
            # (normals, when --normal is set) are passed as per-point features.
            pred = classifier(points[:, :3, :], points[:, 3:, :])
        pred_choice = pred.data.max(1)[1]
        correct = pred_choice.eq(target.long().data).cpu().sum()
        mean_correct.append(correct.item() / float(points.size()[0]))

    accuracy = np.mean(mean_correct)
    print('Total Accuracy: %f' % accuracy)
    logger.info('Total Accuracy: %f' % accuracy)
    logger.info('End of evaluation...')
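
    # Note: accuracy here is the mean of per-batch accuracies, so a smaller final
    # batch is weighted the same as a full one. An instance-level figure would
    # instead accumulate the correct counts and divide by len(TEST_DATASET).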

if __name__ == '__main__':
    args = parse_args()
    main(args)
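
# A typical invocation (the checkpoint path is illustrative; --checkpoint is
# required in practice, since the script exits without one):
#   python eval_cls_conv.py --checkpoint ./checkpoints/pointconv_modelnet40.pth --gpu 0 --batchsize 32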