eval_grd_flickr30k_entities.py
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Evaluation script for object localization
import json
import argparse
import torch
import itertools
import numpy as np
from collections import defaultdict
# from misc.utils import bbox_overlaps_batch
from misc.bbox_transform import bbox_overlaps_batch
from stanfordcorenlp import StanfordCoreNLP
from tqdm import tqdm
import pdb
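
# Expected input layout, as inferred from the parsing code below (not from a spec):
#   - reference_file: JSON with an 'annotations' list; each entry has 'image_id' and
#     'captions', and every caption carries 'process_bnd_box', 'process_idx',
#     'process_clss', and 'tokens'.
#   - submission_file: JSON with a 'results' dict keyed by image id (as a string);
#     each value is a list of per-caption predictions with 'clss', 'idx_in_sent',
#     and 'bbox' fields (five captions per image in GT mode).
#   - split_file: JSON mapping split names to lists of image ids.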
class FlickrGrdEval(object):

    def __init__(self, reference_file=None, submission_file=None,
                 split_file=None, val_split=None, iou_thresh=0.5, verbose=False):
        if not reference_file:
            raise IOError('Please input a valid reference file!')
        if not submission_file:
            raise IOError('Please input a valid submission file!')

        self.iou_thresh = iou_thresh
        self.verbose = verbose
        self.val_split = val_split

        self.import_ref(reference_file, split_file)
        self.import_sub(submission_file)
    def import_ref(self, reference_file=None, split_file=None):
        with open(split_file) as f:
            split_dict = json.load(f)
        split = {}
        for s in self.val_split:
            split.update({i: i for i in split_dict[s]})

        with open(reference_file) as f:
            ref = json.load(f)['annotations']
        ref = [v for v in ref if str(v['image_id']) in split]
        self.ref = ref
    def import_sub(self, submission_file=None):
        with open(submission_file) as f:
            pred = json.load(f)['results']
        self.pred = pred
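
    # gt_grd_eval: grounding accuracy on ground-truth sentences. For every annotated
    # object word, the box submitted for the same word index is compared against the
    # reference box; a hit requires IoU above self.iou_thresh. The reported accuracy
    # is the mean of the per-class accuracies over all object classes in the split.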
    def gt_grd_eval(self):
        ref = self.ref
        pred = self.pred
        print('Number of images in the reference: {}, number of images in the submission: {}'.format(len(ref), len(pred)))

        results = defaultdict(list)
        for lst_idx, anns in enumerate(ref):
            img = str(anns['image_id'])
            for num_sent, ann in enumerate(anns['captions']):
                ref_bbox_all = torch.Tensor(ann['process_bnd_box'])
                sent_idx = ann['process_idx']  # index of word in sentence to evaluate
                for idx in sent_idx:
                    sel_idx = [ind for ind, i in enumerate(ann['process_idx']) if idx == i]
                    assert(len(sel_idx) == 1)
                    ref_bbox = ref_bbox_all[sel_idx[0]]  # select matched boxes
                    assert(ref_bbox.size(0) > 0)
                    class_name = ann['process_clss'][sel_idx[0]]

                    if img not in pred:
                        results[class_name].append(0)  # image not grounded
                    elif len(pred[img]) != 5:
                        raise Exception('Each image must have five caption predictions!')
                    elif idx not in pred[img][num_sent]['idx_in_sent']:
                        results[class_name].append(0)  # object not grounded
                    else:
                        pred_ind = pred[img][num_sent]['idx_in_sent'].index(idx)
                        pred_bbox = torch.Tensor(pred[img][num_sent]['bbox'][pred_ind])
                        overlap = bbox_overlaps_batch(pred_bbox.unsqueeze(0),
                                                      ref_bbox.unsqueeze(0).unsqueeze(0))
                        results[class_name].append(1 if torch.max(overlap) > self.iou_thresh else 0)

        print('Number of groundable objects in this split: {}'.format(len(results)))
        grd_accu = np.mean([sum(hm) * 1. / len(hm) for i, hm in results.items()])

        print('-' * 80)
        print('The overall localization accuracy is {:.4f}'.format(grd_accu))
        print('-' * 80)
        if self.verbose:
            print('Object frequency and grounding accuracy per class (descending by object frequency):')
            accu_per_clss = {(i, sum(hm) * 1. / len(hm)): len(hm) for i, hm in results.items()}
            accu_per_clss = sorted(accu_per_clss.items(), key=lambda x: x[1], reverse=True)
            for accu in accu_per_clss:
                print('{} ({}): {:.4f}'.format(accu[0][0], accu[1], accu[0][1]))

        return grd_accu
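
    # grd_eval: precision / recall / F1 on generated sentences. Predicted object words
    # are matched to reference object words by class name, and a match counts only when
    # the boxes overlap with IoU above self.iou_thresh. In 'all' mode, hallucinated and
    # missed objects count as failures; in 'loc' mode, only correctly-predicted object
    # words are scored. Per-class scores are macro-averaged over the vocabulary of
    # groundable objects in the split.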
    def grd_eval(self, mode='all'):
        if mode == 'all':
            print('Evaluating on all object words.')
        elif mode == 'loc':
            print('Evaluating only on correctly-predicted object words.')
        else:
            raise Exception('Invalid loc mode!')

        ref = self.ref
        pred = self.pred
        print('Number of images in the reference: {}, number of images in the submission: {}'.format(len(ref), len(pred)))

        nlp = StanfordCoreNLP('tools/stanford-corenlp-full-2018-02-27')
        props = {'annotators': 'lemma', 'pipelineLanguage': 'en', 'outputFormat': 'json'}
        vocab_in_split = set()

        # precision
        prec = defaultdict(list)
        for lst_idx, anns in tqdm(enumerate(ref)):
            img = str(anns['image_id'])
            for num_sent, ann in enumerate(anns['captions']):
                if img not in pred:
                    continue  # do not penalize if sentence not annotated

                ref_bbox_all = torch.Tensor(ann['process_bnd_box'])
                idx_in_sent = {}
                for box_idx, cls in enumerate(ann['process_clss']):
                    vocab_in_split.update(set([cls]))
                    idx_in_sent[cls] = idx_in_sent.get(cls, []) + [ann['process_idx'][box_idx]]

                sent_idx = ann['process_idx']  # index of gt object words
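                # Lemmatize every caption token that is NOT an annotated object word;
                # a predicted class whose lemma appears in this set is treated as present
                # in the sentence but un-annotated, and is therefore not penalized below.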
                exclude_obj = {json.loads(nlp.annotate(token, properties=props)
                                          )['sentences'][0]['tokens'][0]['lemma']: 1
                               for token_idx, token in enumerate(ann['tokens'])
                               if (token_idx not in sent_idx and token != '')}

                for pred_idx, class_name in enumerate(pred[img][0]['clss']):
                    if class_name in idx_in_sent:
                        gt_idx = min(idx_in_sent[class_name])  # always consider the first match...
                        sel_idx = [idx for idx, i in enumerate(ann['process_idx']) if gt_idx == i]
                        assert(len(sel_idx) == 1)
                        ref_bbox = ref_bbox_all[sel_idx[0]]  # select matched boxes
                        assert(ref_bbox.size(0) > 0)
                        pred_bbox = torch.Tensor(pred[img][0]['bbox'][pred_idx])
                        overlap = bbox_overlaps_batch(pred_bbox.unsqueeze(0),
                                                      ref_bbox.unsqueeze(0).unsqueeze(0))
                        prec[class_name].append(1 if torch.max(overlap) > self.iou_thresh else 0)
                    elif json.loads(nlp.annotate(class_name, properties=props))['sentences'][0]['tokens'][0]['lemma'] in exclude_obj:
                        pass  # do not penalize if gt object word not annotated (missed)
                    else:
                        if mode == 'all':
                            prec[class_name].append(0)  # hallucinated object
        nlp.close()
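
        # The recall pass mirrors the precision pass from the reference side: every
        # ground-truth object word is checked against the first predicted box whose
        # class name matches, and images absent from the submission count as misses.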
        # recall
        recall = defaultdict(list)
        for lst_idx, anns in enumerate(ref):
            img = str(anns['image_id'])
            for num_sent, ann in enumerate(anns['captions']):
                ref_bbox_all = torch.Tensor(ann['process_bnd_box'])
                sent_idx = ann['process_idx']  # index of gt object words
                for gt_idx in sent_idx:
                    sel_idx = [idx for idx, i in enumerate(ann['process_idx']) if gt_idx == i]
                    assert(len(sel_idx) == 1)
                    ref_bbox = ref_bbox_all[sel_idx[0]]  # select matched boxes
                    assert(ref_bbox.size(0) > 0)
                    class_name = ann['process_clss'][sel_idx[0]]

                    if img not in pred:
                        recall[class_name].append(0)  # image not grounded
                    elif class_name in pred[img][0]['clss']:
                        pred_idx = pred[img][0]['clss'].index(class_name)  # always consider the first match...
                        pred_bbox = torch.Tensor(pred[img][0]['bbox'][pred_idx])
                        overlap = bbox_overlaps_batch(pred_bbox.unsqueeze(0),
                                                      ref_bbox.unsqueeze(0).unsqueeze(0))
                        recall[class_name].append(1 if torch.max(overlap) > self.iou_thresh else 0)
                    else:
                        if mode == 'all':
                            recall[class_name].append(0)  # object not grounded

        num_vocab = len(vocab_in_split)
        print('Number of groundable objects in this split: {}'.format(num_vocab))
        print('Number of objects in prec and recall: {}, {}'.format(len(prec), len(recall)))
        prec_accu = np.sum([sum(hm) * 1. / len(hm) for i, hm in prec.items()]) * 1. / num_vocab
        recall_accu = np.sum([sum(hm) * 1. / len(hm) for i, hm in recall.items()]) * 1. / num_vocab
        f1 = 2. * prec_accu * recall_accu / (prec_accu + recall_accu)

        print('-' * 80)
        print('The overall precision_{0} / recall_{0} / F1_{0} are {1:.4f} / {2:.4f} / {3:.4f}'.format(mode, prec_accu, recall_accu, f1))
        print('-' * 80)
        if self.verbose:
            print('Object frequency and grounding accuracy per class (descending by object frequency):')
            accu_per_clss = {}
            for i in vocab_in_split:
                prec_clss = sum(prec[i]) * 1. / len(prec[i]) if i in prec else 0
                recall_clss = sum(recall[i]) * 1. / len(recall[i]) if i in recall else 0
                accu_per_clss[(i, prec_clss, recall_clss)] = (len(prec[i]), len(recall[i]))
            accu_per_clss = sorted(accu_per_clss.items(), key=lambda x: x[1][1], reverse=True)
            for accu in accu_per_clss:
                print('{} ({} / {}): {:.4f} / {:.4f}'.format(accu[0][0], accu[1][0], accu[1][1], accu[0][1], accu[0][2]))

        return prec_accu, recall_accu, f1

def main(args):
    grd_evaluator = FlickrGrdEval(reference_file=args.reference, submission_file=args.submission,
                                  split_file=args.split_file, val_split=args.split,
                                  iou_thresh=args.iou_thresh, verbose=args.verbose)

    if args.eval_mode == 'GT':
        print('Assuming the input boxes are based upon GT sentences.')
        grd_evaluator.gt_grd_eval()
    elif args.eval_mode == 'gen':
        print('Assuming the input boxes are based upon generated sentences.')
        grd_evaluator.grd_eval(mode=args.loc_mode)
    else:
        raise Exception('Invalid eval mode!')
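
# NOTE: the command-line entry point is cut off in this view. The block below is a
# reconstruction based only on the attributes that main() reads (args.reference,
# args.submission, args.split_file, args.split, args.eval_mode, args.loc_mode,
# args.iou_thresh, args.verbose); flag names, defaults, and help strings are
# illustrative assumptions, not the original ones.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Flickr30k Entities grounding evaluation')
    parser.add_argument('--reference', type=str, help='path to the reference (annotation) JSON file')
    parser.add_argument('--submission', type=str, help='path to the submission (prediction) JSON file')
    parser.add_argument('--split_file', type=str, help='path to the dataset split JSON file')
    parser.add_argument('--split', type=str, nargs='+', default=['val'], help='split(s) to evaluate on')
    parser.add_argument('--eval_mode', type=str, default='GT', choices=['GT', 'gen'],
                        help='GT: ground GT sentences; gen: ground generated sentences')
    parser.add_argument('--loc_mode', type=str, default='all', choices=['all', 'loc'],
                        help='all: score all object words; loc: only correctly-predicted words')
    parser.add_argument('--iou_thresh', type=float, default=0.5, help='IoU threshold for a correct grounding')
    parser.add_argument('--verbose', action='store_true', help='print per-class statistics')
    args = parser.parse_args()
    main(args)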