
Merge pull request #331 from deepset-ai/robust_eval
More robust Reader eval by limiting max answers and creating no answer labels
brandenchan authored Aug 26, 2020
2 parents f2b6cc7 + f108939 commit 0ad22d5
Showing 5 changed files with 47 additions and 25 deletions.
22 changes: 17 additions & 5 deletions haystack/indexing/utils.py
@@ -45,19 +45,31 @@ def eval_data_from_file(filename: str) -> Tuple[List[Document], List[Label]]:
 
         # Get Labels
         for qa in paragraph["qas"]:
-            for answer in qa["answers"]:
+            if len(qa["answers"]) > 0:
+                for answer in qa["answers"]:
+                    label = Label(
+                        question=qa["question"],
+                        answer=answer["text"],
+                        is_correct_answer=True,
+                        is_correct_document=True,
+                        document_id=cur_doc.id,
+                        offset_start_in_doc=answer["answer_start"],
+                        no_answer=qa["is_impossible"],
+                        origin="gold_label",
+                    )
+                    labels.append(label)
+            else:
                 label = Label(
                     question=qa["question"],
-                    answer=answer["text"],
+                    answer="",
                     is_correct_answer=True,
                     is_correct_document=True,
                     document_id=cur_doc.id,
-                    offset_start_in_doc=answer["answer_start"],
+                    offset_start_in_doc=0,
                     no_answer=qa["is_impossible"],
                     origin="gold_label",
-                    )
+                )
                 labels.append(label)
 
     return docs, labels


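For context, SQuAD-style eval files mark unanswerable questions with is_impossible set to true and an empty answers list; such entries previously produced no Label at all and are now turned into explicit no-answer labels by the else branch above. A minimal sketch with hypothetical data (the document id is a placeholder; the keyword arguments mirror the Label call shown in the diff):

# Hypothetical SQuAD-style entry for an unanswerable question (SQuAD v2 style)
qa = {
    "question": "Who founded the colony?",
    "is_impossible": True,
    "answers": [],  # empty -> takes the new else branch above
}

# The else branch then builds a Label roughly equivalent to these arguments:
no_answer_label_kwargs = dict(
    question=qa["question"],
    answer="",                      # empty answer text
    is_correct_answer=True,
    is_correct_document=True,
    document_id="doc_42",           # placeholder standing in for cur_doc.id
    offset_start_in_doc=0,
    no_answer=qa["is_impossible"],  # True
    origin="gold_label",
)
print(no_answer_label_kwargs)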
42 changes: 26 additions & 16 deletions haystack/reader/farm.py
@@ -394,6 +394,11 @@ def eval(
         :param doc_index: Index/Table name where documents that are used for evaluation are stored
         """
 
+        if self.top_k_per_candidate != 4:
+            logger.info(f"Performing Evaluation using top_k_per_candidate = {self.top_k_per_candidate} \n"
+                        f"and consequently, QuestionAnsweringPredictionHead.n_best = {self.top_k_per_candidate + 1}. \n"
+                        f"This deviates from FARM's default where QuestionAnsweringPredictionHead.n_best = 5")
+
         # extract all questions for evaluation
         filters = {"origin": [label_origin]}
 
@@ -409,7 +414,8 @@
 
         # Create squad style dicts
         d: Dict[str, Any] = {}
-        for doc_id in aggregated_per_doc.keys():
+        all_doc_ids = [x.id for x in document_store.get_all_documents(doc_index)]
+        for doc_id in all_doc_ids:
             doc = document_store.get_document_by_id(doc_id, index=doc_index)
             if not doc:
                 logger.error(f"Document with the ID '{doc_id}' is not present in the document store.")
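The hunk above switches the iteration from only documents that have gold labels (aggregated_per_doc.keys()) to every document stored in doc_index, so unlabelled documents also end up in the SQuAD-style dict. A plain-Python sketch of the difference, with hypothetical ids:

# Hypothetical ids: doc_1 has gold labels, doc_2 has none
aggregated_per_doc = {"doc_1": ["some_label"]}
all_doc_ids = ["doc_1", "doc_2"]   # what get_all_documents(doc_index) would yield

old_ids = list(aggregated_per_doc.keys())   # ["doc_1"] - doc_2 was skipped before
new_ids = all_doc_ids                       # ["doc_1", "doc_2"] - doc_2 now included
assert set(new_ids) - set(old_ids) == {"doc_2"}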
@@ -419,21 +425,25 @@
             }
             # get all questions / answers
             aggregated_per_question: Dict[str, Any] = defaultdict(list)
-            for label in aggregated_per_doc[doc_id]:
-                # add to existing answers
-                if label.question in aggregated_per_question.keys():
-                    aggregated_per_question[label.question]["answers"].append({
-                        "text": label.answer,
-                        "answer_start": label.offset_start_in_doc})
-                # create new one
-                else:
-                    aggregated_per_question[label.question] = {
-                        "id": str(hash(str(doc_id)+label.question)),
-                        "question": label.question,
-                        "answers": [{
-                            "text": label.answer,
-                            "answer_start": label.offset_start_in_doc}]
-                    }
+            if doc_id in aggregated_per_doc:
+                for label in aggregated_per_doc[doc_id]:
+                    # add to existing answers
+                    if label.question in aggregated_per_question.keys():
+                        # Hack to fix problem where duplicate questions are merged by doc_store processing creating a QA example with 8 annotations > 6 annotation max
+                        if len(aggregated_per_question[label.question]["answers"]) >= 6:
+                            continue
+                        aggregated_per_question[label.question]["answers"].append({
+                            "text": label.answer,
+                            "answer_start": label.offset_start_in_doc})
+                    # create new one
+                    else:
+                        aggregated_per_question[label.question] = {
+                            "id": str(hash(str(doc_id)+label.question)),
+                            "question": label.question,
+                            "answers": [{
+                                "text": label.answer,
+                                "answer_start": label.offset_start_in_doc}]
+                        }
             # Get rid of the question key again (after we aggregated we don't need it anymore)
             d[str(doc_id)]["qas"] = [v for v in aggregated_per_question.values()]
 
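The capping logic added above can also be illustrated in isolation. A simplified sketch with plain dicts standing in for Label objects (the cap of 6 mirrors the hard-coded limit in the inline comment; the function and field names here are illustrative, not Haystack API):

MAX_ANSWERS_PER_QUESTION = 6  # mirrors the hard-coded cap in the diff above

def aggregate_answers(labels):
    """Group (question, answer, offset) records into SQuAD-style entries,
    keeping at most MAX_ANSWERS_PER_QUESTION answers per question."""
    per_question = {}
    for label in labels:
        question = label["question"]
        answer = {"text": label["answer"], "answer_start": label["offset"]}
        if question in per_question:
            if len(per_question[question]["answers"]) >= MAX_ANSWERS_PER_QUESTION:
                continue  # drop surplus duplicates created upstream
            per_question[question]["answers"].append(answer)
        else:
            per_question[question] = {"question": question, "answers": [answer]}
    return list(per_question.values())

# Eight gold answers for the same question are capped at six
labels = [{"question": "Q1", "answer": f"a{i}", "offset": i} for i in range(8)]
assert len(aggregate_answers(labels)[0]["answers"]) == 6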
2 changes: 1 addition & 1 deletion test/test_eval.py
@@ -11,7 +11,7 @@ def test_add_eval_data(document_store):
     document_store.add_eval_data(filename="samples/squad/small.json", doc_index="test_eval_document", label_index="test_feedback")
 
     assert document_store.get_document_count(index="test_eval_document") == 87
-    assert document_store.get_label_count(index="test_feedback") == 881
+    assert document_store.get_label_count(index="test_feedback") == 1214
 
     # test documents
     docs = document_store.get_all_documents(index="test_eval_document")
4 changes: 2 additions & 2 deletions tutorials/Tutorial5_Evaluation.ipynb
@@ -297,7 +297,7 @@
 "# Initialize Reader\n",
 "from haystack.reader.farm import FARMReader\n",
 "\n",
-"reader = FARMReader(\"deepset/roberta-base-squad2\")"
+"reader = FARMReader(\"deepset/roberta-base-squad2\", top_k_per_candidate=4)"
 ]
 },
 {
@@ -1957,4 +1957,4 @@
 },
 "nbformat": 4,
 "nbformat_minor": 1
-}
+}
2 changes: 1 addition & 1 deletion tutorials/Tutorial5_Evaluation.py
@@ -75,7 +75,7 @@
 
 
 # Initialize Reader
-reader = FARMReader("deepset/roberta-base-squad2")
+reader = FARMReader("deepset/roberta-base-squad2", top_k_per_candidate=4)
 
 # Initialize Finder which sticks together Reader and Retriever
 finder = Finder(reader, retriever)
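The tutorial now passes top_k_per_candidate=4 explicitly. Per the log message added in farm.py above, FARM derives QuestionAnsweringPredictionHead.n_best as top_k_per_candidate + 1, so this keeps n_best at FARM's default of 5. A trivial sketch of that relationship:

top_k_per_candidate = 4           # value now passed explicitly in the tutorial
n_best = top_k_per_candidate + 1  # QuestionAnsweringPredictionHead.n_best
assert n_best == 5                # FARM's default, per the new log message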
