Merge branch 'main' into typed_recipe_artifact_saving
Showing 9 changed files with 659 additions and 0 deletions.
@@ -0,0 +1,71 @@
import unitxt
from unitxt.blocks import LoadHF
from unitxt.card import TaskCard
from unitxt.catalog import add_to_catalog
from unitxt.operators import (
    ListFieldValues,
    Rename,
    Set,
    ShuffleFieldValues,
)
from unitxt.splitters import RenameSplits
from unitxt.test_utils.card import test_card

with unitxt.settings.context(allow_unverified_code=True):
    for subset in ["main", "diamond", "experts", "extended"]:
        card = TaskCard(
            loader=LoadHF(
                path="Idavidrein/gpqa",
                name="gpqa_" + subset,
                data_classification_policy=["public"],
            ),
            preprocess_steps=[
                RenameSplits({"train": "test"}),
                ListFieldValues(
                    fields=[
                        "Correct Answer",
                        "Incorrect Answer 1",
                        "Incorrect Answer 2",
                        "Incorrect Answer 3",
                    ],
                    to_field="choices",
                ),
                ShuffleFieldValues(field="choices"),
                Rename(field="Correct Answer", to_field="answer"),
                Rename(field="Subdomain", to_field="topic"),
                Rename(field="Question", to_field="question"),
                Set({"context_type": "situation"}),
            ],
            task="tasks.qa.multiple_choice.with_topic",
            templates="templates.qa.multiple_choice.with_topic.all",
            __description__=(
                """GPQA is a multiple-choice, Q&A dataset of very hard questions written and validated by experts in biology, physics, and chemistry. When attempting questions out of their own domain (e.g., a physicist answers a chemistry question), these experts get only 34 percent accuracy, despite spending >30m with full access to Google."""
            ),
            __tags__={
                "annotations_creators": "expert-generated",
                "arxiv": "2311.12022",
                "flags": ["NLU", "natural language understanding"],
                "language": "en",
                "language_creators": "other",
                "license": "cc-by-4.0",
                "multilinguality": "monolingual",
                "region": "us",
                "size_categories": "n<1K",
                "source_datasets": "extended|other",
                "task_categories": [
                    "text-classification",
                    "token-classification",
                    "question-answering",
                ],
                "task_ids": [
                    "natural-language-inference",
                    "word-sense-disambiguation",
                    "coreference-resolution",
                    "extractive-qa",
                ],
            },
        )

        if subset == "main":
            test_card(card, strict=False)
        add_to_catalog(card, "cards.gpqa." + subset, overwrite=True)
@@ -0,0 +1,78 @@
import unitxt
from unitxt.blocks import LoadHF
from unitxt.card import TaskCard
from unitxt.catalog import add_to_catalog
from unitxt.operators import (
    Copy,
    ListFieldValues,
    MapInstanceValues,
)
from unitxt.splitters import RenameSplits
from unitxt.test_utils.card import test_card

with unitxt.settings.context(
    allow_unverified_code=True,
    disable_hf_datasets_cache=False,
):
    card = TaskCard(
        loader=LoadHF(path="THUDM/LongBench-v2", data_classification_policy=["public"]),
        preprocess_steps=[
            RenameSplits({"train": "test"}),
            ListFieldValues(
                fields=["choice_A", "choice_B", "choice_C", "choice_D"],
                to_field="choices",
            ),
            Copy(field="domain", to_field="context_type"),
            MapInstanceValues(
                mappers={
                    "answer": {
                        "A": 0,
                        "B": 1,
                        "C": 2,
                        "D": 3,
                    },
                    "context_type": {
                        "Long In-context Learning": "examples",
                        "Single-Document QA": "document",
                        "Long Structured Data Understanding": "data",
                        "Multi-Document QA": "documents",
                        "Code Repository Understanding": "code",
                        "Long-dialogue History Understanding": "dialog",
                    },
                }
            ),
        ],
        task="tasks.qa.multiple_choice.with_context",
        templates="templates.qa.multiple_choice.with_context.all",
        __description__=(
            """LongBench v2 is designed to assess the ability of LLMs to handle long-context problems requiring deep understanding and reasoning across real-world multitasks. LongBench v2 has the following features: (1) Length: Context length ranging from 8k to 2M words, with the majority under 128k. (2) Difficulty: Challenging enough that even human experts, using search tools within the document, cannot answer correctly in a short time. (3) Coverage: Cover various realistic scenarios. (4) Reliability: All in a multiple-choice question format for reliable evaluation.
To elaborate, LongBench v2 consists of 503 challenging multiple-choice questions, with contexts ranging from 8k to 2M words, across six major task categories: single-document QA, multi-document QA, long in-context learning, long-dialogue history understanding, code repo understanding, and long structured data understanding. To ensure the breadth and the practicality, we collect data from nearly 100 highly educated individuals with diverse professional backgrounds. We employ both automated and manual review processes to maintain high quality and difficulty, resulting in human experts achieving only 53.7% accuracy under a 15-minute time constraint. Our evaluation reveals that the best-performing model, when directly answers the questions, achieves only 50.1% accuracy. In contrast, the o1-preview model, which includes longer reasoning, achieves 57.7%, surpassing the human baseline by 4%. These results highlight the importance of enhanced reasoning ability and scaling inference-time compute to tackle the long-context challenges in LongBench v2."""
        ),
        __tags__={
            "annotations_creators": "expert-generated",
            "arxiv": "2412.15204",
            "flags": ["NLU", "natural language understanding"],
            "language": "en",
            "language_creators": "other",
            "license": "other",
            "multilinguality": "monolingual",
            "region": "us",
            "size_categories": "n<1K",
            "source_datasets": "extended|other",
            "task_categories": [
                "text-classification",
                "token-classification",
                "question-answering",
            ],
            "task_ids": [
                "natural-language-inference",
                "word-sense-disambiguation",
                "coreference-resolution",
                "extractive-qa",
            ],
        },
    )

    test_card(card, strict=False)
    add_to_catalog(card, "cards.long_bench_v2", overwrite=True)
@@ -0,0 +1,47 @@
import unitxt
from unitxt.blocks import LoadHF
from unitxt.card import TaskCard
from unitxt.catalog import add_to_catalog
from unitxt.collections_operators import Wrap
from unitxt.operators import (
    Rename,
)
from unitxt.test_utils.card import test_card

with unitxt.settings.context(allow_unverified_code=True):
    card = TaskCard(
        loader=LoadHF(path="basicv8vc/SimpleQA", data_classification_policy=["public"]),
        preprocess_steps=[
            Rename(field="problem", to_field="question"),
            Wrap(field="answer", inside="list", to_field="answers"),
        ],
        task="tasks.qa.open",
        templates="templates.qa.open.all",
        __description__=(
            """A factuality benchmark called SimpleQA that measures the ability for language models to answer short, fact-seeking questions."""
        ),
        __tags__={
            "annotations_creators": "expert-generated",
            "arxiv": "1904.09728",
            "flags": ["NLU", "natural language understanding"],
            "language": "en",
            "language_creators": "other",
            "license": "mit",
            "multilinguality": "monolingual",
            "region": "us",
            "size_categories": "10K<n<100K",
            "source_datasets": "extended|other",
            "task_categories": [
                "text-classification",
                "token-classification",
                "question-answering",
            ],
            "task_ids": [
                "natural-language-inference",
                "word-sense-disambiguation",
            ],
        },
    )

    test_card(card, strict=False)
    add_to_catalog(card, "cards.simple_qa", overwrite=True)
@@ -0,0 +1,83 @@
{
    "__type__": "task_card",
    "loader": {
        "__type__": "load_hf",
        "path": "Idavidrein/gpqa",
        "name": "gpqa_diamond",
        "data_classification_policy": [
            "public"
        ]
    },
    "preprocess_steps": [
        {
            "__type__": "rename_splits",
            "mapper": {
                "train": "test"
            }
        },
        {
            "__type__": "list_field_values",
            "fields": [
                "Correct Answer",
                "Incorrect Answer 1",
                "Incorrect Answer 2",
                "Incorrect Answer 3"
            ],
            "to_field": "choices"
        },
        {
            "__type__": "shuffle_field_values",
            "field": "choices"
        },
        {
            "__type__": "rename",
            "field": "Correct Answer",
            "to_field": "answer"
        },
        {
            "__type__": "rename",
            "field": "Subdomain",
            "to_field": "topic"
        },
        {
            "__type__": "rename",
            "field": "Question",
            "to_field": "question"
        },
        {
            "__type__": "set",
            "fields": {
                "context_type": "situation"
            }
        }
    ],
    "task": "tasks.qa.multiple_choice.with_topic",
    "templates": "templates.qa.multiple_choice.with_topic.all",
    "__description__": "GPQA is a multiple-choice, Q&A dataset of very hard questions written and validated by experts in biology, physics, and chemistry. When attempting questions out of their own domain (e.g., a physicist answers a chemistry question), these experts get only 34 percent accuracy, despite spending >30m with full access to Google.",
    "__tags__": {
        "annotations_creators": "expert-generated",
        "arxiv": "2311.12022",
        "flags": [
            "NLU",
            "natural language understanding"
        ],
        "language": "en",
        "language_creators": "other",
        "license": "cc-by-4.0",
        "multilinguality": "monolingual",
        "region": "us",
        "size_categories": "n<1K",
        "source_datasets": "extended|other",
        "task_categories": [
            "text-classification",
            "token-classification",
            "question-answering"
        ],
        "task_ids": [
            "natural-language-inference",
            "word-sense-disambiguation",
            "coreference-resolution",
            "extractive-qa"
        ]
    }
}
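This JSON is the serialized form written by add_to_catalog: each __type__ key names a registered unitxt class, so the card can be reconstructed from the file by name. Below is a hedged sketch of reading it back with the fetch_artifact helper; its exact return shape has varied across unitxt versions, so the tuple unpacking is an assumption.

# Sketch (assumptions noted in comments): rebuild the card object from its catalog name.
from unitxt.artifact import fetch_artifact

card, _ = fetch_artifact("cards.gpqa.diamond")   # tuple unpacking is an assumption
print(type(card).__name__)                       # expected: TaskCard
print([type(step).__name__ for step in card.preprocess_steps])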
@@ -0,0 +1,83 @@
{
    "__type__": "task_card",
    "loader": {
        "__type__": "load_hf",
        "path": "Idavidrein/gpqa",
        "name": "gpqa_experts",
        "data_classification_policy": [
            "public"
        ]
    },
    "preprocess_steps": [
        {
            "__type__": "rename_splits",
            "mapper": {
                "train": "test"
            }
        },
        {
            "__type__": "list_field_values",
            "fields": [
                "Correct Answer",
                "Incorrect Answer 1",
                "Incorrect Answer 2",
                "Incorrect Answer 3"
            ],
            "to_field": "choices"
        },
        {
            "__type__": "shuffle_field_values",
            "field": "choices"
        },
        {
            "__type__": "rename",
            "field": "Correct Answer",
            "to_field": "answer"
        },
        {
            "__type__": "rename",
            "field": "Subdomain",
            "to_field": "topic"
        },
        {
            "__type__": "rename",
            "field": "Question",
            "to_field": "question"
        },
        {
            "__type__": "set",
            "fields": {
                "context_type": "situation"
            }
        }
    ],
    "task": "tasks.qa.multiple_choice.with_topic",
    "templates": "templates.qa.multiple_choice.with_topic.all",
    "__description__": "GPQA is a multiple-choice, Q&A dataset of very hard questions written and validated by experts in biology, physics, and chemistry. When attempting questions out of their own domain (e.g., a physicist answers a chemistry question), these experts get only 34 percent accuracy, despite spending >30m with full access to Google.",
    "__tags__": {
        "annotations_creators": "expert-generated",
        "arxiv": "2311.12022",
        "flags": [
            "NLU",
            "natural language understanding"
        ],
        "language": "en",
        "language_creators": "other",
        "license": "cc-by-4.0",
        "multilinguality": "monolingual",
        "region": "us",
        "size_categories": "n<1K",
        "source_datasets": "extended|other",
        "task_categories": [
            "text-classification",
            "token-classification",
            "question-answering"
        ],
        "task_ids": [
            "natural-language-inference",
            "word-sense-disambiguation",
            "coreference-resolution",
            "extractive-qa"
        ]
    }
}
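The experts artifact mirrors the diamond one except for the loader's name field, so a quick way to compare catalog entries is to read the JSON directly with the standard library. The file path below is hypothetical and depends on where the local catalog is written.

# Sketch: inspect a serialized card using only the standard library.
# The path is hypothetical; adjust it to your local unitxt catalog location.
import json

with open("catalog/cards/gpqa/experts.json") as f:
    card = json.load(f)

print(card["loader"]["name"])                                  # gpqa_experts
print([step["__type__"] for step in card["preprocess_steps"]])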