
Commit

Merge branch 'dev' into COG-989-cognify-tasks-arguments
hajdul88 authored Jan 15, 2025
2 parents fb8ce21 + 3494521 commit d48db24
Showing 2 changed files with 70 additions and 34 deletions.
45 changes: 11 additions & 34 deletions evals/eval_on_hotpot.py
@@ -5,39 +5,15 @@
 from deepeval.test_case import LLMTestCase
 from tqdm import tqdm
 import logging
-import cognee
-from cognee.api.v1.search import SearchType
 from cognee.infrastructure.llm.get_llm_client import get_llm_client
 from cognee.infrastructure.llm.prompts import read_query_prompt, render_prompt
 from evals.qa_dataset_utils import load_qa_dataset
 from evals.qa_metrics_utils import get_metric
+from evals.qa_context_provider_utils import qa_context_providers
 
 logger = logging.getLogger(__name__)
 
 
-async def get_context_with_cognee(instance):
-    await cognee.prune.prune_data()
-    await cognee.prune.prune_system(metadata=True)
-
-    for title, sentences in instance["context"]:
-        await cognee.add("\n".join(sentences), dataset_name="QA")
-    await cognee.cognify("QA")
-
-    search_results = await cognee.search(SearchType.INSIGHTS, query_text=instance["question"])
-    search_results_second = await cognee.search(
-        SearchType.SUMMARIES, query_text=instance["question"]
-    )
-    search_results = search_results + search_results_second
-
-    search_results_str = "\n".join([context_item["text"] for context_item in search_results])
-
-    return search_results_str
-
-
-async def get_context_without_cognee(instance):
-    return instance["context"]
-
-
 async def answer_qa_instance(instance, context_provider):
     context = await context_provider(instance)
 
@@ -88,10 +64,10 @@ async def deepeval_on_instances(instances, context_provider, eval_metric):
 
 
 async def eval_on_QA_dataset(
-    dataset_name_or_filename: str, context_provider, num_samples, eval_metric_name
+    dataset_name_or_filename: str, context_provider_name, num_samples, eval_metric_name
 ):
     dataset = load_qa_dataset(dataset_name_or_filename)
-
+    context_provider = qa_context_providers[context_provider_name]
     eval_metric = get_metric(eval_metric_name)
     instances = dataset if not num_samples else dataset[:num_samples]
 
@@ -105,18 +81,19 @@ async def eval_on_QA_dataset(
 parser = argparse.ArgumentParser()
 
 parser.add_argument("--dataset", type=str, required=True, help="Which dataset to evaluate on")
-parser.add_argument("--with_cognee", action="store_true")
+parser.add_argument(
+    "--rag_option",
+    type=str,
+    choices=qa_context_providers.keys(),
+    required=True,
+    help="RAG option to use for providing context",
+)
 parser.add_argument("--num_samples", type=int, default=500)
 parser.add_argument("--metric_name", type=str, default="Correctness")
 
 args = parser.parse_args()
 
-if args.with_cognee:
-    context_provider = get_context_with_cognee
-else:
-    context_provider = get_context_without_cognee
-
 avg_score = asyncio.run(
-    eval_on_QA_dataset(args.dataset, context_provider, args.num_samples, args.metric_name)
+    eval_on_QA_dataset(args.dataset, args.rag_option, args.num_samples, args.metric_name)
 )
 logger.info(f"Average {args.metric_name}: {avg_score}")
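
With this change the context provider is selected by name on the command line via --rag_option (one of the qa_context_providers keys) instead of the old --with_cognee flag, and eval_on_QA_dataset resolves that name through the registry defined in the new evals/qa_context_provider_utils.py below. A minimal sketch of the equivalent programmatic call; the dataset identifier "hotpot_qa" is only an assumed placeholder for whatever load_qa_dataset accepts:

import asyncio

from evals.eval_on_hotpot import eval_on_QA_dataset

# "cognee" is one of the keys registered in qa_context_providers;
# "hotpot_qa" is an assumed dataset name, not confirmed by this diff.
avg_score = asyncio.run(
    eval_on_QA_dataset("hotpot_qa", "cognee", num_samples=10, eval_metric_name="Correctness")
)
print(f"Average Correctness: {avg_score}")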
59 changes: 59 additions & 0 deletions evals/qa_context_provider_utils.py
@@ -0,0 +1,59 @@
+import cognee
+from cognee.api.v1.search import SearchType
+from cognee.infrastructure.databases.vector import get_vector_engine
+from cognee.modules.retrieval.brute_force_triplet_search import brute_force_triplet_search
+from cognee.tasks.completion.graph_query_completion import retrieved_edges_to_string
+
+
+async def get_raw_context(instance: dict) -> str:
+    return instance["context"]
+
+
+async def cognify_instance(instance: dict):
+    await cognee.prune.prune_data()
+    await cognee.prune.prune_system(metadata=True)
+
+    for title, sentences in instance["context"]:
+        await cognee.add("\n".join(sentences), dataset_name="QA")
+    await cognee.cognify("QA")
+
+
+async def get_context_with_cognee(instance: dict) -> str:
+    await cognify_instance(instance)
+
+    insights = await cognee.search(SearchType.INSIGHTS, query_text=instance["question"])
+    summaries = await cognee.search(SearchType.SUMMARIES, query_text=instance["question"])
+    search_results = insights + summaries
+
+    search_results_str = "\n".join([context_item["text"] for context_item in search_results])
+
+    return search_results_str
+
+
+async def get_context_with_simple_rag(instance: dict) -> str:
+    await cognify_instance(instance)
+
+    vector_engine = get_vector_engine()
+    found_chunks = await vector_engine.search("document_chunk_text", instance["question"], limit=5)
+
+    search_results_str = "\n".join([context_item.payload["text"] for context_item in found_chunks])
+
+    return search_results_str
+
+
+async def get_context_with_brute_force_triplet_search(instance: dict) -> str:
+    await cognify_instance(instance)
+
+    found_triplets = await brute_force_triplet_search(instance["question"], top_k=5)
+
+    search_results_str = retrieved_edges_to_string(found_triplets)
+
+    return search_results_str
+
+
+qa_context_providers = {
+    "no_rag": get_raw_context,
+    "cognee": get_context_with_cognee,
+    "simple_rag": get_context_with_simple_rag,
+    "brute_force": get_context_with_brute_force_triplet_search,
+}
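
The qa_context_providers dictionary is the single extension point: its keys are exactly the values accepted by --rag_option, and each value is an async callable that takes a QA instance dict and returns a context string. A hypothetical sketch of registering an additional provider (get_context_with_titles_only is illustrative only, not part of this commit):

# Hypothetical provider that uses only the paragraph titles as context;
# shown to illustrate the registry pattern, not part of this commit.
async def get_context_with_titles_only(instance: dict) -> str:
    return "\n".join(title for title, _sentences in instance["context"])

qa_context_providers["titles_only"] = get_context_with_titles_only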
