From bece9f4f03843dcde7928bcd84c93820b3526fae Mon Sep 17 00:00:00 2001
From: minmin-intel
Date: Sat, 28 Sep 2024 00:37:33 -0700
Subject: [PATCH] Fix tool_render with direct import from pydantic (#750)

* fix tool renderer bug

Signed-off-by: minmin-intel

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* clean up test script

Signed-off-by: minmin-intel

* switch to pydantic direct import

Signed-off-by: minmin-intel

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

---------

Signed-off-by: minmin-intel
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 comps/agent/langchain/src/strategy/planexec/planner.py | 2 +-
 comps/agent/langchain/src/strategy/ragagent/planner.py | 4 ++--
 comps/agent/langchain/src/tools.py                     | 4 +---
 tests/agent/test_agent_langchain_on_intel_hpu.sh       | 8 ++------
 4 files changed, 6 insertions(+), 12 deletions(-)
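Reviewer note (placed after the diffstat tearline, so `git am` ignores it): the fix swaps imports from the deprecated `langchain_core.pydantic_v1` / `langchain.pydantic_v1` compatibility shims for direct `pydantic` imports. The shims hand back pydantic-v1-style classes, which the tool renderer mishandled (the "fix tool renderer bug" entry above). A minimal sketch of the pattern tools.py relies on after this change; the tool name, field, and function below are hypothetical, not code from this repo:

    # Hypothetical sketch: building a StructuredTool args schema with
    # pydantic v2's create_model, imported directly as tools.py now does.
    from langchain.tools import StructuredTool
    from pydantic import Field, create_model

    def search_kb(query: str) -> str:
        """Stand-in for a generated request function."""
        return f"results for {query}"

    # create_model yields a real pydantic v2 model; the pydantic_v1 shim
    # yielded a v1-compatibility class that broke schema rendering.
    SearchArgs = create_model("SearchArgs", query=(str, Field(description="search query")))

    tool = StructuredTool.from_function(func=search_kb, name="search_kb", args_schema=SearchArgs)
    # roughly: {'query': {'description': 'search query', 'type': 'string', ...}}
    print(tool.args)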
diff --git a/comps/agent/langchain/src/strategy/planexec/planner.py b/comps/agent/langchain/src/strategy/planexec/planner.py
index c33c906f4..4d872e9e7 100644
--- a/comps/agent/langchain/src/strategy/planexec/planner.py
+++ b/comps/agent/langchain/src/strategy/planexec/planner.py
@@ -13,12 +13,12 @@
 from langchain_core.output_parsers.openai_tools import PydanticToolsParser
 from langchain_core.outputs import Generation
 from langchain_core.prompts import PromptTemplate
-from langchain_core.pydantic_v1 import BaseModel, Field
 from langchain_core.utils.json import parse_partial_json
 from langchain_huggingface import ChatHuggingFace
 from langgraph.checkpoint.memory import MemorySaver
 from langgraph.graph import END, START, StateGraph
 from langgraph.graph.message import add_messages
+from pydantic import BaseModel, Field
 
 from ...global_var import threads_global_kv
 from ...utils import has_multi_tool_inputs, tool_renderer
diff --git a/comps/agent/langchain/src/strategy/ragagent/planner.py b/comps/agent/langchain/src/strategy/ragagent/planner.py
index 8e3f27255..7f967e4ad 100644
--- a/comps/agent/langchain/src/strategy/ragagent/planner.py
+++ b/comps/agent/langchain/src/strategy/ragagent/planner.py
@@ -8,13 +8,13 @@
 from langchain_core.output_parsers import JsonOutputParser, StrOutputParser
 from langchain_core.output_parsers.openai_tools import PydanticToolsParser
 from langchain_core.prompts import PromptTemplate
-from langchain_core.pydantic_v1 import BaseModel, Field
 from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint
 from langchain_openai import ChatOpenAI
 from langgraph.checkpoint.memory import MemorySaver
 from langgraph.graph import END, START, StateGraph
 from langgraph.graph.message import add_messages
 from langgraph.prebuilt import ToolNode, tools_condition
+from pydantic import BaseModel, Field
 
 from ..base_agent import BaseAgent
 from .prompt import DOC_GRADER_PROMPT, RAG_PROMPT, QueryWriterLlamaPrompt
@@ -366,7 +366,7 @@ def __call__(self, state) -> Literal["generate", "rewrite"]:
         print("@@@@ Score: ", score)
 
         # if score.startswith("yes"):
-        if "yes" in score:
+        if "yes" in score.lower():
             print("---DECISION: DOCS RELEVANT---")
             return {"doc_score": "generate"}
 
diff --git a/comps/agent/langchain/src/tools.py b/comps/agent/langchain/src/tools.py
index 6d3af9ebe..9188f384e 100644
--- a/comps/agent/langchain/src/tools.py
+++ b/comps/agent/langchain/src/tools.py
@@ -7,11 +7,9 @@
 import sys
 
 import yaml
-
-# from pydantic import create_model, Field
-from langchain.pydantic_v1 import BaseModel, Field, create_model
 from langchain.tools import BaseTool, StructuredTool
 from langchain_community.agent_toolkits.load_tools import load_tools
+from pydantic import BaseModel, Field, create_model
 
 
 def generate_request_function(url):
diff --git a/tests/agent/test_agent_langchain_on_intel_hpu.sh b/tests/agent/test_agent_langchain_on_intel_hpu.sh
index a038e52cc..a8b791474 100644
--- a/tests/agent/test_agent_langchain_on_intel_hpu.sh
+++ b/tests/agent/test_agent_langchain_on_intel_hpu.sh
@@ -8,7 +8,7 @@ WORKPATH=$(dirname "$PWD")
 LOG_PATH="$WORKPATH/tests"
 ip_address=$(hostname -I | awk '{print $1}')
 tgi_port=8085
-tgi_volume=$WORKPATH/data #/data2/cache/hub/Meta-Llama-3.1-70B-Instruct #$HF_CACHE_DIR #
+tgi_volume=$WORKPATH/data
 
 export agent_image="opea/agent-langchain:comps"
 export agent_container_name="test-comps-agent-endpoint"
@@ -39,10 +39,9 @@ function build_docker_images() {
 }
 
 function start_tgi_service() {
-    # redis endpoint
     echo "token is ${HF_TOKEN}"
 
-    #single card
+    #multi cards
    echo "start tgi gaudi service"
     docker run -d --runtime=habana --name "test-comps-tgi-gaudi-service" -p $tgi_port:80 -v $tgi_volume:/data -e HF_TOKEN=$HF_TOKEN -e HABANA_VISIBLE_DEVICES=0,1,2,3 -e OMPI_MCA_btl_vader_single_copy_mechanism=none -e PT_HPU_ENABLE_LAZY_COLLECTIVES=true -e http_proxy=$http_proxy -e https_proxy=$https_proxy --cap-add=sys_nice --ipc=host ghcr.io/huggingface/tgi-gaudi:2.0.5 --model-id $model --max-input-tokens 4096 --max-total-tokens 8192 --sharded true --num-shard 4
     sleep 5s
@@ -62,7 +61,6 @@ function start_tgi_service() {
 
 function start_react_langchain_agent_service() {
     echo "Starting react_langchain agent microservice"
-    # docker run -d --runtime=runc --name="test-comps-agent-endpoint" -v $WORKPATH/comps/agent/langchain/tools:/home/user/comps/agent/langchain/tools -p 9095:9095 --ipc=host -e port=9095 -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e strategy=react_langchain -e llm_endpoint_url=http://${ip_address}:${tgi_port} -e llm_engine=tgi -e recursion_limit=10 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/agent-langchain:comps
     docker compose -f $WORKPATH/tests/agent/react_langchain.yaml up -d
     sleep 5s
     docker logs test-comps-agent-endpoint
@@ -72,7 +70,6 @@ function start_react_langchain_agent_service() {
 
 function start_react_langgraph_agent_service() {
     echo "Starting react_langgraph agent microservice"
-    # docker run -d --runtime=runc --name="test-comps-agent-endpoint" -v $WORKPATH/comps/agent/langchain/tools:/home/user/comps/agent/langchain/tools -p 9095:9095 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e strategy=react_llama -e llm_endpoint_url=http://${ip_address}:${tgi_port} -e llm_engine=tgi -e recursion_limit=10 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/agent-langchain:comps
     docker compose -f $WORKPATH/tests/agent/reactllama.yaml up -d
     sleep 5s
     docker logs test-comps-agent-endpoint
@@ -90,7 +87,6 @@ function start_react_langgraph_agent_service_openai() {
 
 function start_ragagent_agent_service() {
     echo "Starting rag agent microservice"
-    # docker run -d --runtime=runc --name="test-comps-agent-endpoint" -v $WORKPATH/comps/agent/langchain/tools:/home/user/comps/agent/langchain/tools -p 9095:9095 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e strategy=rag_agent_llama -e llm_endpoint_url=http://${ip_address}:${tgi_port} -e llm_engine=tgi -e recursion_limit=10 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/agent-langchain:comps
     docker compose -f $WORKPATH/tests/agent/ragagent.yaml up -d
     sleep 5s
     docker logs test-comps-agent-endpoint
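Post-diff note: the one behavioral change beyond the import switch is in the rag agent's document grader, which now lowercases the LLM's score before matching, so "Yes" or "YES" also route to generation. A hypothetical repro of the old miss; the `score` string is illustrative, not captured output:

    # Hypothetical grader output; instruction-tuned models often capitalize.
    score = "Yes, the retrieved docs are relevant."
    print("yes" in score)          # False -- the old check missed "Yes"
    print("yes" in score.lower())  # True  -- the patched, case-insensitive check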