Skip to content

Commit

Permalink
Fix tool_renderer with direct import from pydantic (opea-project#750)
Browse files Browse the repository at this point in the history
* fix tool renderer bug

Signed-off-by: minmin-intel <[email protected]>

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* clean up test script

Signed-off-by: minmin-intel <[email protected]>

* switch to pydantic direct import

Signed-off-by: minmin-intel <[email protected]>

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

---------

Signed-off-by: minmin-intel <[email protected]>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
  • Loading branch information
minmin-intel and pre-commit-ci[bot] authored Sep 28, 2024
1 parent f6f620a commit bece9f4
Show file tree
Hide file tree
Showing 4 changed files with 6 additions and 12 deletions.
2 changes: 1 addition & 1 deletion comps/agent/langchain/src/strategy/planexec/planner.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,12 +13,12 @@
from langchain_core.output_parsers.openai_tools import PydanticToolsParser
from langchain_core.outputs import Generation
from langchain_core.prompts import PromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.utils.json import parse_partial_json
from langchain_huggingface import ChatHuggingFace
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import END, START, StateGraph
from langgraph.graph.message import add_messages
from pydantic import BaseModel, Field

from ...global_var import threads_global_kv
from ...utils import has_multi_tool_inputs, tool_renderer
Expand Down
4 changes: 2 additions & 2 deletions comps/agent/langchain/src/strategy/ragagent/planner.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,13 +8,13 @@
from langchain_core.output_parsers import JsonOutputParser, StrOutputParser
from langchain_core.output_parsers.openai_tools import PydanticToolsParser
from langchain_core.prompts import PromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint
from langchain_openai import ChatOpenAI
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import END, START, StateGraph
from langgraph.graph.message import add_messages
from langgraph.prebuilt import ToolNode, tools_condition
from pydantic import BaseModel, Field

from ..base_agent import BaseAgent
from .prompt import DOC_GRADER_PROMPT, RAG_PROMPT, QueryWriterLlamaPrompt
Expand Down Expand Up @@ -366,7 +366,7 @@ def __call__(self, state) -> Literal["generate", "rewrite"]:
print("@@@@ Score: ", score)

# if score.startswith("yes"):
if "yes" in score:
if "yes" in score.lower():
print("---DECISION: DOCS RELEVANT---")
return {"doc_score": "generate"}

Expand Down
4 changes: 1 addition & 3 deletions comps/agent/langchain/src/tools.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,11 +7,9 @@
import sys

import yaml

# from pydantic import create_model, Field
from langchain.pydantic_v1 import BaseModel, Field, create_model
from langchain.tools import BaseTool, StructuredTool
from langchain_community.agent_toolkits.load_tools import load_tools
from pydantic import BaseModel, Field, create_model


def generate_request_function(url):
Expand Down
8 changes: 2 additions & 6 deletions tests/agent/test_agent_langchain_on_intel_hpu.sh
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ WORKPATH=$(dirname "$PWD")
LOG_PATH="$WORKPATH/tests"
ip_address=$(hostname -I | awk '{print $1}')
tgi_port=8085
tgi_volume=$WORKPATH/data #/data2/cache/hub/Meta-Llama-3.1-70B-Instruct #$HF_CACHE_DIR #
tgi_volume=$WORKPATH/data

export agent_image="opea/agent-langchain:comps"
export agent_container_name="test-comps-agent-endpoint"
Expand Down Expand Up @@ -39,10 +39,9 @@ function build_docker_images() {
}

function start_tgi_service() {
# redis endpoint
echo "token is ${HF_TOKEN}"

#single card
#multi cards
echo "start tgi gaudi service"
docker run -d --runtime=habana --name "test-comps-tgi-gaudi-service" -p $tgi_port:80 -v $tgi_volume:/data -e HF_TOKEN=$HF_TOKEN -e HABANA_VISIBLE_DEVICES=0,1,2,3 -e OMPI_MCA_btl_vader_single_copy_mechanism=none -e PT_HPU_ENABLE_LAZY_COLLECTIVES=true -e http_proxy=$http_proxy -e https_proxy=$https_proxy --cap-add=sys_nice --ipc=host ghcr.io/huggingface/tgi-gaudi:2.0.5 --model-id $model --max-input-tokens 4096 --max-total-tokens 8192 --sharded true --num-shard 4
sleep 5s
Expand All @@ -62,7 +61,6 @@ function start_tgi_service() {

function start_react_langchain_agent_service() {
echo "Starting react_langchain agent microservice"
# docker run -d --runtime=runc --name="test-comps-agent-endpoint" -v $WORKPATH/comps/agent/langchain/tools:/home/user/comps/agent/langchain/tools -p 9095:9095 --ipc=host -e port=9095 -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e strategy=react_langchain -e llm_endpoint_url=http://${ip_address}:${tgi_port} -e llm_engine=tgi -e recursion_limit=10 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/agent-langchain:comps
docker compose -f $WORKPATH/tests/agent/react_langchain.yaml up -d
sleep 5s
docker logs test-comps-agent-endpoint
Expand All @@ -72,7 +70,6 @@ function start_react_langchain_agent_service() {

function start_react_langgraph_agent_service() {
echo "Starting react_langgraph agent microservice"
# docker run -d --runtime=runc --name="test-comps-agent-endpoint" -v $WORKPATH/comps/agent/langchain/tools:/home/user/comps/agent/langchain/tools -p 9095:9095 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e strategy=react_llama -e llm_endpoint_url=http://${ip_address}:${tgi_port} -e llm_engine=tgi -e recursion_limit=10 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/agent-langchain:comps
docker compose -f $WORKPATH/tests/agent/reactllama.yaml up -d
sleep 5s
docker logs test-comps-agent-endpoint
Expand All @@ -90,7 +87,6 @@ function start_react_langgraph_agent_service_openai() {

function start_ragagent_agent_service() {
echo "Starting rag agent microservice"
# docker run -d --runtime=runc --name="test-comps-agent-endpoint" -v $WORKPATH/comps/agent/langchain/tools:/home/user/comps/agent/langchain/tools -p 9095:9095 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e strategy=rag_agent_llama -e llm_endpoint_url=http://${ip_address}:${tgi_port} -e llm_engine=tgi -e recursion_limit=10 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/agent-langchain:comps
docker compose -f $WORKPATH/tests/agent/ragagent.yaml up -d
sleep 5s
docker logs test-comps-agent-endpoint
Expand Down

0 comments on commit bece9f4

Please sign in to comment.