[langchain] agents code changes (#15278)
hwchase17 authored Dec 28, 2023
1 parent b868031 commit 90aa26a
Showing 14 changed files with 579 additions and 9 deletions.
27 changes: 23 additions & 4 deletions libs/langchain/langchain/agents/__init__.py
@@ -56,20 +56,32 @@
from langchain.agents.conversational.base import ConversationalAgent
from langchain.agents.conversational_chat.base import ConversationalChatAgent
from langchain.agents.initialize import initialize_agent
from langchain.agents.json_chat.base import create_json_chat_agent
from langchain.agents.load_tools import (
get_all_tool_names,
load_huggingface_tool,
load_tools,
)
from langchain.agents.loading import load_agent
from langchain.agents.mrkl.base import MRKLChain, ZeroShotAgent
from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
from langchain.agents.openai_functions_agent.base import (
OpenAIFunctionsAgent,
create_openai_functions_agent,
)
from langchain.agents.openai_functions_multi_agent.base import OpenAIMultiFunctionsAgent
from langchain.agents.openai_tools.base import create_openai_tools_agent
from langchain.agents.react.agent import create_react_agent
from langchain.agents.react.base import ReActChain, ReActTextWorldAgent
from langchain.agents.self_ask_with_search.base import SelfAskWithSearchChain
from langchain.agents.structured_chat.base import StructuredChatAgent
from langchain.agents.self_ask_with_search.base import (
SelfAskWithSearchChain,
create_self_ask_with_search_agent,
)
from langchain.agents.structured_chat.base import (
StructuredChatAgent,
create_structured_chat_agent,
)
from langchain.agents.tools import Tool, tool
from langchain.agents.xml.base import XMLAgent
from langchain.agents.xml.base import XMLAgent, create_xml_agent

DEPRECATED_CODE = [
"create_csv_agent",
@@ -133,4 +145,11 @@ def __getattr__(name: str) -> Any:
"load_tools",
"tool",
"XMLAgent",
"create_openai_functions_agent",
"create_xml_agent",
"create_react_agent",
"create_openai_tools_agent",
"create_self_ask_with_search_agent",
"create_json_chat_agent",
"create_structured_chat_agent",
]
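
With these additions to `__all__`, the new constructors become part of the public `langchain.agents` surface. A minimal sketch of importing them after this change (assuming a build that includes this commit):

from langchain.agents import (
    AgentExecutor,
    create_json_chat_agent,
    create_openai_functions_agent,
    create_openai_tools_agent,
    create_react_agent,
    create_self_ask_with_search_agent,
    create_structured_chat_agent,
    create_xml_agent,
)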
Empty file.
83 changes: 83 additions & 0 deletions libs/langchain/langchain/agents/json_chat/base.py
@@ -0,0 +1,83 @@
from typing import Sequence

from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts.chat import ChatPromptTemplate
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import BaseTool

from langchain.agents.format_scratchpad import format_log_to_messages
from langchain.agents.json_chat.prompt import TEMPLATE_TOOL_RESPONSE
from langchain.agents.output_parsers import JSONAgentOutputParser
from langchain.tools.render import render_text_description


def create_json_chat_agent(
llm: BaseLanguageModel, tools: Sequence[BaseTool], prompt: ChatPromptTemplate
) -> Runnable:
"""Create an agent that uses JSON to format its logic, build for Chat Models.
Examples:
.. code-block:: python
from langchain import hub
from langchain.chat_models import ChatOpenAI
from langchain.agents import AgentExecutor, create_json_chat_agent
prompt = hub.pull("hwchase17/react-chat-json")
model = ChatOpenAI()
tools = ...
agent = create_json_chat_agent(model, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools)
agent_executor.invoke({"input": "hi"})
# Using with chat history
from langchain_core.messages import AIMessage, HumanMessage
agent_executor.invoke(
{
"input": "what's my name?",
"chat_history": [
HumanMessage(content="hi! my name is bob"),
AIMessage(content="Hello Bob! How can I assist you today?"),
],
}
)
Args:
llm: LLM to use as the agent.
tools: Tools this agent has access to.
prompt: The prompt to use, must have input keys of
`tools`, `tool_names`, and `agent_scratchpad`.
Returns:
A runnable sequence representing an agent. It takes as input all the same input
variables as the prompt passed in does. It returns as output either an
AgentAction or AgentFinish.
"""
missing_vars = {"tools", "tool_names", "agent_scratchpad"}.difference(
prompt.input_variables
)
if missing_vars:
raise ValueError(f"Prompt missing required variables: {missing_vars}")

prompt = prompt.partial(
tools=render_text_description(list(tools)),
tool_names=", ".join([t.name for t in tools]),
)
llm_with_stop = llm.bind(stop=["\nObservation"])

agent = (
RunnablePassthrough.assign(
agent_scratchpad=lambda x: format_log_to_messages(
x["intermediate_steps"], template_tool_response=TEMPLATE_TOOL_RESPONSE
)
)
| prompt
| llm_with_stop
| JSONAgentOutputParser()
)
return agent
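
Since this function rejects prompts that lack `tools`, `tool_names`, or `agent_scratchpad`, here is a minimal sketch of a ChatPromptTemplate that passes that check (an illustrative assumption, not the `hwchase17/react-chat-json` hub prompt); `agent_scratchpad` is a MessagesPlaceholder because `format_log_to_messages` emits chat messages:

from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            "Respond with a markdown JSON blob choosing one of these tools:\n"
            "{tools}\nValid tool names: {tool_names}",
        ),
        ("human", "{input}"),
        MessagesPlaceholder(variable_name="agent_scratchpad"),
    ]
)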
9 changes: 9 additions & 0 deletions libs/langchain/langchain/agents/json_chat/prompt.py
@@ -0,0 +1,9 @@
# flake8: noqa
TEMPLATE_TOOL_RESPONSE = """TOOL RESPONSE:
---------------------
{observation}
USER'S INPUT
--------------------
Okay, so what is the response to my last comment? If using information obtained from the tools you must mention it explicitly without mentioning the tool names - I have forgotten all TOOL RESPONSES! Remember to respond with a markdown code snippet of a json blob with a single action, and NOTHING else - even if you just want to respond to the user. Do NOT respond with anything except a JSON snippet no matter what!"""
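
For context, `create_json_chat_agent` above passes this constant to `format_log_to_messages` as `template_tool_response`; each tool observation is substituted into the `{observation}` slot before being fed back to the model as part of `agent_scratchpad`. A tiny illustrative sketch:

from langchain.agents.json_chat.prompt import TEMPLATE_TOOL_RESPONSE

# One tool result ("42") rendered the way the scratchpad formatter renders it.
tool_turn = TEMPLATE_TOOL_RESPONSE.format(observation="42")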
71 changes: 71 additions & 0 deletions libs/langchain/langchain/agents/openai_functions_agent/base.py
@@ -15,6 +15,7 @@
MessagesPlaceholder,
)
from langchain_core.pydantic_v1 import root_validator
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import BaseTool

from langchain.agents import BaseSingleActionAgent
@@ -226,3 +227,73 @@ def from_llm_and_tools(
callback_manager=callback_manager,
**kwargs,
)


def create_openai_functions_agent(
llm: BaseLanguageModel, tools: Sequence[BaseTool], prompt: ChatPromptTemplate
) -> Runnable:
"""Create an agent that uses OpenAI function calling.
Examples:
Creating an agent with no memory
.. code-block:: python
from langchain.chat_models import ChatOpenAI
from langchain.agents import AgentExecutor, create_openai_functions_agent
from langchain import hub
prompt = hub.pull("hwchase17/openai-functions-agent")
model = ChatOpenAI()
tools = ...
agent = create_openai_functions_agent(model, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools)
agent_executor.invoke({"input": "hi"})
# Using with chat history
from langchain_core.messages import AIMessage, HumanMessage
agent_executor.invoke(
{
"input": "what's my name?",
"chat_history": [
HumanMessage(content="hi! my name is bob"),
AIMessage(content="Hello Bob! How can I assist you today?"),
],
}
)
Args:
llm: LLM to use as the agent. Should work with OpenAI function calling,
so either be an OpenAI model that supports that or a wrapper of
a different model that adds in equivalent support.
tools: Tools this agent has access to.
prompt: The prompt to use, must have an input key of `agent_scratchpad`.
Returns:
A runnable sequence representing an agent. It takes as input all the same input
variables as the prompt passed in does. It returns as output either an
AgentAction or AgentFinish.
"""
if "agent_scratchpad" not in prompt.input_variables:
raise ValueError(
"Prompt must have input variable `agent_scratchpad`, but wasn't found. "
f"Found {prompt.input_variables} instead."
)
llm_with_tools = llm.bind(
functions=[format_tool_to_openai_function(t) for t in tools]
)
agent = (
RunnablePassthrough.assign(
agent_scratchpad=lambda x: format_to_openai_function_messages(
x["intermediate_steps"]
)
)
| prompt
| llm_with_tools
| OpenAIFunctionsAgentOutputParser()
)
return agent
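
The runnable returned above can also be invoked directly, outside AgentExecutor; a hedged sketch, assuming the prompt takes an `input` variable as the hub prompt in the docstring does (the `intermediate_steps` key feeds the `agent_scratchpad` assignment):

# `agent` is the runnable returned by create_openai_functions_agent.
step = agent.invoke({"input": "what is 2 + 2?", "intermediate_steps": []})
# `step` is an AgentAction (a function call to execute) or an AgentFinish (a final answer).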
Empty file.
79 changes: 79 additions & 0 deletions libs/langchain/langchain/agents/openai_tools/base.py
@@ -0,0 +1,79 @@
from typing import Sequence

from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts.chat import ChatPromptTemplate
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import BaseTool

from langchain.agents.format_scratchpad.openai_tools import (
format_to_openai_tool_messages,
)
from langchain.agents.output_parsers.openai_tools import OpenAIToolsAgentOutputParser
from langchain.tools.render import format_tool_to_openai_tool


def create_openai_tools_agent(
llm: BaseLanguageModel, tools: Sequence[BaseTool], prompt: ChatPromptTemplate
) -> Runnable:
"""Create an agent that uses OpenAI tools.
Examples:
.. code-block:: python
from langchain import hub
from langchain.chat_models import ChatOpenAI
from langchain.agents import AgentExecutor, create_openai_tools_agent
prompt = hub.pull("hwchase17/openai-tools-agent")
model = ChatOpenAI()
tools = ...
agent = create_openai_tools_agent(model, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools)
agent_executor.invoke({"input": "hi"})
# Using with chat history
from langchain_core.messages import AIMessage, HumanMessage
agent_executor.invoke(
{
"input": "what's my name?",
"chat_history": [
HumanMessage(content="hi! my name is bob"),
AIMessage(content="Hello Bob! How can I assist you today?"),
],
}
)
Args:
llm: LLM to use as the agent.
tools: Tools this agent has access to.
prompt: The prompt to use, must have an input key of `agent_scratchpad`.
Returns:
A runnable sequence representing an agent. It takes as input all the same input
variables as the prompt passed in does. It returns as output either an
AgentAction or AgentFinish.
"""
missing_vars = {"agent_scratchpad"}.difference(prompt.input_variables)
if missing_vars:
raise ValueError(f"Prompt missing required variables: {missing_vars}")

llm_with_tools = llm.bind(
tools=[format_tool_to_openai_tool(tool) for tool in tools]
)

agent = (
RunnablePassthrough.assign(
agent_scratchpad=lambda x: format_to_openai_tool_messages(
x["intermediate_steps"]
)
)
| prompt
| llm_with_tools
| OpenAIToolsAgentOutputParser()
)
return agent
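
As with the functions agent, `agent_scratchpad` here must be a MessagesPlaceholder, because `format_to_openai_tool_messages` returns a list of messages. A minimal sketch of a prompt satisfying the single required variable (an illustrative assumption, not the `hwchase17/openai-tools-agent` hub prompt):

from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a helpful assistant."),
        ("human", "{input}"),
        MessagesPlaceholder(variable_name="agent_scratchpad"),
    ]
)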
79 changes: 79 additions & 0 deletions libs/langchain/langchain/agents/react/agent.py
@@ -0,0 +1,79 @@
from __future__ import annotations

from typing import Sequence

from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import BaseTool

from langchain.agents.format_scratchpad import format_log_to_str
from langchain.agents.output_parsers import ReActSingleInputOutputParser
from langchain.tools.render import render_text_description


def create_react_agent(
llm: BaseLanguageModel, tools: Sequence[BaseTool], prompt: BasePromptTemplate
) -> Runnable:
"""Create an agent that uses ReAct prompting.
Examples:
.. code-block:: python
from langchain import hub
from langchain.llms import OpenAI
from langchain.agents import AgentExecutor, create_react_agent
prompt = hub.pull("hwchase17/react")
model = OpenAI()
tools = ...
agent = create_react_agent(model, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools)
agent_executor.invoke({"input": "hi"})
# Use with chat history
from langchain_core.messages import AIMessage, HumanMessage
agent_executor.invoke(
{
"input": "what's my name?",
# Notice that chat_history is a string
# since this prompt is aimed at LLMs, not chat models
"chat_history": "Human: My name is Bob\nAI: Hello Bob!",
}
)
Args:
llm: LLM to use as the agent.
tools: Tools this agent has access to.
prompt: The prompt to use, must have input keys of
`tools`, `tool_names`, and `agent_scratchpad`.
Returns:
A runnable sequence representing an agent. It takes as input all the same input
variables as the prompt passed in does. It returns as output either an
AgentAction or AgentFinish.
"""
missing_vars = {"tools", "tool_names", "agent_scratchpad"}.difference(
prompt.input_variables
)
if missing_vars:
raise ValueError(f"Prompt missing required variables: {missing_vars}")

prompt = prompt.partial(
tools=render_text_description(list(tools)),
tool_names=", ".join([t.name for t in tools]),
)
llm_with_stop = llm.bind(stop=["\nObservation"])
agent = (
RunnablePassthrough.assign(
agent_scratchpad=lambda x: format_log_to_str(x["intermediate_steps"]),
)
| prompt
| llm_with_stop
| ReActSingleInputOutputParser()
)
return agent
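
Because `format_log_to_str` renders the scratchpad as plain text, a string PromptTemplate is sufficient here. A minimal sketch that passes the `missing_vars` check above (an illustrative assumption, not the `hwchase17/react` hub prompt):

from langchain_core.prompts import PromptTemplate

prompt = PromptTemplate.from_template(
    """Answer the question using the tools below.

{tools}

Use this format:
Thought: reason about what to do next
Action: one of [{tool_names}]
Action Input: the input to the action
Observation: the action's result
... (repeat Thought/Action/Action Input/Observation as needed)
Thought: I now know the final answer
Final Answer: the answer

Question: {input}
{agent_scratchpad}"""
)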
