From bd257319a50e9718633d1fcb4a6824054a08b8ce Mon Sep 17 00:00:00 2001
From: Bagatur
Date: Mon, 6 Nov 2023 18:07:54 -0800
Subject: [PATCH 01/22] works

---
 .../openai_assistant/__init__.py | 0
 .../openai_assistant/base.py | 81 +++++++++++++++++++
 2 files changed, 81 insertions(+)
 create mode 100644 libs/experimental/langchain_experimental/openai_assistant/__init__.py
 create mode 100644 libs/experimental/langchain_experimental/openai_assistant/base.py

diff --git a/libs/experimental/langchain_experimental/openai_assistant/__init__.py b/libs/experimental/langchain_experimental/openai_assistant/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/libs/experimental/langchain_experimental/openai_assistant/base.py b/libs/experimental/langchain_experimental/openai_assistant/base.py
new file mode 100644
index 0000000000000..9ed6fbbd849e3
--- /dev/null
+++ b/libs/experimental/langchain_experimental/openai_assistant/base.py
@@ -0,0 +1,81 @@
+from __future__ import annotations
+
+from time import sleep
+from typing import TYPE_CHECKING, Dict, List, Optional, Union
+
+from langchain.pydantic_v1 import root_validator
+from langchain.schema.runnable import RunnableConfig, RunnableSerializable
+
+if TYPE_CHECKING:
+    import openai
+
+
+class OpenAIAssistantRunnable(RunnableSerializable[Union[List[dict], str], list]):
+    client: Optional[openai.OpenAI] = None
+    name: str
+    instructions: str
+    tools: list
+    model: str
+    thread_id: Optional[str] = None
+    assistant_id: Optional[str] = None
+    run_id: Optional[str] = None
+    run_instructions: Optional[str] = None
+
+    @root_validator()
+    def validate_environment(cls, values: Dict) -> Dict:
+        if not values["client"]:
+            try:
+                import openai
+            except ImportError as e:
+                raise ImportError() from e
+
+            values["client"] = openai.OpenAI()
+        return values
+
+    def create(self, run_instructions: Optional[str] = None) -> None:
+        if not self.assistant_id:
+            assistant = self.client.beta.assistants.create(
+                name=self.name,
+                instructions=self.instructions,
+                tools=self.tools,
+                model=self.model,
+            )
+            self.assistant_id = assistant.id
+        if not self.thread_id:
+            thread = self.client.beta.threads.create()
+            self.thread_id = thread.id
+
+    def invoke(
+        self, input: Union[str, List[dict]], config: Optional[RunnableConfig] = None
+    ) -> List:
+        if isinstance(input, str):
+            msg = self.client.beta.threads.messages.create(
+                self.thread_id, content=input, role="user"
+            )
+            run = self.client.beta.threads.runs.create(
+                self.thread_id,
+                assistant_id=self.assistant_id,
+                instructions=self.run_instructions,
+            )
+            self.run_id = run.id
+
+        else:
+            run = self.client.beta.threads.runs.submit_tool_outputs(
+                thread_id=self.thread_id,
+                run_id=self.run_id,
+                tool_outputs=input,
+            )
+        return self._list_steps()
+
+    def _list_steps(self) -> List:
+        # TODO: Pagination
+        in_progress = True
+        while in_progress:
+            steps = self.client.beta.threads.runs.steps.list(
+                self.run_id, thread_id=self.thread_id, order="asc"
+            )
+            in_progress = not steps.data or steps.data[-1].status == "in_progress"
+            if in_progress:
+                sleep(1)
+
+        return steps.data

From 671a90b7435804eb1429a12d4b51d5185441ef5b Mon Sep 17 00:00:00 2001
From: Bagatur
Date: Mon, 6 Nov 2023 18:11:17 -0800
Subject: [PATCH 02/22] cr

---
 .../langchain_experimental/openai_assistant/base.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/libs/experimental/langchain_experimental/openai_assistant/base.py b/libs/experimental/langchain_experimental/openai_assistant/base.py
index 9ed6fbbd849e3..850e0782bf913
100644 --- a/libs/experimental/langchain_experimental/openai_assistant/base.py +++ b/libs/experimental/langchain_experimental/openai_assistant/base.py @@ -32,7 +32,7 @@ def validate_environment(cls, values: Dict) -> Dict: values["client"] = openai.OpenAI() return values - def create(self, run_instructions: Optional[str] = None) -> None: + def create(self) -> None: if not self.assistant_id: assistant = self.client.beta.assistants.create( name=self.name, From 53308aa938ecaef93552bd023a85ea05f57b7366 Mon Sep 17 00:00:00 2001 From: Bagatur Date: Mon, 6 Nov 2023 20:29:12 -0800 Subject: [PATCH 03/22] wip --- .../openai_assistant/__init__.py | 3 + .../openai_assistant/base.py | 78 ++++++++++++++----- libs/langchain/langchain/agents/agent.py | 70 +++++++++++++++++ 3 files changed, 132 insertions(+), 19 deletions(-) diff --git a/libs/experimental/langchain_experimental/openai_assistant/__init__.py b/libs/experimental/langchain_experimental/openai_assistant/__init__.py index e69de29bb2d1d..84ab8035ac393 100644 --- a/libs/experimental/langchain_experimental/openai_assistant/__init__.py +++ b/libs/experimental/langchain_experimental/openai_assistant/__init__.py @@ -0,0 +1,3 @@ +from langchain_experimental.openai_assistant.base import OpenAIAssistantRunnable + +__all__ = ["OpenAIAssistantRunnable"] diff --git a/libs/experimental/langchain_experimental/openai_assistant/base.py b/libs/experimental/langchain_experimental/openai_assistant/base.py index 850e0782bf913..957dfacc4cfaf 100644 --- a/libs/experimental/langchain_experimental/openai_assistant/base.py +++ b/libs/experimental/langchain_experimental/openai_assistant/base.py @@ -1,15 +1,26 @@ from __future__ import annotations +import json from time import sleep from typing import TYPE_CHECKING, Dict, List, Optional, Union from langchain.pydantic_v1 import root_validator +from langchain.schema.agent import AgentAction, AgentFinish from langchain.schema.runnable import RunnableConfig, RunnableSerializable if TYPE_CHECKING: import openai +class OpenAIAssistantFinish(AgentFinish): + run_id: str + + +class OpenAIAssistantAction(AgentAction): + tool_call_id: str + run_id: str + + class OpenAIAssistantRunnable(RunnableSerializable[Union[List[dict], str], list]): client: Optional[openai.OpenAI] = None name: str @@ -19,7 +30,8 @@ class OpenAIAssistantRunnable(RunnableSerializable[Union[List[dict], str], list] thread_id: Optional[str] = None assistant_id: Optional[str] = None run_id: Optional[str] = None - run_instructions: Optional[str] = None + poll_rate: int = 1 + as_agent: bool = False @root_validator() def validate_environment(cls, values: Dict) -> Dict: @@ -45,37 +57,65 @@ def create(self) -> None: thread = self.client.beta.threads.create() self.thread_id = thread.id - def invoke( - self, input: Union[str, List[dict]], config: Optional[RunnableConfig] = None - ) -> List: - if isinstance(input, str): + def invoke(self, input: Any, config: Optional[RunnableConfig] = None) -> List: + if "run_id" not in input: msg = self.client.beta.threads.messages.create( - self.thread_id, content=input, role="user" + self.thread_id, content=input["content"], role="user" ) run = self.client.beta.threads.runs.create( self.thread_id, assistant_id=self.assistant_id, - instructions=self.run_instructions, + instructions=input.get("run_instructions"), ) - self.run_id = run.id else: run = self.client.beta.threads.runs.submit_tool_outputs( - thread_id=self.thread_id, - run_id=self.run_id, - tool_outputs=input, + thread_id=self.thread_id, **input ) - return self._list_steps() + 
return self._get_response(run.id) - def _list_steps(self) -> List: + def _get_response(self, run_id: str) -> List: # TODO: Pagination in_progress = True while in_progress: - steps = self.client.beta.threads.runs.steps.list( - self.run_id, thread_id=self.thread_id, order="asc" - ) - in_progress = not steps.data or steps.data[-1].status == "in_progress" + run = self._retrieve_run(run_id) + in_progress = run.status in ("in_progress", "queued") if in_progress: - sleep(1) + sleep(self.poll_rate) + if run.status == "completed": + messages = self.client.beta.threads.messages.list( + self.thread_id, order="asc" + ) + new_messages = [msg for msg in messages if msg.run_id == run_id] + if not self.as_agent: + return new_messages + answer = "".join( + msg_content.text.value + for msg in new_messages + for msg_content in msg.content + ) + return OpenAIAssistantFinish( + return_values={"output": answer}, log="", run_id=run_id + ) + elif run.status == "requires_action": + if not self.as_agent: + return run.required_action.submit_tool_outputs + actions = [] + for tool_call in run.required_action.submit_tool_outputs.tool_calls: + function = tool_call.function + args = json.loads(function.arguments) + actions.append( + OpenAIAssistantAction( + tool=function.name, + tool_input=args, + tool_call_id=tool_call.id, + log="", + run_id=run_id, + ) + ) + return actions + else: + raise ValueError(run.dict()) - return steps.data + def _retrieve_run(self, run_id: str) -> Any: + return self.client.beta.threads.runs.retrieve(run_id, thread_id=self.thread_id) diff --git a/libs/langchain/langchain/agents/agent.py b/libs/langchain/langchain/agents/agent.py index 86f45ed244d90..83601e1b055d0 100644 --- a/libs/langchain/langchain/agents/agent.py +++ b/libs/langchain/langchain/agents/agent.py @@ -319,6 +319,76 @@ def tool_run_logging_kwargs(self) -> Dict: return {} +class RunnableMultiActionAgent(BaseMultiActionAgent): + """Agent powered by runnables.""" + + runnable: Runnable[dict, Union[List[AgentAction], AgentFinish]] + """Runnable to call to get agent action.""" + _input_keys: List[str] = [] + """Input keys.""" + + class Config: + """Configuration for this pydantic object.""" + + arbitrary_types_allowed = True + + @property + def return_values(self) -> List[str]: + """Return values of the agent.""" + return [] + + @property + def input_keys(self) -> List[str]: + """Return the input keys. + + Returns: + List of input keys. + """ + return self._input_keys + + def plan( + self, + intermediate_steps: List[Tuple[AgentAction, str]], + callbacks: Callbacks = None, + **kwargs: Any, + ) -> Union[List[AgentAction], AgentFinish]: + """Given input, decided what to do. + + Args: + intermediate_steps: Steps the LLM has taken to date, + along with the observations. + callbacks: Callbacks to run. + **kwargs: User inputs. + + Returns: + Action specifying what tool to use. + """ + inputs = {**kwargs, **{"intermediate_steps": intermediate_steps}} + output = self.runnable.invoke(inputs, config={"callbacks": callbacks}) + return output + + async def aplan( + self, + intermediate_steps: List[Tuple[AgentAction, str]], + callbacks: Callbacks = None, + **kwargs: Any, + ) -> Union[List[AgentAction], AgentFinish]: + """Given input, decided what to do. + + Args: + intermediate_steps: Steps the LLM has taken to date, + along with observations + callbacks: Callbacks to run. + **kwargs: User inputs. + + Returns: + Action specifying what tool to use. 
+ """ + inputs = {**kwargs, **{"intermediate_steps": intermediate_steps}} + output = await self.runnable.ainvoke(inputs, config={"callbacks": callbacks}) + return output + + class AgentOutputParser(BaseOutputParser): """Base class for parsing agent output into agent action/finish.""" From e5a4eacbb34903fddf151314db9068d0de200625 Mon Sep 17 00:00:00 2001 From: Bagatur Date: Mon, 6 Nov 2023 20:33:47 -0800 Subject: [PATCH 04/22] cookbook --- cookbook/openai_v1_cookbook.ipynb | 119 ++++++++++++++++++++++++++++++ 1 file changed, 119 insertions(+) diff --git a/cookbook/openai_v1_cookbook.ipynb b/cookbook/openai_v1_cookbook.ipynb index de1675d7ffb38..4d21c47711d4a 100644 --- a/cookbook/openai_v1_cookbook.ipynb +++ b/cookbook/openai_v1_cookbook.ipynb @@ -78,6 +78,125 @@ ")" ] }, + { + "cell_type": "markdown", + "id": "210f8248-fcf3-4052-a4a3-0684e08f8785", + "metadata": {}, + "source": [ + "## [OpenAI assistants](https://platform.openai.com/docs/assistants/overview)\n", + "\n", + "> The Assistants API allows you to build AI assistants within your own applications. An Assistant has instructions and can leverage models, tools, and knowledge to respond to user queries. The Assistants API currently supports three types of tools: Code Interpreter, Retrieval, and Function calling" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "7a20a008-49ac-46d2-aa26-b270118af5ea", + "metadata": {}, + "outputs": [ + { + "ename": "ValueError", + "evalue": "{'id': 'run_6jCp1r4M684TWy77rNFnY9WV', 'assistant_id': 'asst_CpWPBHGRo5QR5XhLeFZsb4uL', 'cancelled_at': None, 'completed_at': None, 'created_at': 1699331563, 'expires_at': None, 'failed_at': 1699331575, 'file_ids': [], 'instructions': 'You are a personal math tutor. Write and run code to answer math questions.', 'last_error': {'code': 'rate_limit_exceeded', 'message': \"We're currently processing too many requests - please try again later.\"}, 'metadata': {}, 'model': 'gpt-4-1106-preview', 'object': 'thread.run', 'required_action': None, 'started_at': 1699331563, 'status': 'failed', 'thread_id': 'thread_uPHtm7oJ9W2Rvtp5LIZjwspu', 'tools': [{'type': 'code_interpreter'}]}", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[1], line 10\u001b[0m\n\u001b[1;32m 3\u001b[0m interpreter_assistant \u001b[38;5;241m=\u001b[39m OpenAIAssistantRunnable(\n\u001b[1;32m 4\u001b[0m name\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mlangchain assistant\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m 5\u001b[0m instructions\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mYou are a personal math tutor. 
Write and run code to answer math questions.\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m 6\u001b[0m tools\u001b[38;5;241m=\u001b[39m[{\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtype\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mcode_interpreter\u001b[39m\u001b[38;5;124m\"\u001b[39m}],\n\u001b[1;32m 7\u001b[0m model\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mgpt-4-1106-preview\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 8\u001b[0m )\n\u001b[1;32m 9\u001b[0m interpreter_assistant\u001b[38;5;241m.\u001b[39mcreate()\n\u001b[0;32m---> 10\u001b[0m output \u001b[38;5;241m=\u001b[39m \u001b[43minterpreter_assistant\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minvoke\u001b[49m\u001b[43m(\u001b[49m\u001b[43m{\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mcontent\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mWhat\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43ms 10 - 4 raised to the 2.7\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m}\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 11\u001b[0m output\n", + "File \u001b[0;32m~/langchain/libs/experimental/langchain_experimental/openai_assistant/base.py:75\u001b[0m, in \u001b[0;36mOpenAIAssistantRunnable.invoke\u001b[0;34m(self, input, config)\u001b[0m\n\u001b[1;32m 71\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 72\u001b[0m run \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mclient\u001b[38;5;241m.\u001b[39mbeta\u001b[38;5;241m.\u001b[39mthreads\u001b[38;5;241m.\u001b[39mruns\u001b[38;5;241m.\u001b[39msubmit_tool_outputs(\n\u001b[1;32m 73\u001b[0m thread_id\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mthread_id, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39m\u001b[38;5;28minput\u001b[39m\n\u001b[1;32m 74\u001b[0m )\n\u001b[0;32m---> 75\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_get_response\u001b[49m\u001b[43m(\u001b[49m\u001b[43mrun\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mid\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m~/langchain/libs/experimental/langchain_experimental/openai_assistant/base.py:118\u001b[0m, in \u001b[0;36mOpenAIAssistantRunnable._get_response\u001b[0;34m(self, run_id)\u001b[0m\n\u001b[1;32m 116\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m actions\n\u001b[1;32m 117\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m--> 118\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(run\u001b[38;5;241m.\u001b[39mdict())\n", + "\u001b[0;31mValueError\u001b[0m: {'id': 'run_6jCp1r4M684TWy77rNFnY9WV', 'assistant_id': 'asst_CpWPBHGRo5QR5XhLeFZsb4uL', 'cancelled_at': None, 'completed_at': None, 'created_at': 1699331563, 'expires_at': None, 'failed_at': 1699331575, 'file_ids': [], 'instructions': 'You are a personal math tutor. 
Write and run code to answer math questions.', 'last_error': {'code': 'rate_limit_exceeded', 'message': \"We're currently processing too many requests - please try again later.\"}, 'metadata': {}, 'model': 'gpt-4-1106-preview', 'object': 'thread.run', 'required_action': None, 'started_at': 1699331563, 'status': 'failed', 'thread_id': 'thread_uPHtm7oJ9W2Rvtp5LIZjwspu', 'tools': [{'type': 'code_interpreter'}]}" + ] + } + ], + "source": [ + "from langchain_experimental.openai_assistant.base import OpenAIAssistantRunnable\n", + "\n", + "interpreter_assistant = OpenAIAssistantRunnable(\n", + " name=\"langchain assistant\",\n", + " instructions=\"You are a personal math tutor. Write and run code to answer math questions.\",\n", + " tools=[{\"type\": \"code_interpreter\"}],\n", + " model=\"gpt-4-1106-preview\"\n", + ")\n", + "interpreter_assistant.create()\n", + "output = interpreter_assistant.invoke({\"content\": \"What's 10 - 4 raised to the 2.7\"})\n", + "output" + ] + }, + { + "cell_type": "markdown", + "id": "a8ddd181-ac63-4ab6-a40d-a236120379c1", + "metadata": {}, + "source": [ + "### As a LangChain agent with arbitrary tools" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "48681ac7-b267-48d4-972c-8a7df8393a21", + "metadata": {}, + "outputs": [], + "source": [ + "from langchain.tools import E2BDataAnalysisTool, format_tool_to_openai_function\n", + "\n", + "tool = E2BDataAnalysisTool(api_key=\"...\")\n", + "function = format_tool_to_openai_function(tool)\n", + "tools = [tool]" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "1c01dd79-dd3e-4509-a2e2-009a7f99f16a", + "metadata": {}, + "outputs": [], + "source": [ + "agent = OpenAIAssistantRunnable(\n", + " name=\"langchain assistant e2b tool\",\n", + " instructions=\"You are a personal math tutor. Write and run code to answer math questions.\",\n", + " tools=[{\"type\": \"function\", \"function\": function}],\n", + " model=\"gpt-4-1106-preview\",\n", + " as_agent=True\n", + ")\n", + "agent.create()" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "b76cb669-6aba-4827-868f-00aa960026f2", + "metadata": {}, + "outputs": [ + { + "ename": "ValueError", + "evalue": "{'id': 'run_6QY8OJeyV2M4BjUQMdWkAn1i', 'assistant_id': 'asst_8dAlzPI0zvjOaaenvwNCtIEl', 'cancelled_at': None, 'completed_at': None, 'created_at': 1699331591, 'expires_at': None, 'failed_at': 1699331601, 'file_ids': [], 'instructions': 'You are a personal math tutor. Write and run code to answer math questions.', 'last_error': {'code': 'rate_limit_exceeded', 'message': \"We're currently processing too many requests - please try again later.\"}, 'metadata': {}, 'model': 'gpt-4-1106-preview', 'object': 'thread.run', 'required_action': None, 'started_at': 1699331591, 'status': 'failed', 'thread_id': 'thread_wcdfruB9FPwjs27TbBv0p45U', 'tools': [{'function': {'description': 'Evaluates python code in a sandbox environment. The environment is long running and exists across multiple executions. You must send the whole script every time and print your outputs. Script should be pure python code that can be evaluated. It should be in python format NOT markdown. The code should NOT be wrapped in backticks. All python packages including requests, matplotlib, scipy, numpy, pandas, etc are available. 
Create and display chart using `plt.show()`.', 'name': 'e2b_data_analysis', 'parameters': {'title': 'E2BDataAnalysisToolArguments', 'description': 'Arguments for the E2BDataAnalysisTool.', 'type': 'object', 'properties': {'python_code': {'title': 'Python Code', 'description': 'The python script to be evaluated. The contents will be in main.py. It should not be in markdown format.', 'example': \"print('Hello World')\", 'type': 'string'}}, 'required': ['python_code']}}, 'type': 'function'}]}", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[5], line 4\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mlangchain\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mschema\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01magent\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m AgentFinish\n\u001b[1;32m 3\u001b[0m tool_map \u001b[38;5;241m=\u001b[39m {tool\u001b[38;5;241m.\u001b[39mname: tool \u001b[38;5;28;01mfor\u001b[39;00m tool \u001b[38;5;129;01min\u001b[39;00m tools}\n\u001b[0;32m----> 4\u001b[0m response \u001b[38;5;241m=\u001b[39m \u001b[43magent\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minvoke\u001b[49m\u001b[43m(\u001b[49m\u001b[43m{\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mcontent\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mWhat\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43ms 10 - 4 raised to the 2.7\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m}\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 5\u001b[0m \u001b[38;5;28;01mwhile\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(response, AgentFinish):\n\u001b[1;32m 6\u001b[0m tool_outputs \u001b[38;5;241m=\u001b[39m []\n", + "File \u001b[0;32m~/langchain/libs/experimental/langchain_experimental/openai_assistant/base.py:75\u001b[0m, in \u001b[0;36mOpenAIAssistantRunnable.invoke\u001b[0;34m(self, input, config)\u001b[0m\n\u001b[1;32m 71\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 72\u001b[0m run \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mclient\u001b[38;5;241m.\u001b[39mbeta\u001b[38;5;241m.\u001b[39mthreads\u001b[38;5;241m.\u001b[39mruns\u001b[38;5;241m.\u001b[39msubmit_tool_outputs(\n\u001b[1;32m 73\u001b[0m thread_id\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mthread_id, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39m\u001b[38;5;28minput\u001b[39m\n\u001b[1;32m 74\u001b[0m )\n\u001b[0;32m---> 75\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_get_response\u001b[49m\u001b[43m(\u001b[49m\u001b[43mrun\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mid\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m~/langchain/libs/experimental/langchain_experimental/openai_assistant/base.py:118\u001b[0m, in \u001b[0;36mOpenAIAssistantRunnable._get_response\u001b[0;34m(self, run_id)\u001b[0m\n\u001b[1;32m 116\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m actions\n\u001b[1;32m 117\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m--> 118\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m 
\u001b[38;5;167;01mValueError\u001b[39;00m(run\u001b[38;5;241m.\u001b[39mdict())\n", + "\u001b[0;31mValueError\u001b[0m: {'id': 'run_6QY8OJeyV2M4BjUQMdWkAn1i', 'assistant_id': 'asst_8dAlzPI0zvjOaaenvwNCtIEl', 'cancelled_at': None, 'completed_at': None, 'created_at': 1699331591, 'expires_at': None, 'failed_at': 1699331601, 'file_ids': [], 'instructions': 'You are a personal math tutor. Write and run code to answer math questions.', 'last_error': {'code': 'rate_limit_exceeded', 'message': \"We're currently processing too many requests - please try again later.\"}, 'metadata': {}, 'model': 'gpt-4-1106-preview', 'object': 'thread.run', 'required_action': None, 'started_at': 1699331591, 'status': 'failed', 'thread_id': 'thread_wcdfruB9FPwjs27TbBv0p45U', 'tools': [{'function': {'description': 'Evaluates python code in a sandbox environment. The environment is long running and exists across multiple executions. You must send the whole script every time and print your outputs. Script should be pure python code that can be evaluated. It should be in python format NOT markdown. The code should NOT be wrapped in backticks. All python packages including requests, matplotlib, scipy, numpy, pandas, etc are available. Create and display chart using `plt.show()`.', 'name': 'e2b_data_analysis', 'parameters': {'title': 'E2BDataAnalysisToolArguments', 'description': 'Arguments for the E2BDataAnalysisTool.', 'type': 'object', 'properties': {'python_code': {'title': 'Python Code', 'description': 'The python script to be evaluated. The contents will be in main.py. It should not be in markdown format.', 'example': \"print('Hello World')\", 'type': 'string'}}, 'required': ['python_code']}}, 'type': 'function'}]}" + ] + } + ], + "source": [ + "from langchain.schema.agent import AgentFinish\n", + "\n", + "tool_map = {tool.name: tool for tool in tools}\n", + "response = agent.invoke({\"content\": \"What's 10 - 4 raised to the 2.7\"})\n", + "while not isinstance(response, AgentFinish):\n", + " tool_outputs = []\n", + " for action in response:\n", + " tool_output = tool_map[action.tool].invoke(action.tool_input)\n", + " print(action.tool, action.tool_input, tool_output)\n", + " tool_outputs.append({\"output\": tool_output, \"tool_call_id\": action.tool_call_id})\n", + " response = agent.invoke({\"tool_outputs\": tool_outputs, \"run_id\": action.run_id})\n", + " \n", + "print(response.return_values[\"output\"])" + ] + }, { "cell_type": "markdown", "id": "71c34763-d1e7-4b9a-a9d7-3e4cc0dfc2c4", From a8b1d537220a57d34f1e61da53ec4cbe47c90a7d Mon Sep 17 00:00:00 2001 From: Bagatur Date: Mon, 6 Nov 2023 20:35:58 -0800 Subject: [PATCH 05/22] undo --- libs/langchain/langchain/agents/agent.py | 70 ------------------------ 1 file changed, 70 deletions(-) diff --git a/libs/langchain/langchain/agents/agent.py b/libs/langchain/langchain/agents/agent.py index 83601e1b055d0..86f45ed244d90 100644 --- a/libs/langchain/langchain/agents/agent.py +++ b/libs/langchain/langchain/agents/agent.py @@ -319,76 +319,6 @@ def tool_run_logging_kwargs(self) -> Dict: return {} -class RunnableMultiActionAgent(BaseMultiActionAgent): - """Agent powered by runnables.""" - - runnable: Runnable[dict, Union[List[AgentAction], AgentFinish]] - """Runnable to call to get agent action.""" - _input_keys: List[str] = [] - """Input keys.""" - - class Config: - """Configuration for this pydantic object.""" - - arbitrary_types_allowed = True - - @property - def return_values(self) -> List[str]: - """Return values of the agent.""" - return [] - - @property - def 
input_keys(self) -> List[str]: - """Return the input keys. - - Returns: - List of input keys. - """ - return self._input_keys - - def plan( - self, - intermediate_steps: List[Tuple[AgentAction, str]], - callbacks: Callbacks = None, - **kwargs: Any, - ) -> Union[List[AgentAction], AgentFinish]: - """Given input, decided what to do. - - Args: - intermediate_steps: Steps the LLM has taken to date, - along with the observations. - callbacks: Callbacks to run. - **kwargs: User inputs. - - Returns: - Action specifying what tool to use. - """ - inputs = {**kwargs, **{"intermediate_steps": intermediate_steps}} - output = self.runnable.invoke(inputs, config={"callbacks": callbacks}) - return output - - async def aplan( - self, - intermediate_steps: List[Tuple[AgentAction, str]], - callbacks: Callbacks = None, - **kwargs: Any, - ) -> Union[List[AgentAction], AgentFinish]: - """Given input, decided what to do. - - Args: - intermediate_steps: Steps the LLM has taken to date, - along with observations - callbacks: Callbacks to run. - **kwargs: User inputs. - - Returns: - Action specifying what tool to use. - """ - inputs = {**kwargs, **{"intermediate_steps": intermediate_steps}} - output = await self.runnable.ainvoke(inputs, config={"callbacks": callbacks}) - return output - - class AgentOutputParser(BaseOutputParser): """Base class for parsing agent output into agent action/finish.""" From 51225f8a34beeecaa4a786197b80e307f37eecb6 Mon Sep 17 00:00:00 2001 From: Bagatur Date: Mon, 6 Nov 2023 22:13:33 -0800 Subject: [PATCH 06/22] executor --- cookbook/openai_v1_cookbook.ipynb | 120 +++++++++++++++++- .../openai_assistant/base.py | 10 +- 2 files changed, 122 insertions(+), 8 deletions(-) diff --git a/cookbook/openai_v1_cookbook.ipynb b/cookbook/openai_v1_cookbook.ipynb index 4d21c47711d4a..15e8a4f87d5f4 100644 --- a/cookbook/openai_v1_cookbook.ipynb +++ b/cookbook/openai_v1_cookbook.ipynb @@ -90,21 +90,21 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 6, "id": "7a20a008-49ac-46d2-aa26-b270118af5ea", "metadata": {}, "outputs": [ { "ename": "ValueError", - "evalue": "{'id': 'run_6jCp1r4M684TWy77rNFnY9WV', 'assistant_id': 'asst_CpWPBHGRo5QR5XhLeFZsb4uL', 'cancelled_at': None, 'completed_at': None, 'created_at': 1699331563, 'expires_at': None, 'failed_at': 1699331575, 'file_ids': [], 'instructions': 'You are a personal math tutor. Write and run code to answer math questions.', 'last_error': {'code': 'rate_limit_exceeded', 'message': \"We're currently processing too many requests - please try again later.\"}, 'metadata': {}, 'model': 'gpt-4-1106-preview', 'object': 'thread.run', 'required_action': None, 'started_at': 1699331563, 'status': 'failed', 'thread_id': 'thread_uPHtm7oJ9W2Rvtp5LIZjwspu', 'tools': [{'type': 'code_interpreter'}]}", + "evalue": "{'id': 'run_PiaUaLysMmWCgT49jLNkBhhx', 'assistant_id': 'asst_4mXntqWdxkAM8CLkGERlYBXI', 'cancelled_at': None, 'completed_at': None, 'created_at': 1699337403, 'expires_at': None, 'failed_at': 1699337413, 'file_ids': [], 'instructions': 'You are a personal math tutor. 
Write and run code to answer math questions.', 'last_error': {'code': 'rate_limit_exceeded', 'message': \"We're currently processing too many requests - please try again later.\"}, 'metadata': {}, 'model': 'gpt-4-1106-preview', 'object': 'thread.run', 'required_action': None, 'started_at': 1699337403, 'status': 'failed', 'thread_id': 'thread_mRVqIj5Jm3rxuGreucH22bsr', 'tools': [{'type': 'code_interpreter'}]}", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)", - "Cell \u001b[0;32mIn[1], line 10\u001b[0m\n\u001b[1;32m 3\u001b[0m interpreter_assistant \u001b[38;5;241m=\u001b[39m OpenAIAssistantRunnable(\n\u001b[1;32m 4\u001b[0m name\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mlangchain assistant\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m 5\u001b[0m instructions\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mYou are a personal math tutor. Write and run code to answer math questions.\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m 6\u001b[0m tools\u001b[38;5;241m=\u001b[39m[{\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtype\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mcode_interpreter\u001b[39m\u001b[38;5;124m\"\u001b[39m}],\n\u001b[1;32m 7\u001b[0m model\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mgpt-4-1106-preview\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 8\u001b[0m )\n\u001b[1;32m 9\u001b[0m interpreter_assistant\u001b[38;5;241m.\u001b[39mcreate()\n\u001b[0;32m---> 10\u001b[0m output \u001b[38;5;241m=\u001b[39m \u001b[43minterpreter_assistant\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minvoke\u001b[49m\u001b[43m(\u001b[49m\u001b[43m{\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mcontent\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mWhat\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43ms 10 - 4 raised to the 2.7\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m}\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 11\u001b[0m output\n", + "Cell \u001b[0;32mIn[6], line 10\u001b[0m\n\u001b[1;32m 3\u001b[0m interpreter_assistant \u001b[38;5;241m=\u001b[39m OpenAIAssistantRunnable(\n\u001b[1;32m 4\u001b[0m name\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mlangchain assistant\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m 5\u001b[0m instructions\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mYou are a personal math tutor. 
Write and run code to answer math questions.\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m 6\u001b[0m tools\u001b[38;5;241m=\u001b[39m[{\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtype\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mcode_interpreter\u001b[39m\u001b[38;5;124m\"\u001b[39m}],\n\u001b[1;32m 7\u001b[0m model\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mgpt-4-1106-preview\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 8\u001b[0m )\n\u001b[1;32m 9\u001b[0m interpreter_assistant\u001b[38;5;241m.\u001b[39mcreate()\n\u001b[0;32m---> 10\u001b[0m output \u001b[38;5;241m=\u001b[39m \u001b[43minterpreter_assistant\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minvoke\u001b[49m\u001b[43m(\u001b[49m\u001b[43m{\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mcontent\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mWhat\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43ms 10 - 4 raised to the 2.7\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m}\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 11\u001b[0m output\n", "File \u001b[0;32m~/langchain/libs/experimental/langchain_experimental/openai_assistant/base.py:75\u001b[0m, in \u001b[0;36mOpenAIAssistantRunnable.invoke\u001b[0;34m(self, input, config)\u001b[0m\n\u001b[1;32m 71\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 72\u001b[0m run \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mclient\u001b[38;5;241m.\u001b[39mbeta\u001b[38;5;241m.\u001b[39mthreads\u001b[38;5;241m.\u001b[39mruns\u001b[38;5;241m.\u001b[39msubmit_tool_outputs(\n\u001b[1;32m 73\u001b[0m thread_id\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mthread_id, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39m\u001b[38;5;28minput\u001b[39m\n\u001b[1;32m 74\u001b[0m )\n\u001b[0;32m---> 75\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_get_response\u001b[49m\u001b[43m(\u001b[49m\u001b[43mrun\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mid\u001b[49m\u001b[43m)\u001b[49m\n", "File \u001b[0;32m~/langchain/libs/experimental/langchain_experimental/openai_assistant/base.py:118\u001b[0m, in \u001b[0;36mOpenAIAssistantRunnable._get_response\u001b[0;34m(self, run_id)\u001b[0m\n\u001b[1;32m 116\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m actions\n\u001b[1;32m 117\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m--> 118\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(run\u001b[38;5;241m.\u001b[39mdict())\n", - "\u001b[0;31mValueError\u001b[0m: {'id': 'run_6jCp1r4M684TWy77rNFnY9WV', 'assistant_id': 'asst_CpWPBHGRo5QR5XhLeFZsb4uL', 'cancelled_at': None, 'completed_at': None, 'created_at': 1699331563, 'expires_at': None, 'failed_at': 1699331575, 'file_ids': [], 'instructions': 'You are a personal math tutor. 
Write and run code to answer math questions.', 'last_error': {'code': 'rate_limit_exceeded', 'message': \"We're currently processing too many requests - please try again later.\"}, 'metadata': {}, 'model': 'gpt-4-1106-preview', 'object': 'thread.run', 'required_action': None, 'started_at': 1699331563, 'status': 'failed', 'thread_id': 'thread_uPHtm7oJ9W2Rvtp5LIZjwspu', 'tools': [{'type': 'code_interpreter'}]}" + "\u001b[0;31mValueError\u001b[0m: {'id': 'run_PiaUaLysMmWCgT49jLNkBhhx', 'assistant_id': 'asst_4mXntqWdxkAM8CLkGERlYBXI', 'cancelled_at': None, 'completed_at': None, 'created_at': 1699337403, 'expires_at': None, 'failed_at': 1699337413, 'file_ids': [], 'instructions': 'You are a personal math tutor. Write and run code to answer math questions.', 'last_error': {'code': 'rate_limit_exceeded', 'message': \"We're currently processing too many requests - please try again later.\"}, 'metadata': {}, 'model': 'gpt-4-1106-preview', 'object': 'thread.run', 'required_action': None, 'started_at': 1699337403, 'status': 'failed', 'thread_id': 'thread_mRVqIj5Jm3rxuGreucH22bsr', 'tools': [{'type': 'code_interpreter'}]}" ] } ], @@ -132,10 +132,50 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 7, "id": "48681ac7-b267-48d4-972c-8a7df8393a21", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Failed to acquire session\n" + ] + }, + { + "ename": "UnauthorizedException", + "evalue": "(401)\nReason: Unauthorized\nHTTP response headers: HTTPHeaderDict({'Date': 'Tue, 07 Nov 2023 06:10:27 GMT', 'Content-Type': 'application/json; charset=utf-8', 'Content-Length': '123', 'Connection': 'keep-alive', 'via': '1.1 google', 'alt-svc': 'h3=\":443\"; ma=86400', 'CF-Cache-Status': 'DYNAMIC', 'Report-To': '{\"endpoints\":[{\"url\":\"https:\\\\/\\\\/a.nel.cloudflare.com\\\\/report\\\\/v3?s=eD4xvpIGOuei6fNw3tUox8hcw2rilvLB%2FUrFxgAkV05OCKE1yIBAZfHpmAnqATRlRrXBnDTn8KFWtliFKKO0fZHiKEPRzxzMB3CKvDSgquxoePtSyVtPguaR1UNYrK3h\"}],\"group\":\"cf-nel\",\"max_age\":604800}', 'NEL': '{\"success_fraction\":0,\"report_to\":\"cf-nel\",\"max_age\":604800}', 'Server': 'cloudflare', 'CF-RAY': '822369c7eefbd00d-SJC'})\nHTTP response body: {\"code\":401,\"message\":\"Invalid API key, please visit https://e2b.dev/docs?reason=sdk-missing-api-key to get your API key.\"}\n", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mUnauthorizedException\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[7], line 3\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mlangchain\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mtools\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m E2BDataAnalysisTool, format_tool_to_openai_function\n\u001b[0;32m----> 3\u001b[0m tool \u001b[38;5;241m=\u001b[39m \u001b[43mE2BDataAnalysisTool\u001b[49m\u001b[43m(\u001b[49m\u001b[43mapi_key\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43m...\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[1;32m 4\u001b[0m function \u001b[38;5;241m=\u001b[39m format_tool_to_openai_function(tool)\n\u001b[1;32m 5\u001b[0m tools \u001b[38;5;241m=\u001b[39m [tool]\n", + "File \u001b[0;32m~/langchain/libs/langchain/langchain/tools/e2b_data_analysis/tool.py:124\u001b[0m, in \u001b[0;36mE2BDataAnalysisTool.__init__\u001b[0;34m(self, api_key, cwd, env_vars, 
on_stdout, on_stderr, on_artifact, on_exit, **kwargs)\u001b[0m\n\u001b[1;32m 121\u001b[0m \u001b[38;5;66;03m# If no API key is provided, E2B will try to read it from the environment\u001b[39;00m\n\u001b[1;32m 122\u001b[0m \u001b[38;5;66;03m# variable E2B_API_KEY\u001b[39;00m\n\u001b[1;32m 123\u001b[0m \u001b[38;5;28msuper\u001b[39m()\u001b[38;5;241m.\u001b[39m\u001b[38;5;21m__init__\u001b[39m(description\u001b[38;5;241m=\u001b[39mbase_description, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[0;32m--> 124\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39msession \u001b[38;5;241m=\u001b[39m \u001b[43mDataAnalysis\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 125\u001b[0m \u001b[43m \u001b[49m\u001b[43mapi_key\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mapi_key\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 126\u001b[0m \u001b[43m \u001b[49m\u001b[43mcwd\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcwd\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 127\u001b[0m \u001b[43m \u001b[49m\u001b[43menv_vars\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43menv_vars\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 128\u001b[0m \u001b[43m \u001b[49m\u001b[43mon_stdout\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mon_stdout\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 129\u001b[0m \u001b[43m \u001b[49m\u001b[43mon_stderr\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mon_stderr\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 130\u001b[0m \u001b[43m \u001b[49m\u001b[43mon_exit\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mon_exit\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 131\u001b[0m \u001b[43m \u001b[49m\u001b[43mon_artifact\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mon_artifact\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 132\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/e2b/templates/data_analysis.py:42\u001b[0m, in \u001b[0;36mDataAnalysis.__init__\u001b[0;34m(self, api_key, cwd, env_vars, on_stdout, on_stderr, on_artifact, on_exit)\u001b[0m\n\u001b[1;32m 31\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m__init__\u001b[39m(\n\u001b[1;32m 32\u001b[0m \u001b[38;5;28mself\u001b[39m,\n\u001b[1;32m 33\u001b[0m api_key: Optional[\u001b[38;5;28mstr\u001b[39m] \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 39\u001b[0m on_exit: Optional[Callable[[\u001b[38;5;28mint\u001b[39m], Any]] \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[1;32m 40\u001b[0m ):\n\u001b[1;32m 41\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mon_artifact \u001b[38;5;241m=\u001b[39m on_artifact\n\u001b[0;32m---> 42\u001b[0m \u001b[38;5;28;43msuper\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[38;5;21;43m__init__\u001b[39;49m\u001b[43m(\u001b[49m\n\u001b[1;32m 43\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mid\u001b[39;49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43menv_id\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 44\u001b[0m \u001b[43m \u001b[49m\u001b[43mapi_key\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mapi_key\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 45\u001b[0m \u001b[43m \u001b[49m\u001b[43mcwd\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcwd\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 46\u001b[0m \u001b[43m 
\u001b[49m\u001b[43menv_vars\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43menv_vars\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 47\u001b[0m \u001b[43m \u001b[49m\u001b[43mon_stdout\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mon_stdout\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 48\u001b[0m \u001b[43m \u001b[49m\u001b[43mon_stderr\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mon_stderr\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 49\u001b[0m \u001b[43m \u001b[49m\u001b[43mon_exit\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mon_exit\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 50\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/e2b/session/main.py:116\u001b[0m, in \u001b[0;36mSession.__init__\u001b[0;34m(self, id, api_key, cwd, env_vars, on_scan_ports, on_stdout, on_stderr, on_exit, timeout, _debug_hostname, _debug_port, _debug_dev_env)\u001b[0m\n\u001b[1;32m 112\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_filesystem \u001b[38;5;241m=\u001b[39m FilesystemManager(session\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m)\n\u001b[1;32m 113\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_process \u001b[38;5;241m=\u001b[39m ProcessManager(\n\u001b[1;32m 114\u001b[0m session\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m, on_stdout\u001b[38;5;241m=\u001b[39mon_stdout, on_stderr\u001b[38;5;241m=\u001b[39mon_stderr, on_exit\u001b[38;5;241m=\u001b[39mon_exit\n\u001b[1;32m 115\u001b[0m )\n\u001b[0;32m--> 116\u001b[0m \u001b[38;5;28;43msuper\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[38;5;21;43m__init__\u001b[39;49m\u001b[43m(\u001b[49m\n\u001b[1;32m 117\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mid\u001b[39;49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mid\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 118\u001b[0m \u001b[43m \u001b[49m\u001b[43mapi_key\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mapi_key\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 119\u001b[0m \u001b[43m \u001b[49m\u001b[43mcwd\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcwd\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 120\u001b[0m \u001b[43m \u001b[49m\u001b[43menv_vars\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43menv_vars\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 121\u001b[0m \u001b[43m \u001b[49m\u001b[43m_debug_hostname\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m_debug_hostname\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 122\u001b[0m \u001b[43m \u001b[49m\u001b[43m_debug_port\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m_debug_port\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 123\u001b[0m \u001b[43m \u001b[49m\u001b[43m_debug_dev_env\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m_debug_dev_env\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 124\u001b[0m \u001b[43m \u001b[49m\u001b[43mon_close\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_close_services\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 125\u001b[0m \u001b[43m \u001b[49m\u001b[43mtimeout\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtimeout\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 126\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/e2b/session/session_connection.py:98\u001b[0m, in \u001b[0;36mSessionConnection.__init__\u001b[0;34m(self, id, api_key, 
cwd, env_vars, on_close, timeout, _debug_hostname, _debug_port, _debug_dev_env)\u001b[0m\n\u001b[1;32m 94\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_finished \u001b[38;5;241m=\u001b[39m DeferredFuture(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_process_cleanup)\n\u001b[1;32m 96\u001b[0m logger\u001b[38;5;241m.\u001b[39minfo(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mSession for code snippet \u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_id\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m initialized\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m---> 98\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_open\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtimeout\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtimeout\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/e2b/session/main.py:135\u001b[0m, in \u001b[0;36mSession._open\u001b[0;34m(self, timeout)\u001b[0m\n\u001b[1;32m 129\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 130\u001b[0m \u001b[38;5;124;03mOpens the session.\u001b[39;00m\n\u001b[1;32m 131\u001b[0m \n\u001b[1;32m 132\u001b[0m \u001b[38;5;124;03m:param timeout: Specify the duration, in seconds to give the method to finish its execution before it times out (default is 60 seconds). If set to None, the method will continue to wait until it completes, regardless of time\u001b[39;00m\n\u001b[1;32m 133\u001b[0m \u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 134\u001b[0m logger\u001b[38;5;241m.\u001b[39minfo(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mOpening session \u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_id\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m--> 135\u001b[0m \u001b[38;5;28;43msuper\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_open\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtimeout\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtimeout\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 136\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_code_snippet\u001b[38;5;241m.\u001b[39m_subscribe()\n\u001b[1;32m 137\u001b[0m logger\u001b[38;5;241m.\u001b[39minfo(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mSession \u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_id\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m opened\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n", + "File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/e2b/session/session_connection.py:206\u001b[0m, in \u001b[0;36mSessionConnection._open\u001b[0;34m(self, timeout)\u001b[0m\n\u001b[1;32m 204\u001b[0m logger\u001b[38;5;241m.\u001b[39merror(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mFailed to acquire session\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m 205\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_close()\n\u001b[0;32m--> 206\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m e\n\u001b[1;32m 208\u001b[0m hostname \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mget_hostname(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_debug_port \u001b[38;5;129;01mor\u001b[39;00m ENVD_PORT)\n\u001b[1;32m 209\u001b[0m 
protocol \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mws\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_debug_dev_env \u001b[38;5;241m==\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mlocal\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mwss\u001b[39m\u001b[38;5;124m\"\u001b[39m\n", + "File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/e2b/session/session_connection.py:173\u001b[0m, in \u001b[0;36mSessionConnection._open\u001b[0;34m(self, timeout)\u001b[0m\n\u001b[1;32m 170\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m client\u001b[38;5;241m.\u001b[39mApiClient(configuration) \u001b[38;5;28;01mas\u001b[39;00m api_client:\n\u001b[1;32m 171\u001b[0m api \u001b[38;5;241m=\u001b[39m client\u001b[38;5;241m.\u001b[39mSessionsApi(api_client)\n\u001b[0;32m--> 173\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_session \u001b[38;5;241m=\u001b[39m \u001b[43mapi\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43msessions_post\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 174\u001b[0m \u001b[43m \u001b[49m\u001b[43mmodels\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mNewSession\u001b[49m\u001b[43m(\u001b[49m\u001b[43mcodeSnippetID\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_id\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43meditEnabled\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mFalse\u001b[39;49;00m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 175\u001b[0m \u001b[43m \u001b[49m\u001b[43mapi_key\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_api_key\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 176\u001b[0m \u001b[43m \u001b[49m\u001b[43m_request_timeout\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtimeout\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 177\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 178\u001b[0m logger\u001b[38;5;241m.\u001b[39minfo(\n\u001b[1;32m 179\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mSession \u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_session\u001b[38;5;241m.\u001b[39mcode_snippet_id\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m created (id:\u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_session\u001b[38;5;241m.\u001b[39msession_id\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m)\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 180\u001b[0m )\n\u001b[1;32m 182\u001b[0m \u001b[38;5;66;03m# We could potentially use asyncio.to_thread() but that requires Python 3.9+\u001b[39;00m\n", + "File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/pydantic/decorator.py:40\u001b[0m, in \u001b[0;36mpydantic.decorator.validate_arguments.validate.wrapper_function\u001b[0;34m()\u001b[0m\n", + "File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/pydantic/decorator.py:134\u001b[0m, in \u001b[0;36mpydantic.decorator.ValidatedFunction.call\u001b[0;34m()\u001b[0m\n", + "File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/pydantic/decorator.py:206\u001b[0m, in \u001b[0;36mpydantic.decorator.ValidatedFunction.execute\u001b[0;34m()\u001b[0m\n", + "File 
\u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/e2b/api/v1/client/api/sessions_api.py:230\u001b[0m, in \u001b[0;36mSessionsApi.sessions_post\u001b[0;34m(self, new_session, api_key, **kwargs)\u001b[0m\n\u001b[1;32m 226\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m_preload_content\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;129;01min\u001b[39;00m kwargs:\n\u001b[1;32m 227\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[1;32m 228\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mError! Please call the sessions_post_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 229\u001b[0m )\n\u001b[0;32m--> 230\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43msessions_post_with_http_info\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 231\u001b[0m \u001b[43m \u001b[49m\u001b[43mnew_session\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mapi_key\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\n\u001b[1;32m 232\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/pydantic/decorator.py:40\u001b[0m, in \u001b[0;36mpydantic.decorator.validate_arguments.validate.wrapper_function\u001b[0;34m()\u001b[0m\n", + "File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/pydantic/decorator.py:134\u001b[0m, in \u001b[0;36mpydantic.decorator.ValidatedFunction.call\u001b[0;34m()\u001b[0m\n", + "File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/pydantic/decorator.py:206\u001b[0m, in \u001b[0;36mpydantic.decorator.ValidatedFunction.execute\u001b[0;34m()\u001b[0m\n", + "File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/e2b/api/v1/client/api/sessions_api.py:344\u001b[0m, in \u001b[0;36mSessionsApi.sessions_post_with_http_info\u001b[0;34m(self, new_session, api_key, **kwargs)\u001b[0m\n\u001b[1;32m 335\u001b[0m _auth_settings \u001b[38;5;241m=\u001b[39m [] \u001b[38;5;66;03m# noqa: E501\u001b[39;00m\n\u001b[1;32m 337\u001b[0m _response_types_map \u001b[38;5;241m=\u001b[39m {\n\u001b[1;32m 338\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m201\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mSession\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m 339\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m401\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mError\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m 340\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m400\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mError\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m 341\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m500\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mError\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m 342\u001b[0m }\n\u001b[0;32m--> 344\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mapi_client\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcall_api\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 345\u001b[0m \u001b[43m 
\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43m/sessions\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 346\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mPOST\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 347\u001b[0m \u001b[43m \u001b[49m\u001b[43m_path_params\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 348\u001b[0m \u001b[43m \u001b[49m\u001b[43m_query_params\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 349\u001b[0m \u001b[43m \u001b[49m\u001b[43m_header_params\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 350\u001b[0m \u001b[43m \u001b[49m\u001b[43mbody\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m_body_params\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 351\u001b[0m \u001b[43m \u001b[49m\u001b[43mpost_params\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m_form_params\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 352\u001b[0m \u001b[43m \u001b[49m\u001b[43mfiles\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m_files\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 353\u001b[0m \u001b[43m \u001b[49m\u001b[43mresponse_types_map\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m_response_types_map\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 354\u001b[0m \u001b[43m \u001b[49m\u001b[43mauth_settings\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m_auth_settings\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 355\u001b[0m \u001b[43m \u001b[49m\u001b[43masync_req\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m_params\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43masync_req\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 356\u001b[0m \u001b[43m \u001b[49m\u001b[43m_return_http_data_only\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m_params\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43m_return_http_data_only\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;66;43;03m# noqa: E501\u001b[39;49;00m\n\u001b[1;32m 357\u001b[0m \u001b[43m \u001b[49m\u001b[43m_preload_content\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m_params\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43m_preload_content\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 358\u001b[0m \u001b[43m \u001b[49m\u001b[43m_request_timeout\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m_params\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43m_request_timeout\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 359\u001b[0m \u001b[43m \u001b[49m\u001b[43mcollection_formats\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m_collection_formats\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 360\u001b[0m \u001b[43m 
\u001b[49m\u001b[43m_request_auth\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m_params\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43m_request_auth\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 361\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/e2b/api/v1/client/api_client.py:439\u001b[0m, in \u001b[0;36mApiClient.call_api\u001b[0;34m(self, resource_path, method, path_params, query_params, header_params, body, post_params, files, response_types_map, auth_settings, async_req, _return_http_data_only, collection_formats, _preload_content, _request_timeout, _host, _request_auth)\u001b[0m\n\u001b[1;32m 397\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124;03m\"\"\"Makes the HTTP request (synchronous) and returns deserialized data.\u001b[39;00m\n\u001b[1;32m 398\u001b[0m \n\u001b[1;32m 399\u001b[0m \u001b[38;5;124;03mTo make an async_req request, set the async_req parameter.\u001b[39;00m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 436\u001b[0m \u001b[38;5;124;03m then the method will return the response directly.\u001b[39;00m\n\u001b[1;32m 437\u001b[0m \u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 438\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m async_req:\n\u001b[0;32m--> 439\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m__call_api\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 440\u001b[0m \u001b[43m \u001b[49m\u001b[43mresource_path\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 441\u001b[0m \u001b[43m \u001b[49m\u001b[43mmethod\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 442\u001b[0m \u001b[43m \u001b[49m\u001b[43mpath_params\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 443\u001b[0m \u001b[43m \u001b[49m\u001b[43mquery_params\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 444\u001b[0m \u001b[43m \u001b[49m\u001b[43mheader_params\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 445\u001b[0m \u001b[43m \u001b[49m\u001b[43mbody\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 446\u001b[0m \u001b[43m \u001b[49m\u001b[43mpost_params\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 447\u001b[0m \u001b[43m \u001b[49m\u001b[43mfiles\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 448\u001b[0m \u001b[43m \u001b[49m\u001b[43mresponse_types_map\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 449\u001b[0m \u001b[43m \u001b[49m\u001b[43mauth_settings\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 450\u001b[0m \u001b[43m \u001b[49m\u001b[43m_return_http_data_only\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 451\u001b[0m \u001b[43m \u001b[49m\u001b[43mcollection_formats\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 452\u001b[0m \u001b[43m \u001b[49m\u001b[43m_preload_content\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 453\u001b[0m \u001b[43m \u001b[49m\u001b[43m_request_timeout\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 454\u001b[0m \u001b[43m \u001b[49m\u001b[43m_host\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 455\u001b[0m \u001b[43m \u001b[49m\u001b[43m_request_auth\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 456\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 458\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mpool\u001b[38;5;241m.\u001b[39mapply_async(\n\u001b[1;32m 459\u001b[0m 
\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m__call_api,\n\u001b[1;32m 460\u001b[0m (\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 477\u001b[0m ),\n\u001b[1;32m 478\u001b[0m )\n", + "File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/e2b/api/v1/client/api_client.py:242\u001b[0m, in \u001b[0;36mApiClient.__call_api\u001b[0;34m(self, resource_path, method, path_params, query_params, header_params, body, post_params, files, response_types_map, auth_settings, _return_http_data_only, collection_formats, _preload_content, _request_timeout, _host, _request_auth)\u001b[0m\n\u001b[1;32m 240\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m e\u001b[38;5;241m.\u001b[39mbody:\n\u001b[1;32m 241\u001b[0m e\u001b[38;5;241m.\u001b[39mbody \u001b[38;5;241m=\u001b[39m e\u001b[38;5;241m.\u001b[39mbody\u001b[38;5;241m.\u001b[39mdecode(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mutf-8\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m--> 242\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m e\n\u001b[1;32m 244\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mlast_response \u001b[38;5;241m=\u001b[39m response_data\n\u001b[1;32m 246\u001b[0m return_data \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;66;03m# assuming derialization is not needed\u001b[39;00m\n", + "File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/e2b/api/v1/client/api_client.py:229\u001b[0m, in \u001b[0;36mApiClient.__call_api\u001b[0;34m(self, resource_path, method, path_params, query_params, header_params, body, post_params, files, response_types_map, auth_settings, _return_http_data_only, collection_formats, _preload_content, _request_timeout, _host, _request_auth)\u001b[0m\n\u001b[1;32m 225\u001b[0m url \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m?\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;241m+\u001b[39m url_query\n\u001b[1;32m 227\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 228\u001b[0m \u001b[38;5;66;03m# perform request and return response\u001b[39;00m\n\u001b[0;32m--> 229\u001b[0m response_data \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrequest\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 230\u001b[0m \u001b[43m \u001b[49m\u001b[43mmethod\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 231\u001b[0m \u001b[43m \u001b[49m\u001b[43murl\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 232\u001b[0m \u001b[43m \u001b[49m\u001b[43mquery_params\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mquery_params\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 233\u001b[0m \u001b[43m \u001b[49m\u001b[43mheaders\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mheader_params\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 234\u001b[0m \u001b[43m \u001b[49m\u001b[43mpost_params\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mpost_params\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 235\u001b[0m \u001b[43m \u001b[49m\u001b[43mbody\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mbody\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 236\u001b[0m \u001b[43m \u001b[49m\u001b[43m_preload_content\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m_preload_content\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 237\u001b[0m \u001b[43m \u001b[49m\u001b[43m_request_timeout\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m_request_timeout\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 238\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 
239\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m ApiException \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 240\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m e\u001b[38;5;241m.\u001b[39mbody:\n", + "File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/e2b/api/v1/client/api_client.py:517\u001b[0m, in \u001b[0;36mApiClient.request\u001b[0;34m(self, method, url, query_params, headers, post_params, body, _preload_content, _request_timeout)\u001b[0m\n\u001b[1;32m 509\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mrest_client\u001b[38;5;241m.\u001b[39moptions_request(\n\u001b[1;32m 510\u001b[0m url,\n\u001b[1;32m 511\u001b[0m query_params\u001b[38;5;241m=\u001b[39mquery_params,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 514\u001b[0m _request_timeout\u001b[38;5;241m=\u001b[39m_request_timeout,\n\u001b[1;32m 515\u001b[0m )\n\u001b[1;32m 516\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m method \u001b[38;5;241m==\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mPOST\u001b[39m\u001b[38;5;124m\"\u001b[39m:\n\u001b[0;32m--> 517\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrest_client\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpost_request\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 518\u001b[0m \u001b[43m \u001b[49m\u001b[43murl\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 519\u001b[0m \u001b[43m \u001b[49m\u001b[43mquery_params\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mquery_params\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 520\u001b[0m \u001b[43m \u001b[49m\u001b[43mheaders\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mheaders\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 521\u001b[0m \u001b[43m \u001b[49m\u001b[43mpost_params\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mpost_params\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 522\u001b[0m \u001b[43m \u001b[49m\u001b[43m_preload_content\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m_preload_content\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 523\u001b[0m \u001b[43m \u001b[49m\u001b[43m_request_timeout\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m_request_timeout\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 524\u001b[0m \u001b[43m \u001b[49m\u001b[43mbody\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mbody\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 525\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 526\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m method \u001b[38;5;241m==\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mPUT\u001b[39m\u001b[38;5;124m\"\u001b[39m:\n\u001b[1;32m 527\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mrest_client\u001b[38;5;241m.\u001b[39mput_request(\n\u001b[1;32m 528\u001b[0m url,\n\u001b[1;32m 529\u001b[0m query_params\u001b[38;5;241m=\u001b[39mquery_params,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 534\u001b[0m body\u001b[38;5;241m=\u001b[39mbody,\n\u001b[1;32m 535\u001b[0m )\n", + "File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/e2b/api/v1/client/rest.py:348\u001b[0m, in \u001b[0;36mRESTClientObject.post_request\u001b[0;34m(self, url, headers, query_params, post_params, body, _preload_content, _request_timeout)\u001b[0m\n\u001b[1;32m 338\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mpost_request\u001b[39m(\n\u001b[1;32m 339\u001b[0m \u001b[38;5;28mself\u001b[39m,\n\u001b[1;32m 340\u001b[0m url,\n\u001b[0;32m 
(...)\u001b[0m\n\u001b[1;32m 346\u001b[0m _request_timeout\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[1;32m 347\u001b[0m ):\n\u001b[0;32m--> 348\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrequest\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 349\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mPOST\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 350\u001b[0m \u001b[43m \u001b[49m\u001b[43murl\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 351\u001b[0m \u001b[43m \u001b[49m\u001b[43mheaders\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mheaders\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 352\u001b[0m \u001b[43m \u001b[49m\u001b[43mquery_params\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mquery_params\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 353\u001b[0m \u001b[43m \u001b[49m\u001b[43mpost_params\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mpost_params\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 354\u001b[0m \u001b[43m \u001b[49m\u001b[43m_preload_content\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m_preload_content\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 355\u001b[0m \u001b[43m \u001b[49m\u001b[43m_request_timeout\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m_request_timeout\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 356\u001b[0m \u001b[43m \u001b[49m\u001b[43mbody\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mbody\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 357\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/e2b/api/v1/client/rest.py:249\u001b[0m, in \u001b[0;36mRESTClientObject.request\u001b[0;34m(self, method, url, query_params, headers, body, post_params, _preload_content, _request_timeout)\u001b[0m\n\u001b[1;32m 247\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;241m200\u001b[39m \u001b[38;5;241m<\u001b[39m\u001b[38;5;241m=\u001b[39m r\u001b[38;5;241m.\u001b[39mstatus \u001b[38;5;241m<\u001b[39m\u001b[38;5;241m=\u001b[39m \u001b[38;5;241m299\u001b[39m:\n\u001b[1;32m 248\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m r\u001b[38;5;241m.\u001b[39mstatus \u001b[38;5;241m==\u001b[39m \u001b[38;5;241m401\u001b[39m:\n\u001b[0;32m--> 249\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m UnauthorizedException(http_resp\u001b[38;5;241m=\u001b[39mr)\n\u001b[1;32m 251\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m r\u001b[38;5;241m.\u001b[39mstatus \u001b[38;5;241m==\u001b[39m \u001b[38;5;241m403\u001b[39m:\n\u001b[1;32m 252\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m ForbiddenException(http_resp\u001b[38;5;241m=\u001b[39mr)\n", + "\u001b[0;31mUnauthorizedException\u001b[0m: (401)\nReason: Unauthorized\nHTTP response headers: HTTPHeaderDict({'Date': 'Tue, 07 Nov 2023 06:10:27 GMT', 'Content-Type': 'application/json; charset=utf-8', 'Content-Length': '123', 'Connection': 'keep-alive', 'via': '1.1 google', 'alt-svc': 'h3=\":443\"; ma=86400', 'CF-Cache-Status': 'DYNAMIC', 'Report-To': '{\"endpoints\":[{\"url\":\"https:\\\\/\\\\/a.nel.cloudflare.com\\\\/report\\\\/v3?s=eD4xvpIGOuei6fNw3tUox8hcw2rilvLB%2FUrFxgAkV05OCKE1yIBAZfHpmAnqATRlRrXBnDTn8KFWtliFKKO0fZHiKEPRzxzMB3CKvDSgquxoePtSyVtPguaR1UNYrK3h\"}],\"group\":\"cf-nel\",\"max_age\":604800}', 'NEL': '{\"success_fraction\":0,\"report_to\":\"cf-nel\",\"max_age\":604800}', 'Server': 'cloudflare', 'CF-RAY': 
'822369c7eefbd00d-SJC'})\nHTTP response body: {\"code\":401,\"message\":\"Invalid API key, please visit https://e2b.dev/docs?reason=sdk-missing-api-key to get your API key.\"}\n" + ] + } + ], "source": [ "from langchain.tools import E2BDataAnalysisTool, format_tool_to_openai_function\n", "\n", @@ -146,7 +186,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "id": "1c01dd79-dd3e-4509-a2e2-009a7f99f16a", "metadata": {}, "outputs": [], @@ -161,6 +201,72 @@ "agent.create()" ] }, + { + "cell_type": "markdown", + "id": "1ac71d8b-4b4b-4f98-b826-6b3c57a34166", + "metadata": {}, + "source": [ + "#### Using AgentExecutor\n" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "1f137f94-801f-4766-9ff5-2de9df5e8079", + "metadata": {}, + "outputs": [ + { + "ename": "ValueError", + "evalue": "{'id': 'run_08of98E8mkOkvvLevGlmxTpF', 'assistant_id': 'asst_8dAlzPI0zvjOaaenvwNCtIEl', 'cancelled_at': None, 'completed_at': None, 'created_at': 1699337429, 'expires_at': None, 'failed_at': 1699337439, 'file_ids': [], 'instructions': 'You are a personal math tutor. Write and run code to answer math questions.', 'last_error': {'code': 'rate_limit_exceeded', 'message': \"We're currently processing too many requests - please try again later.\"}, 'metadata': {}, 'model': 'gpt-4-1106-preview', 'object': 'thread.run', 'required_action': None, 'started_at': 1699337429, 'status': 'failed', 'thread_id': 'thread_wcdfruB9FPwjs27TbBv0p45U', 'tools': [{'function': {'description': 'Evaluates python code in a sandbox environment. The environment is long running and exists across multiple executions. You must send the whole script every time and print your outputs. Script should be pure python code that can be evaluated. It should be in python format NOT markdown. The code should NOT be wrapped in backticks. All python packages including requests, matplotlib, scipy, numpy, pandas, etc are available. Create and display chart using `plt.show()`.', 'name': 'e2b_data_analysis', 'parameters': {'title': 'E2BDataAnalysisToolArguments', 'description': 'Arguments for the E2BDataAnalysisTool.', 'type': 'object', 'properties': {'python_code': {'title': 'Python Code', 'description': 'The python script to be evaluated. The contents will be in main.py. 
It should not be in markdown format.', 'example': \"print('Hello World')\", 'type': 'string'}}, 'required': ['python_code']}}, 'type': 'function'}]}", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[8], line 4\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mlangchain\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01magents\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m AgentExecutor\n\u001b[1;32m 3\u001b[0m agent_executor \u001b[38;5;241m=\u001b[39m AgentExecutor(agent\u001b[38;5;241m=\u001b[39magent, tools\u001b[38;5;241m=\u001b[39mtools)\n\u001b[0;32m----> 4\u001b[0m \u001b[43magent_executor\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minvoke\u001b[49m\u001b[43m(\u001b[49m\u001b[43m{\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mcontent\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mWhat\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43ms 10 - 4 raised to the 2.7\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m}\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m~/langchain/libs/langchain/langchain/chains/base.py:87\u001b[0m, in \u001b[0;36mChain.invoke\u001b[0;34m(self, input, config, **kwargs)\u001b[0m\n\u001b[1;32m 80\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21minvoke\u001b[39m(\n\u001b[1;32m 81\u001b[0m \u001b[38;5;28mself\u001b[39m,\n\u001b[1;32m 82\u001b[0m \u001b[38;5;28minput\u001b[39m: Dict[\u001b[38;5;28mstr\u001b[39m, Any],\n\u001b[1;32m 83\u001b[0m config: Optional[RunnableConfig] \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[1;32m 84\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs: Any,\n\u001b[1;32m 85\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Dict[\u001b[38;5;28mstr\u001b[39m, Any]:\n\u001b[1;32m 86\u001b[0m config \u001b[38;5;241m=\u001b[39m config \u001b[38;5;129;01mor\u001b[39;00m {}\n\u001b[0;32m---> 87\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m(\u001b[49m\n\u001b[1;32m 88\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 89\u001b[0m \u001b[43m \u001b[49m\u001b[43mcallbacks\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mcallbacks\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 90\u001b[0m \u001b[43m \u001b[49m\u001b[43mtags\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mtags\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 91\u001b[0m \u001b[43m \u001b[49m\u001b[43mmetadata\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mmetadata\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 92\u001b[0m 
\u001b[43m \u001b[49m\u001b[43mrun_name\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mrun_name\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 93\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 94\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m~/langchain/libs/langchain/langchain/chains/base.py:310\u001b[0m, in \u001b[0;36mChain.__call__\u001b[0;34m(self, inputs, return_only_outputs, callbacks, tags, metadata, run_name, include_run_info)\u001b[0m\n\u001b[1;32m 308\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mBaseException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 309\u001b[0m run_manager\u001b[38;5;241m.\u001b[39mon_chain_error(e)\n\u001b[0;32m--> 310\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m e\n\u001b[1;32m 311\u001b[0m run_manager\u001b[38;5;241m.\u001b[39mon_chain_end(outputs)\n\u001b[1;32m 312\u001b[0m final_outputs: Dict[\u001b[38;5;28mstr\u001b[39m, Any] \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mprep_outputs(\n\u001b[1;32m 313\u001b[0m inputs, outputs, return_only_outputs\n\u001b[1;32m 314\u001b[0m )\n", + "File \u001b[0;32m~/langchain/libs/langchain/langchain/chains/base.py:304\u001b[0m, in \u001b[0;36mChain.__call__\u001b[0;34m(self, inputs, return_only_outputs, callbacks, tags, metadata, run_name, include_run_info)\u001b[0m\n\u001b[1;32m 297\u001b[0m run_manager \u001b[38;5;241m=\u001b[39m callback_manager\u001b[38;5;241m.\u001b[39mon_chain_start(\n\u001b[1;32m 298\u001b[0m dumpd(\u001b[38;5;28mself\u001b[39m),\n\u001b[1;32m 299\u001b[0m inputs,\n\u001b[1;32m 300\u001b[0m name\u001b[38;5;241m=\u001b[39mrun_name,\n\u001b[1;32m 301\u001b[0m )\n\u001b[1;32m 302\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 303\u001b[0m outputs \u001b[38;5;241m=\u001b[39m (\n\u001b[0;32m--> 304\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call\u001b[49m\u001b[43m(\u001b[49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mrun_manager\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrun_manager\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 305\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m new_arg_supported\n\u001b[1;32m 306\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_call(inputs)\n\u001b[1;32m 307\u001b[0m )\n\u001b[1;32m 308\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mBaseException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 309\u001b[0m run_manager\u001b[38;5;241m.\u001b[39mon_chain_error(e)\n", + "File \u001b[0;32m~/langchain/libs/langchain/langchain/agents/agent.py:1146\u001b[0m, in \u001b[0;36mAgentExecutor._call\u001b[0;34m(self, inputs, run_manager)\u001b[0m\n\u001b[1;32m 1144\u001b[0m \u001b[38;5;66;03m# We now enter the agent loop (until it returns something).\u001b[39;00m\n\u001b[1;32m 1145\u001b[0m \u001b[38;5;28;01mwhile\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_should_continue(iterations, time_elapsed):\n\u001b[0;32m-> 1146\u001b[0m next_step_output \u001b[38;5;241m=\u001b[39m 
\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_take_next_step\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1147\u001b[0m \u001b[43m \u001b[49m\u001b[43mname_to_tool_map\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1148\u001b[0m \u001b[43m \u001b[49m\u001b[43mcolor_mapping\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1149\u001b[0m \u001b[43m \u001b[49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1150\u001b[0m \u001b[43m \u001b[49m\u001b[43mintermediate_steps\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1151\u001b[0m \u001b[43m \u001b[49m\u001b[43mrun_manager\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrun_manager\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1152\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1153\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(next_step_output, AgentFinish):\n\u001b[1;32m 1154\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_return(\n\u001b[1;32m 1155\u001b[0m next_step_output, intermediate_steps, run_manager\u001b[38;5;241m=\u001b[39mrun_manager\n\u001b[1;32m 1156\u001b[0m )\n", + "File \u001b[0;32m~/langchain/libs/langchain/langchain/agents/agent.py:933\u001b[0m, in \u001b[0;36mAgentExecutor._take_next_step\u001b[0;34m(self, name_to_tool_map, color_mapping, inputs, intermediate_steps, run_manager)\u001b[0m\n\u001b[1;32m 930\u001b[0m intermediate_steps \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_prepare_intermediate_steps(intermediate_steps)\n\u001b[1;32m 932\u001b[0m \u001b[38;5;66;03m# Call the LLM to see what to do.\u001b[39;00m\n\u001b[0;32m--> 933\u001b[0m output \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43magent\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mplan\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 934\u001b[0m \u001b[43m \u001b[49m\u001b[43mintermediate_steps\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 935\u001b[0m \u001b[43m \u001b[49m\u001b[43mcallbacks\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrun_manager\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget_child\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mif\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mrun_manager\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01melse\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[1;32m 936\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 937\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 938\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m OutputParserException \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 939\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mhandle_parsing_errors, \u001b[38;5;28mbool\u001b[39m):\n", + "File \u001b[0;32m~/langchain/libs/langchain/langchain/agents/agent.py:375\u001b[0m, in \u001b[0;36mRunnableAgent.plan\u001b[0;34m(self, intermediate_steps, callbacks, **kwargs)\u001b[0m\n\u001b[1;32m 363\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124;03m\"\"\"Given input, decided what to do.\u001b[39;00m\n\u001b[1;32m 364\u001b[0m \n\u001b[1;32m 365\u001b[0m \u001b[38;5;124;03mArgs:\u001b[39;00m\n\u001b[0;32m 
(...)\u001b[0m\n\u001b[1;32m 372\u001b[0m \u001b[38;5;124;03m Action specifying what tool to use.\u001b[39;00m\n\u001b[1;32m 373\u001b[0m \u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 374\u001b[0m inputs \u001b[38;5;241m=\u001b[39m {\u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39m{\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mintermediate_steps\u001b[39m\u001b[38;5;124m\"\u001b[39m: intermediate_steps}}\n\u001b[0;32m--> 375\u001b[0m output \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrunnable\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minvoke\u001b[49m\u001b[43m(\u001b[49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m{\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mcallbacks\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mcallbacks\u001b[49m\u001b[43m}\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 376\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m output\n", + "File \u001b[0;32m~/langchain/libs/experimental/langchain_experimental/openai_assistant/base.py:75\u001b[0m, in \u001b[0;36mOpenAIAssistantRunnable.invoke\u001b[0;34m(self, input, config)\u001b[0m\n\u001b[1;32m 71\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 72\u001b[0m run \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mclient\u001b[38;5;241m.\u001b[39mbeta\u001b[38;5;241m.\u001b[39mthreads\u001b[38;5;241m.\u001b[39mruns\u001b[38;5;241m.\u001b[39msubmit_tool_outputs(\n\u001b[1;32m 73\u001b[0m thread_id\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mthread_id, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39m\u001b[38;5;28minput\u001b[39m\n\u001b[1;32m 74\u001b[0m )\n\u001b[0;32m---> 75\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_get_response\u001b[49m\u001b[43m(\u001b[49m\u001b[43mrun\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mid\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m~/langchain/libs/experimental/langchain_experimental/openai_assistant/base.py:118\u001b[0m, in \u001b[0;36mOpenAIAssistantRunnable._get_response\u001b[0;34m(self, run_id)\u001b[0m\n\u001b[1;32m 116\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m actions\n\u001b[1;32m 117\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m--> 118\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(run\u001b[38;5;241m.\u001b[39mdict())\n", + "\u001b[0;31mValueError\u001b[0m: {'id': 'run_08of98E8mkOkvvLevGlmxTpF', 'assistant_id': 'asst_8dAlzPI0zvjOaaenvwNCtIEl', 'cancelled_at': None, 'completed_at': None, 'created_at': 1699337429, 'expires_at': None, 'failed_at': 1699337439, 'file_ids': [], 'instructions': 'You are a personal math tutor. Write and run code to answer math questions.', 'last_error': {'code': 'rate_limit_exceeded', 'message': \"We're currently processing too many requests - please try again later.\"}, 'metadata': {}, 'model': 'gpt-4-1106-preview', 'object': 'thread.run', 'required_action': None, 'started_at': 1699337429, 'status': 'failed', 'thread_id': 'thread_wcdfruB9FPwjs27TbBv0p45U', 'tools': [{'function': {'description': 'Evaluates python code in a sandbox environment. The environment is long running and exists across multiple executions. 
You must send the whole script every time and print your outputs. Script should be pure python code that can be evaluated. It should be in python format NOT markdown. The code should NOT be wrapped in backticks. All python packages including requests, matplotlib, scipy, numpy, pandas, etc are available. Create and display chart using `plt.show()`.', 'name': 'e2b_data_analysis', 'parameters': {'title': 'E2BDataAnalysisToolArguments', 'description': 'Arguments for the E2BDataAnalysisTool.', 'type': 'object', 'properties': {'python_code': {'title': 'Python Code', 'description': 'The python script to be evaluated. The contents will be in main.py. It should not be in markdown format.', 'example': \"print('Hello World')\", 'type': 'string'}}, 'required': ['python_code']}}, 'type': 'function'}]}" + ] + } + ], + "source": [ + "from langchain.agents import AgentExecutor\n", + "\n", + "agent_executor = AgentExecutor(agent=agent, tools=tools)\n", + "agent_executor.invoke({\"content\": \"What's 10 - 4 raised to the 2.7\"})" + ] + }, + { + "cell_type": "markdown", + "id": "2d0a0b1d-c1b3-4b50-9dce-1189b51a6206", + "metadata": {}, + "source": [ + "#### Custom execution" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c0475fa7-b6c1-4331-b8e2-55407466c724", + "metadata": {}, + "outputs": [], + "source": [ + "agent = OpenAIAssistantRunnable(\n", + " name=\"langchain assistant e2b tool\",\n", + " instructions=\"You are a personal math tutor. Write and run code to answer math questions.\",\n", + " tools=[{\"type\": \"function\", \"function\": function}],\n", + " model=\"gpt-4-1106-preview\",\n", + " as_agent=True\n", + ")\n", + "agent.create()" + ] + }, { "cell_type": "code", "execution_count": 5, diff --git a/libs/experimental/langchain_experimental/openai_assistant/base.py b/libs/experimental/langchain_experimental/openai_assistant/base.py index 957dfacc4cfaf..edb4f44f89655 100644 --- a/libs/experimental/langchain_experimental/openai_assistant/base.py +++ b/libs/experimental/langchain_experimental/openai_assistant/base.py @@ -58,7 +58,15 @@ def create(self) -> None: self.thread_id = thread.id def invoke(self, input: Any, config: Optional[RunnableConfig] = None) -> List: - if "run_id" not in input: + if input.get("intermediate_steps"): + last_action, last_output = input["intermediate_steps"][-1] + input = { + "tool_outputs": [ + {"output": last_output, "tool_call_id": last_action.tool_call_id} + ], + "run_id": last_action.run_id + } + if "run_id" not in input : msg = self.client.beta.threads.messages.create( self.thread_id, content=input["content"], role="user" ) From a6d39111ec9ced1d63508c30b81ab64a9f947d41 Mon Sep 17 00:00:00 2001 From: Bagatur Date: Tue, 7 Nov 2023 08:51:15 -0800 Subject: [PATCH 07/22] cr --- .../openai_assistant/base.py | 35 +++++++++++++------ 1 file changed, 25 insertions(+), 10 deletions(-) diff --git a/libs/experimental/langchain_experimental/openai_assistant/base.py b/libs/experimental/langchain_experimental/openai_assistant/base.py index edb4f44f89655..560d5144cc839 100644 --- a/libs/experimental/langchain_experimental/openai_assistant/base.py +++ b/libs/experimental/langchain_experimental/openai_assistant/base.py @@ -2,7 +2,7 @@ import json from time import sleep -from typing import TYPE_CHECKING, Dict, List, Optional, Union +from typing import TYPE_CHECKING, Dict, List, Optional, Sequence, Union from langchain.pydantic_v1 import root_validator from langchain.schema.agent import AgentAction, AgentFinish @@ -14,25 +14,40 @@ class 
OpenAIAssistantFinish(AgentFinish): run_id: str + thread_id: str class OpenAIAssistantAction(AgentAction): tool_call_id: str run_id: str + thread_id: str class OpenAIAssistantRunnable(RunnableSerializable[Union[List[dict], str], list]): client: Optional[openai.OpenAI] = None - name: str - instructions: str - tools: list - model: str - thread_id: Optional[str] = None - assistant_id: Optional[str] = None - run_id: Optional[str] = None - poll_rate: int = 1 + assistant_id: str + check_every_ms: float = 1_000.0 as_agent: bool = False + @classmethod + def create( + cls, + name: str, + instructions: str, + tools: Sequence, + model: str, + *, + client: Optional[openai.OpenAI] = None, + **kwargs: Any, + ) -> OpenAIAssistantRunnable: + assistant = self.client.beta.assistants.create( + name=self.name, + instructions=self.instructions, + tools=self.tools, + model=self.model, + ) + return cls(assistant_id=assistant.id, **kwargs) + @root_validator() def validate_environment(cls, values: Dict) -> Dict: if not values["client"]: @@ -89,7 +104,7 @@ def _get_response(self, run_id: str) -> List: run = self._retrieve_run(run_id) in_progress = run.status in ("in_progress", "queued") if in_progress: - sleep(self.poll_rate) + sleep(1000 * self.check_every_ms) if run.status == "completed": messages = self.client.beta.threads.messages.list( self.thread_id, order="asc" From 7c07940f397b800f96249460446b053459199415 Mon Sep 17 00:00:00 2001 From: Bagatur Date: Tue, 7 Nov 2023 09:20:34 -0800 Subject: [PATCH 08/22] wip --- .../openai_assistant/base.py | 118 +++++++++++------- 1 file changed, 72 insertions(+), 46 deletions(-) diff --git a/libs/experimental/langchain_experimental/openai_assistant/base.py b/libs/experimental/langchain_experimental/openai_assistant/base.py index 560d5144cc839..bea6afc8d9772 100644 --- a/libs/experimental/langchain_experimental/openai_assistant/base.py +++ b/libs/experimental/langchain_experimental/openai_assistant/base.py @@ -2,7 +2,7 @@ import json from time import sleep -from typing import TYPE_CHECKING, Dict, List, Optional, Sequence, Union +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Union from langchain.pydantic_v1 import root_validator from langchain.schema.agent import AgentAction, AgentFinish @@ -40,11 +40,11 @@ def create( client: Optional[openai.OpenAI] = None, **kwargs: Any, ) -> OpenAIAssistantRunnable: - assistant = self.client.beta.assistants.create( - name=self.name, - instructions=self.instructions, - tools=self.tools, - model=self.model, + assistant = client.beta.assistants.create( + name=name, + instructions=instructions, + tools=tools, + model=model, ) return cls(assistant_id=assistant.id, **kwargs) @@ -59,52 +59,65 @@ def validate_environment(cls, values: Dict) -> Dict: values["client"] = openai.OpenAI() return values - def create(self) -> None: - if not self.assistant_id: - assistant = self.client.beta.assistants.create( - name=self.name, - instructions=self.instructions, - tools=self.tools, - model=self.model, + def invoke(self, input: dict, config: Optional[RunnableConfig] = None) -> List: + input = self._parse_input(input) + if "thread_id" not in input: + run = self._create_thread_and_run(input) + _ = self.client.beta.threads.messages.create( + run.thread_id, content=input["content"], role="user" ) - self.assistant_id = assistant.id - if not self.thread_id: - thread = self.client.beta.threads.create() - self.thread_id = thread.id + elif "run_id" not in input: + _ = self.client.beta.threads.messages.create( + input["thread_id"], 
content=input["content"], role="user" + ) + run = self._create_run(input) + else: + run = self.client.beta.threads.runs.submit_tool_outputs( + thread_id=input["thread_id"], **input + ) + return self._get_response(run.id) - def invoke(self, input: Any, config: Optional[RunnableConfig] = None) -> List: - if input.get("intermediate_steps"): + def _parse_input(self, input: dict) -> dict: + if self.as_agent and input.get("intermediate_steps"): last_action, last_output = input["intermediate_steps"][-1] input = { "tool_outputs": [ {"output": last_output, "tool_call_id": last_action.tool_call_id} - ], - "run_id": last_action.run_id + ], + "run_id": last_action.run_id, + "thread_id": last_action.thread_id, } - if "run_id" not in input : - msg = self.client.beta.threads.messages.create( - self.thread_id, content=input["content"], role="user" - ) - run = self.client.beta.threads.runs.create( - self.thread_id, - assistant_id=self.assistant_id, - instructions=input.get("run_instructions"), - ) + return input + + def _create_run(self, input: dict) -> Any: + params = { + k: v + for k, v in input.items() + if k in ("instructions", "model", "tools", "metadata") + } + return self.client.beta.threads.runs.create( + input["thread_id"], + assistant_id=self.assistant_id, + **params, + ) - else: - run = self.client.beta.threads.runs.submit_tool_outputs( - thread_id=self.thread_id, **input - ) - return self._get_response(run.id) + def _create_thread_and_run(self, input: dict) -> Any: + params = { + k: v + for k, v in input.items() + if k in ("instructions", "thread", "model", "tools", "metadata") + } + run = self.client.beta.threads.create_and_run( + assistant_id=self.assistant_id, + **params, + ) + return run - def _get_response(self, run_id: str) -> List: + def _get_response( + self, run_id: str, thread_id: str + ) -> Union[List[OpenAIAssistantAction], OpenAIAssistantFinish]: # TODO: Pagination - in_progress = True - while in_progress: - run = self._retrieve_run(run_id) - in_progress = run.status in ("in_progress", "queued") - if in_progress: - sleep(1000 * self.check_every_ms) + run = self._wait_for_run(run_id, thread_id) if run.status == "completed": messages = self.client.beta.threads.messages.list( self.thread_id, order="asc" @@ -118,7 +131,10 @@ def _get_response(self, run_id: str) -> List: for msg_content in msg.content ) return OpenAIAssistantFinish( - return_values={"output": answer}, log="", run_id=run_id + return_values={"output": answer}, + log="", + run_id=run_id, + thread_id=thread_id, ) elif run.status == "requires_action": if not self.as_agent: @@ -134,11 +150,21 @@ def _get_response(self, run_id: str) -> List: tool_call_id=tool_call.id, log="", run_id=run_id, + thread_id=thread_id, ) ) return actions else: - raise ValueError(run.dict()) + run_info = json.dumps(run.dict(), indent=2) + raise ValueError( + f"Unknown run status {run.status}. 
Full run info:\n\n{run_info})" + ) - def _retrieve_run(self, run_id: str) -> Any: - return self.client.beta.threads.runs.retrieve(run_id, thread_id=self.thread_id) + def _wait_for_run(self, run_id: str, thread_id) -> Any: + in_progress = True + while in_progress: + run = self.client.beta.threads.runs.retrieve(run_id, thread_id=thread_id) + in_progress = run.status in ("in_progress", "queued") + if in_progress: + sleep(self.check_every_ms / 1000) + return run From 0edf7427daf1416ba1f86ea890d673b209526abc Mon Sep 17 00:00:00 2001 From: Bagatur Date: Tue, 7 Nov 2023 09:49:40 -0800 Subject: [PATCH 09/22] cr --- .../openai_assistant/base.py | 104 +++++++++++++----- 1 file changed, 77 insertions(+), 27 deletions(-) diff --git a/libs/experimental/langchain_experimental/openai_assistant/base.py b/libs/experimental/langchain_experimental/openai_assistant/base.py index bea6afc8d9772..3f307e4794b23 100644 --- a/libs/experimental/langchain_experimental/openai_assistant/base.py +++ b/libs/experimental/langchain_experimental/openai_assistant/base.py @@ -4,9 +4,11 @@ from time import sleep from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Union -from langchain.pydantic_v1 import root_validator +from langchain.pydantic_v1 import Field, root_validator from langchain.schema.agent import AgentAction, AgentFinish from langchain.schema.runnable import RunnableConfig, RunnableSerializable +from langchain.tools import format_tool_to_openai_function +from langchain.tools.base import BaseTool if TYPE_CHECKING: import openai @@ -23,27 +25,67 @@ class OpenAIAssistantAction(AgentAction): thread_id: str -class OpenAIAssistantRunnable(RunnableSerializable[Union[List[dict], str], list]): - client: Optional[openai.OpenAI] = None +def _get_openai_client() -> openai.OpenAI: + try: + import openai + + return openai.OpenAI() + except ImportError as e: + raise ImportError( + "Unable to import openai, please install with `pip install openai`." + ) from e + except AttributeError as e: + raise AttributeError( + "Please make sure you are using a v1.1-compatible version of openai. You " + 'can install with `pip install "openai>=1.1"`.' + ) from e + + +class OpenAIAssistantRunnable(RunnableSerializable[Dict, Any]): + """Run an OpenAI Assistant.""" + + client: openai.OpenAI = Field(default_factory=_get_openai_client) + """OpenAI client.""" assistant_id: str + """OpenAI assistant id.""" check_every_ms: float = 1_000.0 + """Frequency with which to check run progress in ms.""" as_agent: bool = False + """Use as a LangChain agent, compatible with the AgentExecutor.""" @classmethod - def create( + def create_assistant( cls, name: str, instructions: str, - tools: Sequence, + tools: Sequence[Union[BaseTool, dict]], model: str, *, client: Optional[openai.OpenAI] = None, **kwargs: Any, ) -> OpenAIAssistantRunnable: + """Create an OpenAI Assistant and instantiate the Runnable. + + Args: + name: Assistant name. + instructions: Assistant instructions. + tools: Assistant tools. Can be passed in in OpenAI format or as BaseTools. + model: Assistant model to use. + client: OpenAI client. Will create default client if not specified. + + Returns: + OpenAIAssistantRunnable configured to run using the created assistant. 
+ """ + client = client or _get_openai_client() + openai_tools: List = [] + for tool in tools: + if isinstance(tool, BaseTool): + tool = format_tool_to_openai_function(tool) + openai_tools.append(tool) assistant = client.beta.assistants.create( name=name, instructions=instructions, - tools=tools, + tools=openai_tools, model=model, ) return cls(assistant_id=assistant.id, **kwargs) @@ -59,23 +101,30 @@ def validate_environment(cls, values: Dict) -> Dict: values["client"] = openai.OpenAI() return values - def invoke(self, input: dict, config: Optional[RunnableConfig] = None) -> List: + def invoke(self, input: dict, config: Optional[RunnableConfig] = None) -> Any: + """""" input = self._parse_input(input) if "thread_id" not in input: run = self._create_thread_and_run(input) _ = self.client.beta.threads.messages.create( - run.thread_id, content=input["content"], role="user" + run.thread_id, + content=input["content"], + role="user", + file_ids=input.get("file_ids", []), + metadata=input.get("message_metadata"), ) elif "run_id" not in input: _ = self.client.beta.threads.messages.create( - input["thread_id"], content=input["content"], role="user" + input["thread_id"], + content=input["content"], + role="user", + file_ids=input.get("file_ids", []), + metadata=input.get("message_metadata"), ) run = self._create_run(input) else: - run = self.client.beta.threads.runs.submit_tool_outputs( - thread_id=input["thread_id"], **input - ) - return self._get_response(run.id) + run = self.client.beta.threads.runs.submit_tool_outputs(**input) + return self._get_response(run.id, run.thread_id) def _parse_input(self, input: dict) -> dict: if self.as_agent and input.get("intermediate_steps"): @@ -93,7 +142,7 @@ def _create_run(self, input: dict) -> Any: params = { k: v for k, v in input.items() - if k in ("instructions", "model", "tools", "metadata") + if k in ("instructions", "model", "tools", "run_metadata") } return self.client.beta.threads.runs.create( input["thread_id"], @@ -105,7 +154,7 @@ def _create_thread_and_run(self, input: dict) -> Any: params = { k: v for k, v in input.items() - if k in ("instructions", "thread", "model", "tools", "metadata") + if k in ("instructions", "thread", "model", "tools", "run_metadata") } run = self.client.beta.threads.create_and_run( assistant_id=self.assistant_id, @@ -113,23 +162,24 @@ def _create_thread_and_run(self, input: dict) -> Any: ) return run - def _get_response( - self, run_id: str, thread_id: str - ) -> Union[List[OpenAIAssistantAction], OpenAIAssistantFinish]: + def _get_response(self, run_id: str, thread_id: str) -> Any: # TODO: Pagination + import openai + run = self._wait_for_run(run_id, thread_id) if run.status == "completed": - messages = self.client.beta.threads.messages.list( - self.thread_id, order="asc" - ) + messages = self.client.beta.threads.messages.list(thread_id, order="asc") new_messages = [msg for msg in messages if msg.run_id == run_id] if not self.as_agent: return new_messages - answer = "".join( - msg_content.text.value - for msg in new_messages - for msg_content in msg.content - ) + answer: Any = [ + msg_content for msg in new_messages for msg_content in msg.content + ] + if all( + isinstance(content, openai.types.beta.threads.MessageContentText) + for content in answer + ): + answer = "".join(content.text.value for content in answer) return OpenAIAssistantFinish( return_values={"output": answer}, log="", @@ -160,7 +210,7 @@ def _get_response( f"Unknown run status {run.status}. 
Full run info:\n\n{run_info})" ) - def _wait_for_run(self, run_id: str, thread_id) -> Any: + def _wait_for_run(self, run_id: str, thread_id: str) -> Any: in_progress = True while in_progress: run = self.client.beta.threads.runs.retrieve(run_id, thread_id=thread_id) From 245a64af8e0bf7e3596e251fae3595aeabb8dfb3 Mon Sep 17 00:00:00 2001 From: Bagatur Date: Tue, 7 Nov 2023 10:02:33 -0800 Subject: [PATCH 10/22] nb --- cookbook/openai_v1_cookbook.ipynb | 156 ++++++------------ .../openai_assistant/base.py | 27 +-- 2 files changed, 68 insertions(+), 115 deletions(-) diff --git a/cookbook/openai_v1_cookbook.ipynb b/cookbook/openai_v1_cookbook.ipynb index 15e8a4f87d5f4..fb8869e1b74e5 100644 --- a/cookbook/openai_v1_cookbook.ipynb +++ b/cookbook/openai_v1_cookbook.ipynb @@ -90,34 +90,38 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 1, + "id": "a9064bbe-d9f7-4a29-a7b3-73933b3197e7", + "metadata": {}, + "outputs": [], + "source": [ + "from langchain_experimental.openai_assistant.base import OpenAIAssistantRunnable\n" + ] + }, + { + "cell_type": "code", + "execution_count": 2, "id": "7a20a008-49ac-46d2-aa26-b270118af5ea", "metadata": {}, "outputs": [ { - "ename": "ValueError", - "evalue": "{'id': 'run_PiaUaLysMmWCgT49jLNkBhhx', 'assistant_id': 'asst_4mXntqWdxkAM8CLkGERlYBXI', 'cancelled_at': None, 'completed_at': None, 'created_at': 1699337403, 'expires_at': None, 'failed_at': 1699337413, 'file_ids': [], 'instructions': 'You are a personal math tutor. Write and run code to answer math questions.', 'last_error': {'code': 'rate_limit_exceeded', 'message': \"We're currently processing too many requests - please try again later.\"}, 'metadata': {}, 'model': 'gpt-4-1106-preview', 'object': 'thread.run', 'required_action': None, 'started_at': 1699337403, 'status': 'failed', 'thread_id': 'thread_mRVqIj5Jm3rxuGreucH22bsr', 'tools': [{'type': 'code_interpreter'}]}", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)", - "Cell \u001b[0;32mIn[6], line 10\u001b[0m\n\u001b[1;32m 3\u001b[0m interpreter_assistant \u001b[38;5;241m=\u001b[39m OpenAIAssistantRunnable(\n\u001b[1;32m 4\u001b[0m name\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mlangchain assistant\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m 5\u001b[0m instructions\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mYou are a personal math tutor. 
Write and run code to answer math questions.\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m 6\u001b[0m tools\u001b[38;5;241m=\u001b[39m[{\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtype\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mcode_interpreter\u001b[39m\u001b[38;5;124m\"\u001b[39m}],\n\u001b[1;32m 7\u001b[0m model\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mgpt-4-1106-preview\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 8\u001b[0m )\n\u001b[1;32m 9\u001b[0m interpreter_assistant\u001b[38;5;241m.\u001b[39mcreate()\n\u001b[0;32m---> 10\u001b[0m output \u001b[38;5;241m=\u001b[39m \u001b[43minterpreter_assistant\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minvoke\u001b[49m\u001b[43m(\u001b[49m\u001b[43m{\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mcontent\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mWhat\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43ms 10 - 4 raised to the 2.7\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m}\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 11\u001b[0m output\n", - "File \u001b[0;32m~/langchain/libs/experimental/langchain_experimental/openai_assistant/base.py:75\u001b[0m, in \u001b[0;36mOpenAIAssistantRunnable.invoke\u001b[0;34m(self, input, config)\u001b[0m\n\u001b[1;32m 71\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 72\u001b[0m run \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mclient\u001b[38;5;241m.\u001b[39mbeta\u001b[38;5;241m.\u001b[39mthreads\u001b[38;5;241m.\u001b[39mruns\u001b[38;5;241m.\u001b[39msubmit_tool_outputs(\n\u001b[1;32m 73\u001b[0m thread_id\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mthread_id, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39m\u001b[38;5;28minput\u001b[39m\n\u001b[1;32m 74\u001b[0m )\n\u001b[0;32m---> 75\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_get_response\u001b[49m\u001b[43m(\u001b[49m\u001b[43mrun\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mid\u001b[49m\u001b[43m)\u001b[49m\n", - "File \u001b[0;32m~/langchain/libs/experimental/langchain_experimental/openai_assistant/base.py:118\u001b[0m, in \u001b[0;36mOpenAIAssistantRunnable._get_response\u001b[0;34m(self, run_id)\u001b[0m\n\u001b[1;32m 116\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m actions\n\u001b[1;32m 117\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m--> 118\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(run\u001b[38;5;241m.\u001b[39mdict())\n", - "\u001b[0;31mValueError\u001b[0m: {'id': 'run_PiaUaLysMmWCgT49jLNkBhhx', 'assistant_id': 'asst_4mXntqWdxkAM8CLkGERlYBXI', 'cancelled_at': None, 'completed_at': None, 'created_at': 1699337403, 'expires_at': None, 'failed_at': 1699337413, 'file_ids': [], 'instructions': 'You are a personal math tutor. 
Write and run code to answer math questions.', 'last_error': {'code': 'rate_limit_exceeded', 'message': \"We're currently processing too many requests - please try again later.\"}, 'metadata': {}, 'model': 'gpt-4-1106-preview', 'object': 'thread.run', 'required_action': None, 'started_at': 1699337403, 'status': 'failed', 'thread_id': 'thread_mRVqIj5Jm3rxuGreucH22bsr', 'tools': [{'type': 'code_interpreter'}]}" - ] + "data": { + "text/plain": [ + "[ThreadMessage(id='msg_bG50eUX5DhIKSTLKV7EgaHMM', assistant_id='asst_p9qkGkw1FfYEd90HLvPalaox', content=[MessageContentText(text=Text(annotations=[], value='The value of \\\\( 10 - 4^{2.7} \\\\) is approximately -32.224.'), type='text')], created_at=1699379844, file_ids=[], metadata={}, object='thread.message', role='assistant', run_id='run_2cc89e4VYnxBOhPQsR5pgcPE', thread_id='thread_ugmxDTcETSsM52k4NBsRB6tR')]" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" } ], "source": [ - "from langchain_experimental.openai_assistant.base import OpenAIAssistantRunnable\n", - "\n", - "interpreter_assistant = OpenAIAssistantRunnable(\n", + "interpreter_assistant = OpenAIAssistantRunnable.create_assistant(\n", " name=\"langchain assistant\",\n", " instructions=\"You are a personal math tutor. Write and run code to answer math questions.\",\n", " tools=[{\"type\": \"code_interpreter\"}],\n", " model=\"gpt-4-1106-preview\"\n", ")\n", - "interpreter_assistant.create()\n", "output = interpreter_assistant.invoke({\"content\": \"What's 10 - 4 raised to the 2.7\"})\n", "output" ] @@ -132,73 +136,30 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 9, "id": "48681ac7-b267-48d4-972c-8a7df8393a21", "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Failed to acquire session\n" - ] - }, - { - "ename": "UnauthorizedException", - "evalue": "(401)\nReason: Unauthorized\nHTTP response headers: HTTPHeaderDict({'Date': 'Tue, 07 Nov 2023 06:10:27 GMT', 'Content-Type': 'application/json; charset=utf-8', 'Content-Length': '123', 'Connection': 'keep-alive', 'via': '1.1 google', 'alt-svc': 'h3=\":443\"; ma=86400', 'CF-Cache-Status': 'DYNAMIC', 'Report-To': '{\"endpoints\":[{\"url\":\"https:\\\\/\\\\/a.nel.cloudflare.com\\\\/report\\\\/v3?s=eD4xvpIGOuei6fNw3tUox8hcw2rilvLB%2FUrFxgAkV05OCKE1yIBAZfHpmAnqATRlRrXBnDTn8KFWtliFKKO0fZHiKEPRzxzMB3CKvDSgquxoePtSyVtPguaR1UNYrK3h\"}],\"group\":\"cf-nel\",\"max_age\":604800}', 'NEL': '{\"success_fraction\":0,\"report_to\":\"cf-nel\",\"max_age\":604800}', 'Server': 'cloudflare', 'CF-RAY': '822369c7eefbd00d-SJC'})\nHTTP response body: {\"code\":401,\"message\":\"Invalid API key, please visit https://e2b.dev/docs?reason=sdk-missing-api-key to get your API key.\"}\n", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mUnauthorizedException\u001b[0m Traceback (most recent call last)", - "Cell \u001b[0;32mIn[7], line 3\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mlangchain\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mtools\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m E2BDataAnalysisTool, format_tool_to_openai_function\n\u001b[0;32m----> 3\u001b[0m tool \u001b[38;5;241m=\u001b[39m 
\u001b[43mE2BDataAnalysisTool\u001b[49m\u001b[43m(\u001b[49m\u001b[43mapi_key\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43m...\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[1;32m 4\u001b[0m function \u001b[38;5;241m=\u001b[39m format_tool_to_openai_function(tool)\n\u001b[1;32m 5\u001b[0m tools \u001b[38;5;241m=\u001b[39m [tool]\n", - "File \u001b[0;32m~/langchain/libs/langchain/langchain/tools/e2b_data_analysis/tool.py:124\u001b[0m, in \u001b[0;36mE2BDataAnalysisTool.__init__\u001b[0;34m(self, api_key, cwd, env_vars, on_stdout, on_stderr, on_artifact, on_exit, **kwargs)\u001b[0m\n\u001b[1;32m 121\u001b[0m \u001b[38;5;66;03m# If no API key is provided, E2B will try to read it from the environment\u001b[39;00m\n\u001b[1;32m 122\u001b[0m \u001b[38;5;66;03m# variable E2B_API_KEY\u001b[39;00m\n\u001b[1;32m 123\u001b[0m \u001b[38;5;28msuper\u001b[39m()\u001b[38;5;241m.\u001b[39m\u001b[38;5;21m__init__\u001b[39m(description\u001b[38;5;241m=\u001b[39mbase_description, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[0;32m--> 124\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39msession \u001b[38;5;241m=\u001b[39m \u001b[43mDataAnalysis\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 125\u001b[0m \u001b[43m \u001b[49m\u001b[43mapi_key\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mapi_key\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 126\u001b[0m \u001b[43m \u001b[49m\u001b[43mcwd\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcwd\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 127\u001b[0m \u001b[43m \u001b[49m\u001b[43menv_vars\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43menv_vars\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 128\u001b[0m \u001b[43m \u001b[49m\u001b[43mon_stdout\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mon_stdout\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 129\u001b[0m \u001b[43m \u001b[49m\u001b[43mon_stderr\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mon_stderr\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 130\u001b[0m \u001b[43m \u001b[49m\u001b[43mon_exit\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mon_exit\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 131\u001b[0m \u001b[43m \u001b[49m\u001b[43mon_artifact\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mon_artifact\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 132\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n", - "File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/e2b/templates/data_analysis.py:42\u001b[0m, in \u001b[0;36mDataAnalysis.__init__\u001b[0;34m(self, api_key, cwd, env_vars, on_stdout, on_stderr, on_artifact, on_exit)\u001b[0m\n\u001b[1;32m 31\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m__init__\u001b[39m(\n\u001b[1;32m 32\u001b[0m \u001b[38;5;28mself\u001b[39m,\n\u001b[1;32m 33\u001b[0m api_key: Optional[\u001b[38;5;28mstr\u001b[39m] \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 39\u001b[0m on_exit: Optional[Callable[[\u001b[38;5;28mint\u001b[39m], Any]] \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[1;32m 40\u001b[0m ):\n\u001b[1;32m 41\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mon_artifact \u001b[38;5;241m=\u001b[39m on_artifact\n\u001b[0;32m---> 42\u001b[0m 
\u001b[38;5;28;43msuper\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[38;5;21;43m__init__\u001b[39;49m\u001b[43m(\u001b[49m\n\u001b[1;32m 43\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mid\u001b[39;49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43menv_id\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 44\u001b[0m \u001b[43m \u001b[49m\u001b[43mapi_key\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mapi_key\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 45\u001b[0m \u001b[43m \u001b[49m\u001b[43mcwd\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcwd\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 46\u001b[0m \u001b[43m \u001b[49m\u001b[43menv_vars\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43menv_vars\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 47\u001b[0m \u001b[43m \u001b[49m\u001b[43mon_stdout\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mon_stdout\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 48\u001b[0m \u001b[43m \u001b[49m\u001b[43mon_stderr\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mon_stderr\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 49\u001b[0m \u001b[43m \u001b[49m\u001b[43mon_exit\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mon_exit\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 50\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n", - "File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/e2b/session/main.py:116\u001b[0m, in \u001b[0;36mSession.__init__\u001b[0;34m(self, id, api_key, cwd, env_vars, on_scan_ports, on_stdout, on_stderr, on_exit, timeout, _debug_hostname, _debug_port, _debug_dev_env)\u001b[0m\n\u001b[1;32m 112\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_filesystem \u001b[38;5;241m=\u001b[39m FilesystemManager(session\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m)\n\u001b[1;32m 113\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_process \u001b[38;5;241m=\u001b[39m ProcessManager(\n\u001b[1;32m 114\u001b[0m session\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m, on_stdout\u001b[38;5;241m=\u001b[39mon_stdout, on_stderr\u001b[38;5;241m=\u001b[39mon_stderr, on_exit\u001b[38;5;241m=\u001b[39mon_exit\n\u001b[1;32m 115\u001b[0m )\n\u001b[0;32m--> 116\u001b[0m \u001b[38;5;28;43msuper\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[38;5;21;43m__init__\u001b[39;49m\u001b[43m(\u001b[49m\n\u001b[1;32m 117\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mid\u001b[39;49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mid\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 118\u001b[0m \u001b[43m \u001b[49m\u001b[43mapi_key\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mapi_key\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 119\u001b[0m \u001b[43m \u001b[49m\u001b[43mcwd\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcwd\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 120\u001b[0m \u001b[43m \u001b[49m\u001b[43menv_vars\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43menv_vars\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 121\u001b[0m \u001b[43m \u001b[49m\u001b[43m_debug_hostname\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m_debug_hostname\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 122\u001b[0m \u001b[43m \u001b[49m\u001b[43m_debug_port\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m_debug_port\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 123\u001b[0m \u001b[43m 
\u001b[49m\u001b[43m_debug_dev_env\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m_debug_dev_env\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 124\u001b[0m \u001b[43m \u001b[49m\u001b[43mon_close\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_close_services\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 125\u001b[0m \u001b[43m \u001b[49m\u001b[43mtimeout\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtimeout\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 126\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n", - "File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/e2b/session/session_connection.py:98\u001b[0m, in \u001b[0;36mSessionConnection.__init__\u001b[0;34m(self, id, api_key, cwd, env_vars, on_close, timeout, _debug_hostname, _debug_port, _debug_dev_env)\u001b[0m\n\u001b[1;32m 94\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_finished \u001b[38;5;241m=\u001b[39m DeferredFuture(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_process_cleanup)\n\u001b[1;32m 96\u001b[0m logger\u001b[38;5;241m.\u001b[39minfo(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mSession for code snippet \u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_id\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m initialized\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m---> 98\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_open\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtimeout\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtimeout\u001b[49m\u001b[43m)\u001b[49m\n", - "File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/e2b/session/main.py:135\u001b[0m, in \u001b[0;36mSession._open\u001b[0;34m(self, timeout)\u001b[0m\n\u001b[1;32m 129\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 130\u001b[0m \u001b[38;5;124;03mOpens the session.\u001b[39;00m\n\u001b[1;32m 131\u001b[0m \n\u001b[1;32m 132\u001b[0m \u001b[38;5;124;03m:param timeout: Specify the duration, in seconds to give the method to finish its execution before it times out (default is 60 seconds). 
If set to None, the method will continue to wait until it completes, regardless of time\u001b[39;00m\n\u001b[1;32m 133\u001b[0m \u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 134\u001b[0m logger\u001b[38;5;241m.\u001b[39minfo(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mOpening session \u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_id\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m--> 135\u001b[0m \u001b[38;5;28;43msuper\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_open\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtimeout\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtimeout\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 136\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_code_snippet\u001b[38;5;241m.\u001b[39m_subscribe()\n\u001b[1;32m 137\u001b[0m logger\u001b[38;5;241m.\u001b[39minfo(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mSession \u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_id\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m opened\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n", - "File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/e2b/session/session_connection.py:206\u001b[0m, in \u001b[0;36mSessionConnection._open\u001b[0;34m(self, timeout)\u001b[0m\n\u001b[1;32m 204\u001b[0m logger\u001b[38;5;241m.\u001b[39merror(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mFailed to acquire session\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m 205\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_close()\n\u001b[0;32m--> 206\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m e\n\u001b[1;32m 208\u001b[0m hostname \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mget_hostname(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_debug_port \u001b[38;5;129;01mor\u001b[39;00m ENVD_PORT)\n\u001b[1;32m 209\u001b[0m protocol \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mws\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_debug_dev_env \u001b[38;5;241m==\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mlocal\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mwss\u001b[39m\u001b[38;5;124m\"\u001b[39m\n", - "File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/e2b/session/session_connection.py:173\u001b[0m, in \u001b[0;36mSessionConnection._open\u001b[0;34m(self, timeout)\u001b[0m\n\u001b[1;32m 170\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m client\u001b[38;5;241m.\u001b[39mApiClient(configuration) \u001b[38;5;28;01mas\u001b[39;00m api_client:\n\u001b[1;32m 171\u001b[0m api \u001b[38;5;241m=\u001b[39m client\u001b[38;5;241m.\u001b[39mSessionsApi(api_client)\n\u001b[0;32m--> 173\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_session \u001b[38;5;241m=\u001b[39m \u001b[43mapi\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43msessions_post\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 174\u001b[0m \u001b[43m 
\u001b[49m\u001b[43mmodels\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mNewSession\u001b[49m\u001b[43m(\u001b[49m\u001b[43mcodeSnippetID\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_id\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43meditEnabled\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mFalse\u001b[39;49;00m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 175\u001b[0m \u001b[43m \u001b[49m\u001b[43mapi_key\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_api_key\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 176\u001b[0m \u001b[43m \u001b[49m\u001b[43m_request_timeout\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtimeout\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 177\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 178\u001b[0m logger\u001b[38;5;241m.\u001b[39minfo(\n\u001b[1;32m 179\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mSession \u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_session\u001b[38;5;241m.\u001b[39mcode_snippet_id\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m created (id:\u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_session\u001b[38;5;241m.\u001b[39msession_id\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m)\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 180\u001b[0m )\n\u001b[1;32m 182\u001b[0m \u001b[38;5;66;03m# We could potentially use asyncio.to_thread() but that requires Python 3.9+\u001b[39;00m\n", - "File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/pydantic/decorator.py:40\u001b[0m, in \u001b[0;36mpydantic.decorator.validate_arguments.validate.wrapper_function\u001b[0;34m()\u001b[0m\n", - "File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/pydantic/decorator.py:134\u001b[0m, in \u001b[0;36mpydantic.decorator.ValidatedFunction.call\u001b[0;34m()\u001b[0m\n", - "File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/pydantic/decorator.py:206\u001b[0m, in \u001b[0;36mpydantic.decorator.ValidatedFunction.execute\u001b[0;34m()\u001b[0m\n", - "File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/e2b/api/v1/client/api/sessions_api.py:230\u001b[0m, in \u001b[0;36mSessionsApi.sessions_post\u001b[0;34m(self, new_session, api_key, **kwargs)\u001b[0m\n\u001b[1;32m 226\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m_preload_content\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;129;01min\u001b[39;00m kwargs:\n\u001b[1;32m 227\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[1;32m 228\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mError! 
Please call the sessions_post_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 229\u001b[0m )\n\u001b[0;32m--> 230\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43msessions_post_with_http_info\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 231\u001b[0m \u001b[43m \u001b[49m\u001b[43mnew_session\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mapi_key\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\n\u001b[1;32m 232\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n", - "File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/pydantic/decorator.py:40\u001b[0m, in \u001b[0;36mpydantic.decorator.validate_arguments.validate.wrapper_function\u001b[0;34m()\u001b[0m\n", - "File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/pydantic/decorator.py:134\u001b[0m, in \u001b[0;36mpydantic.decorator.ValidatedFunction.call\u001b[0;34m()\u001b[0m\n", - "File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/pydantic/decorator.py:206\u001b[0m, in \u001b[0;36mpydantic.decorator.ValidatedFunction.execute\u001b[0;34m()\u001b[0m\n", - "File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/e2b/api/v1/client/api/sessions_api.py:344\u001b[0m, in \u001b[0;36mSessionsApi.sessions_post_with_http_info\u001b[0;34m(self, new_session, api_key, **kwargs)\u001b[0m\n\u001b[1;32m 335\u001b[0m _auth_settings \u001b[38;5;241m=\u001b[39m [] \u001b[38;5;66;03m# noqa: E501\u001b[39;00m\n\u001b[1;32m 337\u001b[0m _response_types_map \u001b[38;5;241m=\u001b[39m {\n\u001b[1;32m 338\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m201\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mSession\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m 339\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m401\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mError\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m 340\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m400\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mError\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m 341\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m500\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mError\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m 342\u001b[0m }\n\u001b[0;32m--> 344\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mapi_client\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcall_api\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 345\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43m/sessions\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 346\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mPOST\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 347\u001b[0m \u001b[43m \u001b[49m\u001b[43m_path_params\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 348\u001b[0m \u001b[43m \u001b[49m\u001b[43m_query_params\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 349\u001b[0m \u001b[43m \u001b[49m\u001b[43m_header_params\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 350\u001b[0m 
\u001b[43m \u001b[49m\u001b[43mbody\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m_body_params\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 351\u001b[0m \u001b[43m \u001b[49m\u001b[43mpost_params\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m_form_params\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 352\u001b[0m \u001b[43m \u001b[49m\u001b[43mfiles\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m_files\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 353\u001b[0m \u001b[43m \u001b[49m\u001b[43mresponse_types_map\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m_response_types_map\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 354\u001b[0m \u001b[43m \u001b[49m\u001b[43mauth_settings\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m_auth_settings\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 355\u001b[0m \u001b[43m \u001b[49m\u001b[43masync_req\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m_params\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43masync_req\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 356\u001b[0m \u001b[43m \u001b[49m\u001b[43m_return_http_data_only\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m_params\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43m_return_http_data_only\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;66;43;03m# noqa: E501\u001b[39;49;00m\n\u001b[1;32m 357\u001b[0m \u001b[43m \u001b[49m\u001b[43m_preload_content\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m_params\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43m_preload_content\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 358\u001b[0m \u001b[43m \u001b[49m\u001b[43m_request_timeout\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m_params\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43m_request_timeout\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 359\u001b[0m \u001b[43m \u001b[49m\u001b[43mcollection_formats\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m_collection_formats\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 360\u001b[0m \u001b[43m \u001b[49m\u001b[43m_request_auth\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m_params\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43m_request_auth\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 361\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n", - "File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/e2b/api/v1/client/api_client.py:439\u001b[0m, in \u001b[0;36mApiClient.call_api\u001b[0;34m(self, resource_path, method, path_params, query_params, header_params, body, post_params, files, response_types_map, auth_settings, async_req, _return_http_data_only, collection_formats, _preload_content, _request_timeout, _host, _request_auth)\u001b[0m\n\u001b[1;32m 397\u001b[0m 
\u001b[38;5;250m\u001b[39m\u001b[38;5;124;03m\"\"\"Makes the HTTP request (synchronous) and returns deserialized data.\u001b[39;00m\n\u001b[1;32m 398\u001b[0m \n\u001b[1;32m 399\u001b[0m \u001b[38;5;124;03mTo make an async_req request, set the async_req parameter.\u001b[39;00m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 436\u001b[0m \u001b[38;5;124;03m then the method will return the response directly.\u001b[39;00m\n\u001b[1;32m 437\u001b[0m \u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 438\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m async_req:\n\u001b[0;32m--> 439\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m__call_api\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 440\u001b[0m \u001b[43m \u001b[49m\u001b[43mresource_path\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 441\u001b[0m \u001b[43m \u001b[49m\u001b[43mmethod\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 442\u001b[0m \u001b[43m \u001b[49m\u001b[43mpath_params\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 443\u001b[0m \u001b[43m \u001b[49m\u001b[43mquery_params\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 444\u001b[0m \u001b[43m \u001b[49m\u001b[43mheader_params\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 445\u001b[0m \u001b[43m \u001b[49m\u001b[43mbody\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 446\u001b[0m \u001b[43m \u001b[49m\u001b[43mpost_params\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 447\u001b[0m \u001b[43m \u001b[49m\u001b[43mfiles\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 448\u001b[0m \u001b[43m \u001b[49m\u001b[43mresponse_types_map\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 449\u001b[0m \u001b[43m \u001b[49m\u001b[43mauth_settings\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 450\u001b[0m \u001b[43m \u001b[49m\u001b[43m_return_http_data_only\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 451\u001b[0m \u001b[43m \u001b[49m\u001b[43mcollection_formats\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 452\u001b[0m \u001b[43m \u001b[49m\u001b[43m_preload_content\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 453\u001b[0m \u001b[43m \u001b[49m\u001b[43m_request_timeout\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 454\u001b[0m \u001b[43m \u001b[49m\u001b[43m_host\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 455\u001b[0m \u001b[43m \u001b[49m\u001b[43m_request_auth\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 456\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 458\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mpool\u001b[38;5;241m.\u001b[39mapply_async(\n\u001b[1;32m 459\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m__call_api,\n\u001b[1;32m 460\u001b[0m (\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 477\u001b[0m ),\n\u001b[1;32m 478\u001b[0m )\n", - "File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/e2b/api/v1/client/api_client.py:242\u001b[0m, in \u001b[0;36mApiClient.__call_api\u001b[0;34m(self, resource_path, method, path_params, query_params, header_params, body, post_params, files, response_types_map, auth_settings, _return_http_data_only, collection_formats, _preload_content, _request_timeout, _host, _request_auth)\u001b[0m\n\u001b[1;32m 240\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m e\u001b[38;5;241m.\u001b[39mbody:\n\u001b[1;32m 241\u001b[0m e\u001b[38;5;241m.\u001b[39mbody \u001b[38;5;241m=\u001b[39m 
e\u001b[38;5;241m.\u001b[39mbody\u001b[38;5;241m.\u001b[39mdecode(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mutf-8\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m--> 242\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m e\n\u001b[1;32m 244\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mlast_response \u001b[38;5;241m=\u001b[39m response_data\n\u001b[1;32m 246\u001b[0m return_data \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;66;03m# assuming derialization is not needed\u001b[39;00m\n", - "File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/e2b/api/v1/client/api_client.py:229\u001b[0m, in \u001b[0;36mApiClient.__call_api\u001b[0;34m(self, resource_path, method, path_params, query_params, header_params, body, post_params, files, response_types_map, auth_settings, _return_http_data_only, collection_formats, _preload_content, _request_timeout, _host, _request_auth)\u001b[0m\n\u001b[1;32m 225\u001b[0m url \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m?\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;241m+\u001b[39m url_query\n\u001b[1;32m 227\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 228\u001b[0m \u001b[38;5;66;03m# perform request and return response\u001b[39;00m\n\u001b[0;32m--> 229\u001b[0m response_data \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrequest\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 230\u001b[0m \u001b[43m \u001b[49m\u001b[43mmethod\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 231\u001b[0m \u001b[43m \u001b[49m\u001b[43murl\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 232\u001b[0m \u001b[43m \u001b[49m\u001b[43mquery_params\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mquery_params\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 233\u001b[0m \u001b[43m \u001b[49m\u001b[43mheaders\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mheader_params\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 234\u001b[0m \u001b[43m \u001b[49m\u001b[43mpost_params\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mpost_params\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 235\u001b[0m \u001b[43m \u001b[49m\u001b[43mbody\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mbody\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 236\u001b[0m \u001b[43m \u001b[49m\u001b[43m_preload_content\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m_preload_content\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 237\u001b[0m \u001b[43m \u001b[49m\u001b[43m_request_timeout\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m_request_timeout\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 238\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 239\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m ApiException \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 240\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m e\u001b[38;5;241m.\u001b[39mbody:\n", - "File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/e2b/api/v1/client/api_client.py:517\u001b[0m, in \u001b[0;36mApiClient.request\u001b[0;34m(self, method, url, query_params, headers, post_params, body, _preload_content, _request_timeout)\u001b[0m\n\u001b[1;32m 509\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mrest_client\u001b[38;5;241m.\u001b[39moptions_request(\n\u001b[1;32m 510\u001b[0m url,\n\u001b[1;32m 511\u001b[0m query_params\u001b[38;5;241m=\u001b[39mquery_params,\n\u001b[0;32m 
(...)\u001b[0m\n\u001b[1;32m 514\u001b[0m _request_timeout\u001b[38;5;241m=\u001b[39m_request_timeout,\n\u001b[1;32m 515\u001b[0m )\n\u001b[1;32m 516\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m method \u001b[38;5;241m==\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mPOST\u001b[39m\u001b[38;5;124m\"\u001b[39m:\n\u001b[0;32m--> 517\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrest_client\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpost_request\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 518\u001b[0m \u001b[43m \u001b[49m\u001b[43murl\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 519\u001b[0m \u001b[43m \u001b[49m\u001b[43mquery_params\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mquery_params\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 520\u001b[0m \u001b[43m \u001b[49m\u001b[43mheaders\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mheaders\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 521\u001b[0m \u001b[43m \u001b[49m\u001b[43mpost_params\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mpost_params\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 522\u001b[0m \u001b[43m \u001b[49m\u001b[43m_preload_content\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m_preload_content\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 523\u001b[0m \u001b[43m \u001b[49m\u001b[43m_request_timeout\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m_request_timeout\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 524\u001b[0m \u001b[43m \u001b[49m\u001b[43mbody\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mbody\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 525\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 526\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m method \u001b[38;5;241m==\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mPUT\u001b[39m\u001b[38;5;124m\"\u001b[39m:\n\u001b[1;32m 527\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mrest_client\u001b[38;5;241m.\u001b[39mput_request(\n\u001b[1;32m 528\u001b[0m url,\n\u001b[1;32m 529\u001b[0m query_params\u001b[38;5;241m=\u001b[39mquery_params,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 534\u001b[0m body\u001b[38;5;241m=\u001b[39mbody,\n\u001b[1;32m 535\u001b[0m )\n", - "File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/e2b/api/v1/client/rest.py:348\u001b[0m, in \u001b[0;36mRESTClientObject.post_request\u001b[0;34m(self, url, headers, query_params, post_params, body, _preload_content, _request_timeout)\u001b[0m\n\u001b[1;32m 338\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mpost_request\u001b[39m(\n\u001b[1;32m 339\u001b[0m \u001b[38;5;28mself\u001b[39m,\n\u001b[1;32m 340\u001b[0m url,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 346\u001b[0m _request_timeout\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[1;32m 347\u001b[0m ):\n\u001b[0;32m--> 348\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrequest\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 349\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mPOST\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 350\u001b[0m \u001b[43m \u001b[49m\u001b[43murl\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 351\u001b[0m \u001b[43m 
\u001b[49m\u001b[43mheaders\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mheaders\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 352\u001b[0m \u001b[43m \u001b[49m\u001b[43mquery_params\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mquery_params\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 353\u001b[0m \u001b[43m \u001b[49m\u001b[43mpost_params\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mpost_params\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 354\u001b[0m \u001b[43m \u001b[49m\u001b[43m_preload_content\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m_preload_content\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 355\u001b[0m \u001b[43m \u001b[49m\u001b[43m_request_timeout\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m_request_timeout\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 356\u001b[0m \u001b[43m \u001b[49m\u001b[43mbody\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mbody\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 357\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n", - "File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/e2b/api/v1/client/rest.py:249\u001b[0m, in \u001b[0;36mRESTClientObject.request\u001b[0;34m(self, method, url, query_params, headers, body, post_params, _preload_content, _request_timeout)\u001b[0m\n\u001b[1;32m 247\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;241m200\u001b[39m \u001b[38;5;241m<\u001b[39m\u001b[38;5;241m=\u001b[39m r\u001b[38;5;241m.\u001b[39mstatus \u001b[38;5;241m<\u001b[39m\u001b[38;5;241m=\u001b[39m \u001b[38;5;241m299\u001b[39m:\n\u001b[1;32m 248\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m r\u001b[38;5;241m.\u001b[39mstatus \u001b[38;5;241m==\u001b[39m \u001b[38;5;241m401\u001b[39m:\n\u001b[0;32m--> 249\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m UnauthorizedException(http_resp\u001b[38;5;241m=\u001b[39mr)\n\u001b[1;32m 251\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m r\u001b[38;5;241m.\u001b[39mstatus \u001b[38;5;241m==\u001b[39m \u001b[38;5;241m403\u001b[39m:\n\u001b[1;32m 252\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m ForbiddenException(http_resp\u001b[38;5;241m=\u001b[39mr)\n", - "\u001b[0;31mUnauthorizedException\u001b[0m: (401)\nReason: Unauthorized\nHTTP response headers: HTTPHeaderDict({'Date': 'Tue, 07 Nov 2023 06:10:27 GMT', 'Content-Type': 'application/json; charset=utf-8', 'Content-Length': '123', 'Connection': 'keep-alive', 'via': '1.1 google', 'alt-svc': 'h3=\":443\"; ma=86400', 'CF-Cache-Status': 'DYNAMIC', 'Report-To': '{\"endpoints\":[{\"url\":\"https:\\\\/\\\\/a.nel.cloudflare.com\\\\/report\\\\/v3?s=eD4xvpIGOuei6fNw3tUox8hcw2rilvLB%2FUrFxgAkV05OCKE1yIBAZfHpmAnqATRlRrXBnDTn8KFWtliFKKO0fZHiKEPRzxzMB3CKvDSgquxoePtSyVtPguaR1UNYrK3h\"}],\"group\":\"cf-nel\",\"max_age\":604800}', 'NEL': '{\"success_fraction\":0,\"report_to\":\"cf-nel\",\"max_age\":604800}', 'Server': 'cloudflare', 'CF-RAY': '822369c7eefbd00d-SJC'})\nHTTP response body: {\"code\":401,\"message\":\"Invalid API key, please visit https://e2b.dev/docs?reason=sdk-missing-api-key to get your API key.\"}\n" - ] - } - ], + "outputs": [], "source": [ - "from langchain.tools import E2BDataAnalysisTool, format_tool_to_openai_function\n", + "from langchain.tools import E2BDataAnalysisTool\n", "\n", - "tool = E2BDataAnalysisTool(api_key=\"...\")\n", - "function = format_tool_to_openai_function(tool)\n", - "tools = [tool]" + "tools = [E2BDataAnalysisTool(api_key=\"e2b_c4547586ee874bb331944cfbf8cdf12fd64cfb59\")]" ] }, { "cell_type": "code", - "execution_count": null, + 
"execution_count": 5, "id": "1c01dd79-dd3e-4509-a2e2-009a7f99f16a", "metadata": {}, "outputs": [], "source": [ - "agent = OpenAIAssistantRunnable(\n", + "agent = OpenAIAssistantRunnable.create_assistant(\n", " name=\"langchain assistant e2b tool\",\n", " instructions=\"You are a personal math tutor. Write and run code to answer math questions.\",\n", - " tools=[{\"type\": \"function\", \"function\": function}],\n", + " tools=tools,\n", " model=\"gpt-4-1106-preview\",\n", " as_agent=True\n", - ")\n", - "agent.create()" + ")" ] }, { @@ -211,28 +172,20 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 7, "id": "1f137f94-801f-4766-9ff5-2de9df5e8079", "metadata": {}, "outputs": [ { - "ename": "ValueError", - "evalue": "{'id': 'run_08of98E8mkOkvvLevGlmxTpF', 'assistant_id': 'asst_8dAlzPI0zvjOaaenvwNCtIEl', 'cancelled_at': None, 'completed_at': None, 'created_at': 1699337429, 'expires_at': None, 'failed_at': 1699337439, 'file_ids': [], 'instructions': 'You are a personal math tutor. Write and run code to answer math questions.', 'last_error': {'code': 'rate_limit_exceeded', 'message': \"We're currently processing too many requests - please try again later.\"}, 'metadata': {}, 'model': 'gpt-4-1106-preview', 'object': 'thread.run', 'required_action': None, 'started_at': 1699337429, 'status': 'failed', 'thread_id': 'thread_wcdfruB9FPwjs27TbBv0p45U', 'tools': [{'function': {'description': 'Evaluates python code in a sandbox environment. The environment is long running and exists across multiple executions. You must send the whole script every time and print your outputs. Script should be pure python code that can be evaluated. It should be in python format NOT markdown. The code should NOT be wrapped in backticks. All python packages including requests, matplotlib, scipy, numpy, pandas, etc are available. Create and display chart using `plt.show()`.', 'name': 'e2b_data_analysis', 'parameters': {'title': 'E2BDataAnalysisToolArguments', 'description': 'Arguments for the E2BDataAnalysisTool.', 'type': 'object', 'properties': {'python_code': {'title': 'Python Code', 'description': 'The python script to be evaluated. The contents will be in main.py. 
It should not be in markdown format.', 'example': \"print('Hello World')\", 'type': 'string'}}, 'required': ['python_code']}}, 'type': 'function'}]}", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)", - "Cell \u001b[0;32mIn[8], line 4\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mlangchain\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01magents\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m AgentExecutor\n\u001b[1;32m 3\u001b[0m agent_executor \u001b[38;5;241m=\u001b[39m AgentExecutor(agent\u001b[38;5;241m=\u001b[39magent, tools\u001b[38;5;241m=\u001b[39mtools)\n\u001b[0;32m----> 4\u001b[0m \u001b[43magent_executor\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minvoke\u001b[49m\u001b[43m(\u001b[49m\u001b[43m{\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mcontent\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mWhat\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43ms 10 - 4 raised to the 2.7\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m}\u001b[49m\u001b[43m)\u001b[49m\n", - "File \u001b[0;32m~/langchain/libs/langchain/langchain/chains/base.py:87\u001b[0m, in \u001b[0;36mChain.invoke\u001b[0;34m(self, input, config, **kwargs)\u001b[0m\n\u001b[1;32m 80\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21minvoke\u001b[39m(\n\u001b[1;32m 81\u001b[0m \u001b[38;5;28mself\u001b[39m,\n\u001b[1;32m 82\u001b[0m \u001b[38;5;28minput\u001b[39m: Dict[\u001b[38;5;28mstr\u001b[39m, Any],\n\u001b[1;32m 83\u001b[0m config: Optional[RunnableConfig] \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[1;32m 84\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs: Any,\n\u001b[1;32m 85\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Dict[\u001b[38;5;28mstr\u001b[39m, Any]:\n\u001b[1;32m 86\u001b[0m config \u001b[38;5;241m=\u001b[39m config \u001b[38;5;129;01mor\u001b[39;00m {}\n\u001b[0;32m---> 87\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m(\u001b[49m\n\u001b[1;32m 88\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 89\u001b[0m \u001b[43m \u001b[49m\u001b[43mcallbacks\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mcallbacks\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 90\u001b[0m \u001b[43m \u001b[49m\u001b[43mtags\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mtags\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 91\u001b[0m \u001b[43m \u001b[49m\u001b[43mmetadata\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mmetadata\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 92\u001b[0m 
\u001b[43m \u001b[49m\u001b[43mrun_name\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mrun_name\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 93\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 94\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n", - "File \u001b[0;32m~/langchain/libs/langchain/langchain/chains/base.py:310\u001b[0m, in \u001b[0;36mChain.__call__\u001b[0;34m(self, inputs, return_only_outputs, callbacks, tags, metadata, run_name, include_run_info)\u001b[0m\n\u001b[1;32m 308\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mBaseException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 309\u001b[0m run_manager\u001b[38;5;241m.\u001b[39mon_chain_error(e)\n\u001b[0;32m--> 310\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m e\n\u001b[1;32m 311\u001b[0m run_manager\u001b[38;5;241m.\u001b[39mon_chain_end(outputs)\n\u001b[1;32m 312\u001b[0m final_outputs: Dict[\u001b[38;5;28mstr\u001b[39m, Any] \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mprep_outputs(\n\u001b[1;32m 313\u001b[0m inputs, outputs, return_only_outputs\n\u001b[1;32m 314\u001b[0m )\n", - "File \u001b[0;32m~/langchain/libs/langchain/langchain/chains/base.py:304\u001b[0m, in \u001b[0;36mChain.__call__\u001b[0;34m(self, inputs, return_only_outputs, callbacks, tags, metadata, run_name, include_run_info)\u001b[0m\n\u001b[1;32m 297\u001b[0m run_manager \u001b[38;5;241m=\u001b[39m callback_manager\u001b[38;5;241m.\u001b[39mon_chain_start(\n\u001b[1;32m 298\u001b[0m dumpd(\u001b[38;5;28mself\u001b[39m),\n\u001b[1;32m 299\u001b[0m inputs,\n\u001b[1;32m 300\u001b[0m name\u001b[38;5;241m=\u001b[39mrun_name,\n\u001b[1;32m 301\u001b[0m )\n\u001b[1;32m 302\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 303\u001b[0m outputs \u001b[38;5;241m=\u001b[39m (\n\u001b[0;32m--> 304\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call\u001b[49m\u001b[43m(\u001b[49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mrun_manager\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrun_manager\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 305\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m new_arg_supported\n\u001b[1;32m 306\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_call(inputs)\n\u001b[1;32m 307\u001b[0m )\n\u001b[1;32m 308\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mBaseException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 309\u001b[0m run_manager\u001b[38;5;241m.\u001b[39mon_chain_error(e)\n", - "File \u001b[0;32m~/langchain/libs/langchain/langchain/agents/agent.py:1146\u001b[0m, in \u001b[0;36mAgentExecutor._call\u001b[0;34m(self, inputs, run_manager)\u001b[0m\n\u001b[1;32m 1144\u001b[0m \u001b[38;5;66;03m# We now enter the agent loop (until it returns something).\u001b[39;00m\n\u001b[1;32m 1145\u001b[0m \u001b[38;5;28;01mwhile\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_should_continue(iterations, time_elapsed):\n\u001b[0;32m-> 1146\u001b[0m next_step_output \u001b[38;5;241m=\u001b[39m 
\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_take_next_step\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1147\u001b[0m \u001b[43m \u001b[49m\u001b[43mname_to_tool_map\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1148\u001b[0m \u001b[43m \u001b[49m\u001b[43mcolor_mapping\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1149\u001b[0m \u001b[43m \u001b[49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1150\u001b[0m \u001b[43m \u001b[49m\u001b[43mintermediate_steps\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1151\u001b[0m \u001b[43m \u001b[49m\u001b[43mrun_manager\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrun_manager\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1152\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1153\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(next_step_output, AgentFinish):\n\u001b[1;32m 1154\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_return(\n\u001b[1;32m 1155\u001b[0m next_step_output, intermediate_steps, run_manager\u001b[38;5;241m=\u001b[39mrun_manager\n\u001b[1;32m 1156\u001b[0m )\n", - "File \u001b[0;32m~/langchain/libs/langchain/langchain/agents/agent.py:933\u001b[0m, in \u001b[0;36mAgentExecutor._take_next_step\u001b[0;34m(self, name_to_tool_map, color_mapping, inputs, intermediate_steps, run_manager)\u001b[0m\n\u001b[1;32m 930\u001b[0m intermediate_steps \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_prepare_intermediate_steps(intermediate_steps)\n\u001b[1;32m 932\u001b[0m \u001b[38;5;66;03m# Call the LLM to see what to do.\u001b[39;00m\n\u001b[0;32m--> 933\u001b[0m output \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43magent\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mplan\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 934\u001b[0m \u001b[43m \u001b[49m\u001b[43mintermediate_steps\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 935\u001b[0m \u001b[43m \u001b[49m\u001b[43mcallbacks\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrun_manager\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget_child\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mif\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mrun_manager\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01melse\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[1;32m 936\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 937\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 938\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m OutputParserException \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 939\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mhandle_parsing_errors, \u001b[38;5;28mbool\u001b[39m):\n", - "File \u001b[0;32m~/langchain/libs/langchain/langchain/agents/agent.py:375\u001b[0m, in \u001b[0;36mRunnableAgent.plan\u001b[0;34m(self, intermediate_steps, callbacks, **kwargs)\u001b[0m\n\u001b[1;32m 363\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124;03m\"\"\"Given input, decided what to do.\u001b[39;00m\n\u001b[1;32m 364\u001b[0m \n\u001b[1;32m 365\u001b[0m \u001b[38;5;124;03mArgs:\u001b[39;00m\n\u001b[0;32m 
(...)\u001b[0m\n\u001b[1;32m 372\u001b[0m \u001b[38;5;124;03m Action specifying what tool to use.\u001b[39;00m\n\u001b[1;32m 373\u001b[0m \u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 374\u001b[0m inputs \u001b[38;5;241m=\u001b[39m {\u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39m{\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mintermediate_steps\u001b[39m\u001b[38;5;124m\"\u001b[39m: intermediate_steps}}\n\u001b[0;32m--> 375\u001b[0m output \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrunnable\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minvoke\u001b[49m\u001b[43m(\u001b[49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m{\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mcallbacks\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mcallbacks\u001b[49m\u001b[43m}\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 376\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m output\n", - "File \u001b[0;32m~/langchain/libs/experimental/langchain_experimental/openai_assistant/base.py:75\u001b[0m, in \u001b[0;36mOpenAIAssistantRunnable.invoke\u001b[0;34m(self, input, config)\u001b[0m\n\u001b[1;32m 71\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 72\u001b[0m run \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mclient\u001b[38;5;241m.\u001b[39mbeta\u001b[38;5;241m.\u001b[39mthreads\u001b[38;5;241m.\u001b[39mruns\u001b[38;5;241m.\u001b[39msubmit_tool_outputs(\n\u001b[1;32m 73\u001b[0m thread_id\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mthread_id, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39m\u001b[38;5;28minput\u001b[39m\n\u001b[1;32m 74\u001b[0m )\n\u001b[0;32m---> 75\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_get_response\u001b[49m\u001b[43m(\u001b[49m\u001b[43mrun\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mid\u001b[49m\u001b[43m)\u001b[49m\n", - "File \u001b[0;32m~/langchain/libs/experimental/langchain_experimental/openai_assistant/base.py:118\u001b[0m, in \u001b[0;36mOpenAIAssistantRunnable._get_response\u001b[0;34m(self, run_id)\u001b[0m\n\u001b[1;32m 116\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m actions\n\u001b[1;32m 117\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m--> 118\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(run\u001b[38;5;241m.\u001b[39mdict())\n", - "\u001b[0;31mValueError\u001b[0m: {'id': 'run_08of98E8mkOkvvLevGlmxTpF', 'assistant_id': 'asst_8dAlzPI0zvjOaaenvwNCtIEl', 'cancelled_at': None, 'completed_at': None, 'created_at': 1699337429, 'expires_at': None, 'failed_at': 1699337439, 'file_ids': [], 'instructions': 'You are a personal math tutor. Write and run code to answer math questions.', 'last_error': {'code': 'rate_limit_exceeded', 'message': \"We're currently processing too many requests - please try again later.\"}, 'metadata': {}, 'model': 'gpt-4-1106-preview', 'object': 'thread.run', 'required_action': None, 'started_at': 1699337429, 'status': 'failed', 'thread_id': 'thread_wcdfruB9FPwjs27TbBv0p45U', 'tools': [{'function': {'description': 'Evaluates python code in a sandbox environment. The environment is long running and exists across multiple executions. 
You must send the whole script every time and print your outputs. Script should be pure python code that can be evaluated. It should be in python format NOT markdown. The code should NOT be wrapped in backticks. All python packages including requests, matplotlib, scipy, numpy, pandas, etc are available. Create and display chart using `plt.show()`.', 'name': 'e2b_data_analysis', 'parameters': {'title': 'E2BDataAnalysisToolArguments', 'description': 'Arguments for the E2BDataAnalysisTool.', 'type': 'object', 'properties': {'python_code': {'title': 'Python Code', 'description': 'The python script to be evaluated. The contents will be in main.py. It should not be in markdown format.', 'example': \"print('Hello World')\", 'type': 'string'}}, 'required': ['python_code']}}, 'type': 'function'}]}" - ] + "data": { + "text/plain": [ + "{'content': \"What's 10 - 4 raised to the 2.7\",\n", + " 'output': 'The result of \\\\( 10 - 4^{2.7} \\\\) is approximately \\\\(-32.224\\\\).'}" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" } ], "source": [ @@ -252,38 +205,33 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 10, "id": "c0475fa7-b6c1-4331-b8e2-55407466c724", "metadata": {}, "outputs": [], "source": [ - "agent = OpenAIAssistantRunnable(\n", + "agent = OpenAIAssistantRunnable.create_assistant(\n", " name=\"langchain assistant e2b tool\",\n", " instructions=\"You are a personal math tutor. Write and run code to answer math questions.\",\n", - " tools=[{\"type\": \"function\", \"function\": function}],\n", + " tools=tools,\n", " model=\"gpt-4-1106-preview\",\n", " as_agent=True\n", - ")\n", - "agent.create()" + ")" ] }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 12, "id": "b76cb669-6aba-4827-868f-00aa960026f2", "metadata": {}, "outputs": [ { - "ename": "ValueError", - "evalue": "{'id': 'run_6QY8OJeyV2M4BjUQMdWkAn1i', 'assistant_id': 'asst_8dAlzPI0zvjOaaenvwNCtIEl', 'cancelled_at': None, 'completed_at': None, 'created_at': 1699331591, 'expires_at': None, 'failed_at': 1699331601, 'file_ids': [], 'instructions': 'You are a personal math tutor. Write and run code to answer math questions.', 'last_error': {'code': 'rate_limit_exceeded', 'message': \"We're currently processing too many requests - please try again later.\"}, 'metadata': {}, 'model': 'gpt-4-1106-preview', 'object': 'thread.run', 'required_action': None, 'started_at': 1699331591, 'status': 'failed', 'thread_id': 'thread_wcdfruB9FPwjs27TbBv0p45U', 'tools': [{'function': {'description': 'Evaluates python code in a sandbox environment. The environment is long running and exists across multiple executions. You must send the whole script every time and print your outputs. Script should be pure python code that can be evaluated. It should be in python format NOT markdown. The code should NOT be wrapped in backticks. All python packages including requests, matplotlib, scipy, numpy, pandas, etc are available. Create and display chart using `plt.show()`.', 'name': 'e2b_data_analysis', 'parameters': {'title': 'E2BDataAnalysisToolArguments', 'description': 'Arguments for the E2BDataAnalysisTool.', 'type': 'object', 'properties': {'python_code': {'title': 'Python Code', 'description': 'The python script to be evaluated. The contents will be in main.py. 
It should not be in markdown format.', 'example': \"print('Hello World')\", 'type': 'string'}}, 'required': ['python_code']}}, 'type': 'function'}]}", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)", - "Cell \u001b[0;32mIn[5], line 4\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mlangchain\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mschema\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01magent\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m AgentFinish\n\u001b[1;32m 3\u001b[0m tool_map \u001b[38;5;241m=\u001b[39m {tool\u001b[38;5;241m.\u001b[39mname: tool \u001b[38;5;28;01mfor\u001b[39;00m tool \u001b[38;5;129;01min\u001b[39;00m tools}\n\u001b[0;32m----> 4\u001b[0m response \u001b[38;5;241m=\u001b[39m \u001b[43magent\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minvoke\u001b[49m\u001b[43m(\u001b[49m\u001b[43m{\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mcontent\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mWhat\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43ms 10 - 4 raised to the 2.7\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m}\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 5\u001b[0m \u001b[38;5;28;01mwhile\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(response, AgentFinish):\n\u001b[1;32m 6\u001b[0m tool_outputs \u001b[38;5;241m=\u001b[39m []\n", - "File \u001b[0;32m~/langchain/libs/experimental/langchain_experimental/openai_assistant/base.py:75\u001b[0m, in \u001b[0;36mOpenAIAssistantRunnable.invoke\u001b[0;34m(self, input, config)\u001b[0m\n\u001b[1;32m 71\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 72\u001b[0m run \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mclient\u001b[38;5;241m.\u001b[39mbeta\u001b[38;5;241m.\u001b[39mthreads\u001b[38;5;241m.\u001b[39mruns\u001b[38;5;241m.\u001b[39msubmit_tool_outputs(\n\u001b[1;32m 73\u001b[0m thread_id\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mthread_id, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39m\u001b[38;5;28minput\u001b[39m\n\u001b[1;32m 74\u001b[0m )\n\u001b[0;32m---> 75\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_get_response\u001b[49m\u001b[43m(\u001b[49m\u001b[43mrun\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mid\u001b[49m\u001b[43m)\u001b[49m\n", - "File \u001b[0;32m~/langchain/libs/experimental/langchain_experimental/openai_assistant/base.py:118\u001b[0m, in \u001b[0;36mOpenAIAssistantRunnable._get_response\u001b[0;34m(self, run_id)\u001b[0m\n\u001b[1;32m 116\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m actions\n\u001b[1;32m 117\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m--> 118\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(run\u001b[38;5;241m.\u001b[39mdict())\n", - "\u001b[0;31mValueError\u001b[0m: {'id': 'run_6QY8OJeyV2M4BjUQMdWkAn1i', 'assistant_id': 'asst_8dAlzPI0zvjOaaenvwNCtIEl', 'cancelled_at': None, 'completed_at': None, 'created_at': 1699331591, 'expires_at': None, 'failed_at': 1699331601, 'file_ids': [], 'instructions': 'You are a personal math tutor. 
Write and run code to answer math questions.', 'last_error': {'code': 'rate_limit_exceeded', 'message': \"We're currently processing too many requests - please try again later.\"}, 'metadata': {}, 'model': 'gpt-4-1106-preview', 'object': 'thread.run', 'required_action': None, 'started_at': 1699331591, 'status': 'failed', 'thread_id': 'thread_wcdfruB9FPwjs27TbBv0p45U', 'tools': [{'function': {'description': 'Evaluates python code in a sandbox environment. The environment is long running and exists across multiple executions. You must send the whole script every time and print your outputs. Script should be pure python code that can be evaluated. It should be in python format NOT markdown. The code should NOT be wrapped in backticks. All python packages including requests, matplotlib, scipy, numpy, pandas, etc are available. Create and display chart using `plt.show()`.', 'name': 'e2b_data_analysis', 'parameters': {'title': 'E2BDataAnalysisToolArguments', 'description': 'Arguments for the E2BDataAnalysisTool.', 'type': 'object', 'properties': {'python_code': {'title': 'Python Code', 'description': 'The python script to be evaluated. The contents will be in main.py. It should not be in markdown format.', 'example': \"print('Hello World')\", 'type': 'string'}}, 'required': ['python_code']}}, 'type': 'function'}]}" + "name": "stdout", + "output_type": "stream", + "text": [ + "e2b_data_analysis {'python_code': \"result = 10 - 4**2.7\\nprint('The result of 10 - 4 raised to the 2.7 is:', result)\"} {\"stdout\": \"The result of 10 - 4 raised to the 2.7 is: -32.22425314473263\", \"stderr\": \"\", \"artifacts\": []}\n", + "\n", + "return_values={'output': 'The result of \\\\(10 - 4\\\\) raised to the \\\\(2.7\\\\) is approximately \\\\(-32.22425314473263\\\\).'} log='' run_id='run_ORbUDQ9DELkjjPcxP4arCoBA' thread_id='thread_Kaou0OV59KmtQpqR9e49jt8q'\n" ] } ], @@ -296,11 +244,11 @@ " tool_outputs = []\n", " for action in response:\n", " tool_output = tool_map[action.tool].invoke(action.tool_input)\n", - " print(action.tool, action.tool_input, tool_output)\n", + " print(action.tool, action.tool_input, tool_output, end=\"\\n\\n\")\n", " tool_outputs.append({\"output\": tool_output, \"tool_call_id\": action.tool_call_id})\n", - " response = agent.invoke({\"tool_outputs\": tool_outputs, \"run_id\": action.run_id})\n", + " response = agent.invoke({\"tool_outputs\": tool_outputs, \"run_id\": action.run_id, \"thread_id\": action.thread_id})\n", " \n", - "print(response.return_values[\"output\"])" + "print(response)" ] }, { diff --git a/libs/experimental/langchain_experimental/openai_assistant/base.py b/libs/experimental/langchain_experimental/openai_assistant/base.py index 3f307e4794b23..c2e41b5aef3fe 100644 --- a/libs/experimental/langchain_experimental/openai_assistant/base.py +++ b/libs/experimental/langchain_experimental/openai_assistant/base.py @@ -80,7 +80,7 @@ def create_assistant( openai_tools: List = [] for tool in tools: if isinstance(tool, BaseTool): - tool = format_tool_to_openai_function(tool) + tool = {"type": "function", "function": format_tool_to_openai_function(tool)} openai_tools.append(tool) assistant = client.beta.assistants.create( name=name, @@ -105,14 +105,18 @@ def invoke(self, input: dict, config: Optional[RunnableConfig] = None) -> Any: """""" input = self._parse_input(input) if "thread_id" not in input: - run = self._create_thread_and_run(input) - _ = self.client.beta.threads.messages.create( - run.thread_id, - content=input["content"], - role="user", - 
file_ids=input.get("file_ids", []), - metadata=input.get("message_metadata"), - ) + thread = { + "messages": [ + { + "role": "user", + "content": input["content"], + "file_ids": input.get("file_ids", []), + "metadata": input.get("message_metadata"), + } + ], + "metadata": input.get("thread_metadata"), + } + run = self._create_thread_and_run(input, thread) elif "run_id" not in input: _ = self.client.beta.threads.messages.create( input["thread_id"], @@ -150,14 +154,15 @@ def _create_run(self, input: dict) -> Any: **params, ) - def _create_thread_and_run(self, input: dict) -> Any: + def _create_thread_and_run(self, input: dict, thread: dict) -> Any: params = { k: v for k, v in input.items() - if k in ("instructions", "thread", "model", "tools", "run_metadata") + if k in ("instructions", "model", "tools", "run_metadata") } run = self.client.beta.threads.create_and_run( assistant_id=self.assistant_id, + thread=thread, **params, ) return run From 268f52794bfb0d6cb92f518d5c9d6c06cf4ceaeb Mon Sep 17 00:00:00 2001 From: Bagatur Date: Tue, 7 Nov 2023 10:02:44 -0800 Subject: [PATCH 11/22] nb --- cookbook/openai_v1_cookbook.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cookbook/openai_v1_cookbook.ipynb b/cookbook/openai_v1_cookbook.ipynb index fb8869e1b74e5..59815370d718a 100644 --- a/cookbook/openai_v1_cookbook.ipynb +++ b/cookbook/openai_v1_cookbook.ipynb @@ -143,7 +143,7 @@ "source": [ "from langchain.tools import E2BDataAnalysisTool\n", "\n", - "tools = [E2BDataAnalysisTool(api_key=\"e2b_c4547586ee874bb331944cfbf8cdf12fd64cfb59\")]" + "tools = [E2BDataAnalysisTool(api_key=\"...\")]" ] }, { From e4869413180b17d8918f602ff77969d79abe0211 Mon Sep 17 00:00:00 2001 From: Bagatur Date: Tue, 7 Nov 2023 10:03:39 -0800 Subject: [PATCH 12/22] fmt --- .../langchain_experimental/openai_assistant/base.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/libs/experimental/langchain_experimental/openai_assistant/base.py b/libs/experimental/langchain_experimental/openai_assistant/base.py index c2e41b5aef3fe..abca7fec95210 100644 --- a/libs/experimental/langchain_experimental/openai_assistant/base.py +++ b/libs/experimental/langchain_experimental/openai_assistant/base.py @@ -80,7 +80,10 @@ def create_assistant( openai_tools: List = [] for tool in tools: if isinstance(tool, BaseTool): - tool = {"type": "function", "function": format_tool_to_openai_function(tool)} + tool = { + "type": "function", + "function": format_tool_to_openai_function(tool), + } openai_tools.append(tool) assistant = client.beta.assistants.create( name=name, From ee810eeec4430a48ce4139b4990430eb0a88fd7b Mon Sep 17 00:00:00 2001 From: Bagatur Date: Tue, 7 Nov 2023 10:06:24 -0800 Subject: [PATCH 13/22] next --- cookbook/openai_v1_cookbook.ipynb | 77 ++++++++++++++++++++++--------- 1 file changed, 56 insertions(+), 21 deletions(-) diff --git a/cookbook/openai_v1_cookbook.ipynb b/cookbook/openai_v1_cookbook.ipynb index 59815370d718a..1ddc2cdbcc150 100644 --- a/cookbook/openai_v1_cookbook.ipynb +++ b/cookbook/openai_v1_cookbook.ipynb @@ -221,34 +221,69 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 13, "id": "b76cb669-6aba-4827-868f-00aa960026f2", "metadata": {}, + "outputs": [], + "source": [ + "from langchain.schema.agent import AgentFinish\n", + "\n", + "def execute_agent(agent, tools, input):\n", + " tool_map = {tool.name: tool for tool in tools}\n", + " response = agent.invoke(input)\n", + " while not isinstance(response, AgentFinish):\n", + " tool_outputs = []\n", + " for 
action in response:\n", + " tool_output = tool_map[action.tool].invoke(action.tool_input)\n", + " print(action.tool, action.tool_input, tool_output, end=\"\\n\\n\")\n", + " tool_outputs.append({\"output\": tool_output, \"tool_call_id\": action.tool_call_id})\n", + " response = agent.invoke({\"tool_outputs\": tool_outputs, \"run_id\": action.run_id, \"thread_id\": action.thread_id})\n", + " \n", + " return response" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "7946116a-b82f-492e-835e-ca958a8949a5", + "metadata": {}, "outputs": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "e2b_data_analysis {'python_code': \"result = 10 - 4**2.7\\nprint('The result of 10 - 4 raised to the 2.7 is:', result)\"} {\"stdout\": \"The result of 10 - 4 raised to the 2.7 is: -32.22425314473263\", \"stderr\": \"\", \"artifacts\": []}\n", - "\n", - "return_values={'output': 'The result of \\\\(10 - 4\\\\) raised to the \\\\(2.7\\\\) is approximately \\\\(-32.22425314473263\\\\).'} log='' run_id='run_ORbUDQ9DELkjjPcxP4arCoBA' thread_id='thread_Kaou0OV59KmtQpqR9e49jt8q'\n" - ] + "data": { + "text/plain": [ + "OpenAIAssistantFinish(return_values={'output': 'The result of \\\\( 10 - 4^{2.7} \\\\) is approximately -32.2243.'}, log='', run_id='run_0HGfB3E7h5At9ZQTqlDcVDxX', thread_id='thread_yCuCltQgfxXKZGSI6gkylv4v')" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" } ], "source": [ - "from langchain.schema.agent import AgentFinish\n", - "\n", - "tool_map = {tool.name: tool for tool in tools}\n", - "response = agent.invoke({\"content\": \"What's 10 - 4 raised to the 2.7\"})\n", - "while not isinstance(response, AgentFinish):\n", - " tool_outputs = []\n", - " for action in response:\n", - " tool_output = tool_map[action.tool].invoke(action.tool_input)\n", - " print(action.tool, action.tool_input, tool_output, end=\"\\n\\n\")\n", - " tool_outputs.append({\"output\": tool_output, \"tool_call_id\": action.tool_call_id})\n", - " response = agent.invoke({\"tool_outputs\": tool_outputs, \"run_id\": action.run_id, \"thread_id\": action.thread_id})\n", - " \n", - "print(response)" + "response = execute_agent(agent, tools, {\"content\": \"What's 10 - 4 raised to the 2.7\"})\n", + "response" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "f2744a56-9f4f-4899-827a-fa55821c318c", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "OpenAIAssistantFinish(return_values={'output': 'After adding 17.241 to \\\\(10 - 4^{2.7}\\\\), the result is approximately -14.9833.'}, log='', run_id='run_cJrAHdn0uuEotVr9cDXUJHjH', thread_id='thread_yCuCltQgfxXKZGSI6gkylv4v')" + ] + }, + "execution_count": 17, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "next_response = execute_agent(agent, tools, {\"content\": \"now add 17.241\", \"thread_id\": response.thread_id})\n", + "next_response" ] }, { From 4642e335e806c0bfa47aa6df54a5f08e9f7a3c8e Mon Sep 17 00:00:00 2001 From: Bagatur Date: Tue, 7 Nov 2023 10:50:43 -0800 Subject: [PATCH 14/22] typing --- .../openai_assistant/base.py | 30 ++++++++++++++++--- 1 file changed, 26 insertions(+), 4 deletions(-) diff --git a/libs/experimental/langchain_experimental/openai_assistant/base.py b/libs/experimental/langchain_experimental/openai_assistant/base.py index abca7fec95210..6a6a67c978686 100644 --- a/libs/experimental/langchain_experimental/openai_assistant/base.py +++ b/libs/experimental/langchain_experimental/openai_assistant/base.py @@ -12,6 +12,10 @@ if 
TYPE_CHECKING: import openai + from openai.types.beta.threads import ThreadMessage + from openai.types.beta.threads.required_action_function_tool_call import ( + RequiredActionFunctionToolCall, + ) class OpenAIAssistantFinish(AgentFinish): @@ -41,7 +45,15 @@ def _get_openai_client() -> openai.OpenAI: ) from e -class OpenAIAssistantRunnable(RunnableSerializable[Dict, Any]): +OutputType = Union[ + List[OpenAIAssistantAction], + OpenAIAssistantFinish, + List[ThreadMessage], + List[RequiredActionFunctionToolCall], +] + + +class OpenAIAssistantRunnable(RunnableSerializable[Dict, OutputType]): """Run an OpenAI Assistant.""" client: openai.OpenAI = Field(default_factory=_get_openai_client) @@ -104,8 +116,18 @@ def validate_environment(cls, values: Dict) -> Dict: values["client"] = openai.OpenAI() return values - def invoke(self, input: dict, config: Optional[RunnableConfig] = None) -> Any: - """""" + def invoke( + self, input: dict, config: Optional[RunnableConfig] = None + ) -> OutputType: + """Invoke assistant. + + Args: + input: Runnable input. + config: Runnable config: + + Return: + If self.as_agent, will return + """ input = self._parse_input(input) if "thread_id" not in input: thread = { @@ -196,7 +218,7 @@ def _get_response(self, run_id: str, thread_id: str) -> Any: ) elif run.status == "requires_action": if not self.as_agent: - return run.required_action.submit_tool_outputs + return run.required_action.submit_tool_outputs.tool_calls actions = [] for tool_call in run.required_action.submit_tool_outputs.tool_calls: function = tool_call.function From e5424ce74bdad82b4c2eb5b0b8fdf06761a437bc Mon Sep 17 00:00:00 2001 From: Bagatur Date: Tue, 7 Nov 2023 11:13:13 -0800 Subject: [PATCH 15/22] docstring --- cookbook/openai_v1_cookbook.ipynb | 9 +- .../openai_assistant/base.py | 95 ++++++++++++++++++- 2 files changed, 101 insertions(+), 3 deletions(-) diff --git a/cookbook/openai_v1_cookbook.ipynb b/cookbook/openai_v1_cookbook.ipynb index 1ddc2cdbcc150..5a278a9be91d6 100644 --- a/cookbook/openai_v1_cookbook.ipynb +++ b/cookbook/openai_v1_cookbook.ipynb @@ -95,7 +95,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_experimental.openai_assistant.base import OpenAIAssistantRunnable\n" + "from langchain_experimental.openai_assistant import OpenAIAssistantRunnable\n" ] }, { @@ -279,6 +279,13 @@ "execution_count": 17, "metadata": {}, "output_type": "execute_result" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WebSocket received error while receiving messages: no close frame received or sent\n" + ] } ], "source": [ diff --git a/libs/experimental/langchain_experimental/openai_assistant/base.py b/libs/experimental/langchain_experimental/openai_assistant/base.py index 6a6a67c978686..586a15799f4b6 100644 --- a/libs/experimental/langchain_experimental/openai_assistant/base.py +++ b/libs/experimental/langchain_experimental/openai_assistant/base.py @@ -54,7 +54,83 @@ def _get_openai_client() -> openai.OpenAI: class OpenAIAssistantRunnable(RunnableSerializable[Dict, OutputType]): - """Run an OpenAI Assistant.""" + """Run an OpenAI Assistant. + + Example using OpenAI tools: + .. code-block:: python + + from langchain_experimental.openai_assistant import OpenAIAssistantRunnable + + interpreter_assistant = OpenAIAssistantRunnable.create_assistant( + name="langchain assistant", + instructions="You are a personal math tutor. 
Write and run code to answer math questions.", + tools=[{"type": "code_interpreter"}], + model="gpt-4-1106-preview" + ) + output = interpreter_assistant.invoke({"content": "What's 10 - 4 raised to the 2.7"}) + + Example using custom tools and AgentExecutor: + .. code-block:: python + + from langchain_experimental.openai_assistant import OpenAIAssistantRunnable + from langchain.agents import AgentExecutor + from langchain.tools import E2BDataAnalysisTool + + + tools = [E2BDataAnalysisTool(api_key="...")] + agent = OpenAIAssistantRunnable.create_assistant( + name="langchain assistant e2b tool", + instructions="You are a personal math tutor. Write and run code to answer math questions.", + tools=tools, + model="gpt-4-1106-preview", + as_agent=True + ) + + agent_executor = AgentExecutor(agent=agent, tools=tools) + agent_executor.invoke({"content": "What's 10 - 4 raised to the 2.7"}) + + + Example using custom tools and custom execution: + .. code-block:: python + + from langchain_experimental.openai_assistant import OpenAIAssistantRunnable + from langchain.agents import AgentExecutor + from langchain.schema.agent import AgentFinish + from langchain.tools import E2BDataAnalysisTool + + + tools = [E2BDataAnalysisTool(api_key="...")] + agent = OpenAIAssistantRunnable.create_assistant( + name="langchain assistant e2b tool", + instructions="You are a personal math tutor. Write and run code to answer math questions.", + tools=tools, + model="gpt-4-1106-preview", + as_agent=True + ) + + def execute_agent(agent, tools, input): + tool_map = {tool.name: tool for tool in tools} + response = agent.invoke(input) + while not isinstance(response, AgentFinish): + tool_outputs = [] + for action in response: + tool_output = tool_map[action.tool].invoke(action.tool_input) + print(action.tool, action.tool_input, tool_output, end="\n\n") + tool_outputs.append({"output": tool_output, "tool_call_id": action.tool_call_id}) + response = agent.invoke( + { + "tool_outputs": tool_outputs, + "run_id": action.run_id, + "thread_id": action.thread_id + } + ) + + return response + + response = execute_agent(agent, tools, {"content": "What's 10 - 4 raised to the 2.7"}) + next_response = execute_agent(agent, tools, {"content": "now add 17.241", "thread_id": response.thread_id}) + + """ # noqa: E501 client: openai.OpenAI = Field(default_factory=_get_openai_client) """OpenAI client.""" @@ -122,11 +198,26 @@ def invoke( """Invoke assistant. Args: - input: Runnable input. + input: Runnable input dict that can have: + content: User message when starting a new run. + thread_id: Existing thread to use. + run_id: Existing run to use. Should only be supplied when providing + the tool output for a required action after an initial invocation. + file_ids: File ids to include in new run. Used for retrieval. + message_metadata: Metadata to associate with new message. + thread_metadata: Metadata to associate with new thread. Only relevant + when new thread being created. + instructions: Additional run instructions. + model: Override Assistant model for this run. + tools: Override Assistant tools for this run. + run_metadata: Metadata to associate with new run. config: Runnable config: Return: If self.as_agent, will return + Union[List[OpenAIAssistantAction], OpenAIAssistantFinish]. Otherwise + will return OpenAI types + Union[List[ThreadMessage], List[RequiredActionFunctionToolCall]]. 
""" input = self._parse_input(input) if "thread_id" not in input: From b9c1fa8b0f9172cf9871ba8b7334a9979627d174 Mon Sep 17 00:00:00 2001 From: Bagatur Date: Tue, 7 Nov 2023 11:17:02 -0800 Subject: [PATCH 16/22] cr --- .../openai_assistant/base.py | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) diff --git a/libs/experimental/langchain_experimental/openai_assistant/base.py b/libs/experimental/langchain_experimental/openai_assistant/base.py index 586a15799f4b6..a1d59170842f5 100644 --- a/libs/experimental/langchain_experimental/openai_assistant/base.py +++ b/libs/experimental/langchain_experimental/openai_assistant/base.py @@ -19,11 +19,15 @@ class OpenAIAssistantFinish(AgentFinish): + """AgentFinish with run and thread metadata.""" + run_id: str thread_id: str class OpenAIAssistantAction(AgentAction): + """AgentAction with info needed to submit custom tool output to existing run.""" + tool_call_id: str run_id: str thread_id: str @@ -181,17 +185,6 @@ def create_assistant( ) return cls(assistant_id=assistant.id, **kwargs) - @root_validator() - def validate_environment(cls, values: Dict) -> Dict: - if not values["client"]: - try: - import openai - except ImportError as e: - raise ImportError() from e - - values["client"] = openai.OpenAI() - return values - def invoke( self, input: dict, config: Optional[RunnableConfig] = None ) -> OutputType: @@ -300,7 +293,7 @@ def _get_response(self, run_id: str, thread_id: str) -> Any: isinstance(content, openai.types.beta.threads.MessageContentText) for content in answer ): - answer = "".join(content.text.value for content in answer) + answer = "\n".join(content.text.value for content in answer) return OpenAIAssistantFinish( return_values={"output": answer}, log="", From c8c8275344675ae0f0a38a23d12b91fde89c684f Mon Sep 17 00:00:00 2001 From: Bagatur Date: Tue, 7 Nov 2023 11:32:54 -0800 Subject: [PATCH 17/22] nb --- cookbook/openai_v1_cookbook.ipynb | 108 +++++++++--------- .../openai_assistant/base.py | 4 +- 2 files changed, 55 insertions(+), 57 deletions(-) diff --git a/cookbook/openai_v1_cookbook.ipynb b/cookbook/openai_v1_cookbook.ipynb index 5a278a9be91d6..2e474be2fb7bd 100644 --- a/cookbook/openai_v1_cookbook.ipynb +++ b/cookbook/openai_v1_cookbook.ipynb @@ -17,12 +17,12 @@ "metadata": {}, "outputs": [], "source": [ - "!pip install \"openai>=1\"" + "# !pip install \"openai>=1\"" ] }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 2, "id": "c3e067ce-7a43-47a7-bc89-41f1de4cf136", "metadata": {}, "outputs": [], @@ -43,17 +43,17 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 3, "id": "1c8c3965-d3c9-4186-b5f3-5e67855ef916", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "AIMessage(content='The image appears to be a diagram representing the architecture or components of a software platform named \"LangChain.\" This diagram outlines various layers and elements of the platform, which seems to be related to language processing or computational linguistics, as suggested by the context clues in the names of the components.\\n\\nHere\\'s a breakdown of the components shown:\\n\\n- **LangSmith**: This seems to be a tool or suite related to testing, evaluation, monitoring, feedback, and annotation within the platform.\\n\\n- **LangServe**: This could represent a service layer that exposes the platform\\'s capabilities as REST API endpoints.\\n\\n- **Templates**: These are likely reference applications provided as starting points or examples for users of the platform.\\n\\n- **Chains, 
agents, agent executors**: This section describes the common application logic, perhaps indicating that the platform uses a chain of agents or processes to execute tasks.\\n\\n- **Model I/O**: This includes the components related to input/output processing for a model, like prompt, example selector, model, and output parser.\\n\\n- **Retrieval**: These components are involved in retrieving documents, splitting text, and managing embeddings and vector stores, which are important for tasks like search and information retrieval.\\n\\n- **Agent tooling**: This might refer to the tools used for creating,')" + "AIMessage(content='The image appears to be a diagram of a software architecture or framework related to language processing, named \"LangChain.\" This architecture seems to be designed to handle various aspects of natural language understanding or processing tasks. Here are the components as labeled in the diagram:\\n\\n1. **LangSmith**: This component is related to testing, evaluation, monitoring, feedback, and annotation. It also has a debugging aspect to it.\\n2. **LangServe**: This is a service that can chain as REST API, indicating it might serve language processing capabilities over a network.\\n3. **Templates**: Reference applications are mentioned here, which might be used as starting points or examples for building new applications.\\n4. **Chains, agents, agent executors**: This part of the system handles common application logic, potentially acting as the middleware or the operational logic of the framework.\\n5. **Model I/O**: Input/output management for the model, including prompts, example selectors, the models themselves, and output parsers.\\n6. **Retrieval**: Components related to retrieving documents, including a document loader, text splitter, embedding model, vector store, and retriever. These are likely used for finding and processing relevant information from a large corpus of text.\\n7. **Agent tooling**: This might refer')" ] }, - "execution_count": 2, + "execution_count": 3, "metadata": {}, "output_type": "execute_result" } @@ -85,12 +85,25 @@ "source": [ "## [OpenAI assistants](https://platform.openai.com/docs/assistants/overview)\n", "\n", - "> The Assistants API allows you to build AI assistants within your own applications. An Assistant has instructions and can leverage models, tools, and knowledge to respond to user queries. The Assistants API currently supports three types of tools: Code Interpreter, Retrieval, and Function calling" + "> The Assistants API allows you to build AI assistants within your own applications. An Assistant has instructions and can leverage models, tools, and knowledge to respond to user queries. The Assistants API currently supports three types of tools: Code Interpreter, Retrieval, and Function calling\n", + "\n", + "\n", + "You can interact with OpenAI Assistants using OpenAI tools or custom tools. When using exclusively OpenAI tools, you can just invoke the assistant directly and get final answers. When using custom tools, you can run the assistant and tool execution loop using the built-in AgentExecutor or easily write your own executor.\n", + "\n", + "Below we show the different ways to interact with Assistants. As a simple example, let's build a math tutor that can write and run code." 
+ ] + }, + { + "cell_type": "markdown", + "id": "318da28d-4cec-42ab-ae3e-76d95bb34fa5", + "metadata": {}, + "source": [ + "### Using only OpenAI tools" ] }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 4, "id": "a9064bbe-d9f7-4a29-a7b3-73933b3197e7", "metadata": {}, "outputs": [], @@ -100,17 +113,17 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 5, "id": "7a20a008-49ac-46d2-aa26-b270118af5ea", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "[ThreadMessage(id='msg_bG50eUX5DhIKSTLKV7EgaHMM', assistant_id='asst_p9qkGkw1FfYEd90HLvPalaox', content=[MessageContentText(text=Text(annotations=[], value='The value of \\\\( 10 - 4^{2.7} \\\\) is approximately -32.224.'), type='text')], created_at=1699379844, file_ids=[], metadata={}, object='thread.message', role='assistant', run_id='run_2cc89e4VYnxBOhPQsR5pgcPE', thread_id='thread_ugmxDTcETSsM52k4NBsRB6tR')]" + "[ThreadMessage(id='msg_RGOsJ2RBYp79rILrZ0NsAX68', assistant_id='asst_9Xb1ZgefoAbp2V5ddRlDGisy', content=[MessageContentText(text=Text(annotations=[], value='\\\\( 10 - 4^{2.7} \\\\) is approximately -32.2243.'), type='text')], created_at=1699385426, file_ids=[], metadata={}, object='thread.message', role='assistant', run_id='run_E2PRoP04ryly4p5ds5ek2qxs', thread_id='thread_pu9CpsYIWWZtxemZxpbYfhm4')]" ] }, - "execution_count": 2, + "execution_count": 5, "metadata": {}, "output_type": "execute_result" } @@ -131,12 +144,14 @@ "id": "a8ddd181-ac63-4ab6-a40d-a236120379c1", "metadata": {}, "source": [ - "### As a LangChain agent with arbitrary tools" + "### As a LangChain agent with arbitrary tools\n", + "\n", + "Now let's recreate this functionality using our own tools. For this example we'll use the [E2B sandbox runtime tool](https://e2b.dev/docs?ref=landing-page-get-started)." 
] }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 7, "id": "48681ac7-b267-48d4-972c-8a7df8393a21", "metadata": {}, "outputs": [], @@ -148,7 +163,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 8, "id": "1c01dd79-dd3e-4509-a2e2-009a7f99f16a", "metadata": {}, "outputs": [], @@ -167,12 +182,12 @@ "id": "1ac71d8b-4b4b-4f98-b826-6b3c57a34166", "metadata": {}, "source": [ - "#### Using AgentExecutor\n" + "#### Using AgentExecutor" ] }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 9, "id": "1f137f94-801f-4766-9ff5-2de9df5e8079", "metadata": {}, "outputs": [ @@ -180,10 +195,10 @@ "data": { "text/plain": [ "{'content': \"What's 10 - 4 raised to the 2.7\",\n", - " 'output': 'The result of \\\\( 10 - 4^{2.7} \\\\) is approximately \\\\(-32.224\\\\).'}" + " 'output': 'The result of \\\\(10 - 4\\\\) raised to the power of \\\\(2.7\\\\) is approximately \\\\(126.19\\\\).'}" ] }, - "execution_count": 7, + "execution_count": 9, "metadata": {}, "output_type": "execute_result" } @@ -205,7 +220,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 19, "id": "c0475fa7-b6c1-4331-b8e2-55407466c724", "metadata": {}, "outputs": [], @@ -221,7 +236,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 22, "id": "b76cb669-6aba-4827-868f-00aa960026f2", "metadata": {}, "outputs": [], @@ -244,53 +259,44 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 23, "id": "7946116a-b82f-492e-835e-ca958a8949a5", "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "OpenAIAssistantFinish(return_values={'output': 'The result of \\\\( 10 - 4^{2.7} \\\\) is approximately -32.2243.'}, log='', run_id='run_0HGfB3E7h5At9ZQTqlDcVDxX', thread_id='thread_yCuCltQgfxXKZGSI6gkylv4v')" - ] - }, - "execution_count": 15, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "e2b_data_analysis {'python_code': 'print((10 - 4) ** 2.7)'} {\"stdout\": \"126.18518711065899\", \"stderr\": \"\", \"artifacts\": []}\n", + "\n", + "\\( 10 - 4 \\) raised to the power of 2.7 is approximately 126.185.\n" + ] } ], "source": [ "response = execute_agent(agent, tools, {\"content\": \"What's 10 - 4 raised to the 2.7\"})\n", - "response" + "print(response.return_values[\"output\"])" ] }, { "cell_type": "code", - "execution_count": 17, + "execution_count": 24, "id": "f2744a56-9f4f-4899-827a-fa55821c318c", "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "OpenAIAssistantFinish(return_values={'output': 'After adding 17.241 to \\\\(10 - 4^{2.7}\\\\), the result is approximately -14.9833.'}, log='', run_id='run_cJrAHdn0uuEotVr9cDXUJHjH', thread_id='thread_yCuCltQgfxXKZGSI6gkylv4v')" - ] - }, - "execution_count": 17, - "metadata": {}, - "output_type": "execute_result" - }, - { - "name": "stderr", + "name": "stdout", "output_type": "stream", "text": [ - "WebSocket received error while receiving messages: no close frame received or sent\n" + "e2b_data_analysis {'python_code': 'result = (10 - 4) ** 2.7 + 17.241\\nprint(result)'} {\"stdout\": \"143.426187110659\", \"stderr\": \"\", \"artifacts\": []}\n", + "\n", + "\\( (10 - 4)^{2.7} + 17.241 \\) is approximately 143.426.\n" ] } ], "source": [ "next_response = execute_agent(agent, tools, {\"content\": \"now add 17.241\", \"thread_id\": response.thread_id})\n", - "next_response" + "print(next_response.return_values[\"output\"])" ] }, { @@ -307,7 +313,7 @@ }, { "cell_type": "code", - "execution_count": 
8, + "execution_count": 16, "id": "db6072c4-f3f3-415d-872b-71ea9f3c02bb", "metadata": {}, "outputs": [ @@ -350,7 +356,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 17, "id": "08e00ccf-b991-4249-846b-9500a0ccbfa0", "metadata": {}, "outputs": [ @@ -361,7 +367,7 @@ " {'name': 'Deepmind', 'origin': 'UK'}]}" ] }, - "execution_count": 9, + "execution_count": 17, "metadata": {}, "output_type": "execute_result" } @@ -384,7 +390,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 18, "id": "1281883c-bf8f-4665-89cd-4f33ccde69ab", "metadata": {}, "outputs": [ @@ -412,14 +418,6 @@ ")\n", "print(output.llm_output)" ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5c637ba1-322d-4fc9-b97e-3afa83dc4d72", - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { diff --git a/libs/experimental/langchain_experimental/openai_assistant/base.py b/libs/experimental/langchain_experimental/openai_assistant/base.py index a1d59170842f5..45de5e946308e 100644 --- a/libs/experimental/langchain_experimental/openai_assistant/base.py +++ b/libs/experimental/langchain_experimental/openai_assistant/base.py @@ -52,8 +52,8 @@ def _get_openai_client() -> openai.OpenAI: OutputType = Union[ List[OpenAIAssistantAction], OpenAIAssistantFinish, - List[ThreadMessage], - List[RequiredActionFunctionToolCall], + List["ThreadMessage"], + List["RequiredActionFunctionToolCall"], ] From bbdd211a7bb6e7010b00935d71d835381222e2e7 Mon Sep 17 00:00:00 2001 From: Bagatur Date: Tue, 7 Nov 2023 11:33:14 -0800 Subject: [PATCH 18/22] nit --- cookbook/openai_v1_cookbook.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cookbook/openai_v1_cookbook.ipynb b/cookbook/openai_v1_cookbook.ipynb index 2e474be2fb7bd..accb815660063 100644 --- a/cookbook/openai_v1_cookbook.ipynb +++ b/cookbook/openai_v1_cookbook.ipynb @@ -17,7 +17,7 @@ "metadata": {}, "outputs": [], "source": [ - "# !pip install \"openai>=1\"" + "!pip install \"openai>=1\" \"langchain>=0.0.331rc3\"" ] }, { From 57c95cb95edf9871e7a3c45a0284c19798161d5a Mon Sep 17 00:00:00 2001 From: Bagatur Date: Tue, 7 Nov 2023 11:37:33 -0800 Subject: [PATCH 19/22] nit --- cookbook/openai_v1_cookbook.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cookbook/openai_v1_cookbook.ipynb b/cookbook/openai_v1_cookbook.ipynb index accb815660063..d97cac515704a 100644 --- a/cookbook/openai_v1_cookbook.ipynb +++ b/cookbook/openai_v1_cookbook.ipynb @@ -17,7 +17,7 @@ "metadata": {}, "outputs": [], "source": [ - "!pip install \"openai>=1\" \"langchain>=0.0.331rc3\"" + "!pip install \"openai>=1\" \"langchain>=0.0.331rc3\" langchain-experimental" ] }, { From ce853ed928af5d0faba0507564297f67b1a81742 Mon Sep 17 00:00:00 2001 From: Bagatur Date: Tue, 7 Nov 2023 11:37:52 -0800 Subject: [PATCH 20/22] nit --- cookbook/openai_v1_cookbook.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cookbook/openai_v1_cookbook.ipynb b/cookbook/openai_v1_cookbook.ipynb index d97cac515704a..d3fe697c147e8 100644 --- a/cookbook/openai_v1_cookbook.ipynb +++ b/cookbook/openai_v1_cookbook.ipynb @@ -17,7 +17,7 @@ "metadata": {}, "outputs": [], "source": [ - "!pip install \"openai>=1\" \"langchain>=0.0.331rc3\" langchain-experimental" + "!pip install -U openai \"langchain>=0.0.331rc3\" langchain-experimental" ] }, { From 95b42f67f745e4e5052b9984f610692aa2c41d7d Mon Sep 17 00:00:00 2001 From: Bagatur Date: Tue, 7 Nov 2023 11:40:22 -0800 Subject: [PATCH 21/22] lint --- 
.../langchain_experimental/openai_assistant/base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/experimental/langchain_experimental/openai_assistant/base.py b/libs/experimental/langchain_experimental/openai_assistant/base.py index 45de5e946308e..4393071603d4f 100644 --- a/libs/experimental/langchain_experimental/openai_assistant/base.py +++ b/libs/experimental/langchain_experimental/openai_assistant/base.py @@ -4,7 +4,7 @@ from time import sleep from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Union -from langchain.pydantic_v1 import Field, root_validator +from langchain.pydantic_v1 import Field from langchain.schema.agent import AgentAction, AgentFinish from langchain.schema.runnable import RunnableConfig, RunnableSerializable from langchain.tools import format_tool_to_openai_function From b345be405e6e8c7c61231d39082a869b827ab5f2 Mon Sep 17 00:00:00 2001 From: Bagatur Date: Tue, 7 Nov 2023 11:44:15 -0800 Subject: [PATCH 22/22] nit --- cookbook/openai_v1_cookbook.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cookbook/openai_v1_cookbook.ipynb b/cookbook/openai_v1_cookbook.ipynb index d3fe697c147e8..8eb268d4358e7 100644 --- a/cookbook/openai_v1_cookbook.ipynb +++ b/cookbook/openai_v1_cookbook.ipynb @@ -17,7 +17,7 @@ "metadata": {}, "outputs": [], "source": [ - "!pip install -U openai \"langchain>=0.0.331rc3\" langchain-experimental" + "!pip install -U openai \"langchain>=0.0.331rc1\" langchain-experimental" ] }, {