diff --git a/gptme/chat.py b/gptme/chat.py
index 74cffef7..71a84525 100644
--- a/gptme/chat.py
+++ b/gptme/chat.py
@@ -19,8 +19,10 @@
 from .message import Message
 from .models import get_model
 from .tools import ToolUse, execute_msg, has_tool
+from .tools.base import ConfirmFunc
 from .tools.browser import read_url
 from .util import (
+    ask_execute,
     console,
     path_with_tilde,
     print_bell,
@@ -89,6 +91,11 @@ def chat(
         manager.log.print(show_hidden=show_hidden)
         console.print("--- ^^^ past messages ^^^ ---")
 
+    def confirm_func(msg) -> bool:
+        if no_confirm:
+            return True
+        return ask_execute(msg)
+
     # main loop
     while True:
         # if prompt_msgs given, process each prompt fully before moving to the next
@@ -99,16 +106,14 @@ def chat(
                 msg = _include_paths(msg)
             manager.append(msg)
             # if prompt is a user-command, execute it
-            if execute_cmd(msg, manager):
+            if execute_cmd(msg, manager, confirm_func):
                 continue
 
             # Generate and execute response for this prompt
             while True:
                 set_interruptible()
                 try:
-                    response_msgs = list(
-                        step(manager.log, no_confirm, stream=stream)
-                    )
+                    response_msgs = list(step(manager.log, stream, confirm_func))
                 except KeyboardInterrupt:
                     console.log("Interrupted. Stopping current execution.")
                     manager.append(Message("system", "Interrupted"))
@@ -120,7 +125,7 @@ def chat(
                     manager.append(response_msg)
                     # run any user-commands, if msg is from user
                     if response_msg.role == "user" and execute_cmd(
-                        response_msg, manager
+                        response_msg, manager, confirm_func
                     ):
                         break
 
@@ -153,17 +158,17 @@ def chat(
 
         # ask for input if no prompt, generate reply, and run tools
         clear_interruptible()  # Ensure we're not interruptible during user input
-        for msg in step(manager.log, no_confirm, stream=stream):  # pragma: no cover
+        for msg in step(manager.log, stream, confirm_func):  # pragma: no cover
             manager.append(msg)
             # run any user-commands, if msg is from user
-            if msg.role == "user" and execute_cmd(msg, manager):
+            if msg.role == "user" and execute_cmd(msg, manager, confirm_func):
                 break
 
 
 def step(
     log: Log | list[Message],
-    no_confirm: bool,
-    stream: bool = True,
+    stream: bool,
+    confirm: ConfirmFunc,
 ) -> Generator[Message, None, None]:
     """Runs a single pass of the chat."""
     if isinstance(log, list):
@@ -200,7 +205,7 @@ def step(
         # log response and run tools
         if msg_response:
             yield msg_response.replace(quiet=True)
-            yield from execute_msg(msg_response, ask=not no_confirm)
+            yield from execute_msg(msg_response, confirm=confirm)
     except KeyboardInterrupt:
         clear_interruptible()
         yield Message("system", "Interrupted")
diff --git a/gptme/commands.py b/gptme/commands.py
index b25f3df1..a691cbc3 100644
--- a/gptme/commands.py
+++ b/gptme/commands.py
@@ -16,8 +16,8 @@
 )
 from .models import get_model
 from .tools import ToolUse, execute_msg, loaded_tools
+from .tools.base import ConfirmFunc
 from .useredit import edit_text_with_editor
-from .util import ask_execute
 
 logger = logging.getLogger(__name__)
 
@@ -54,21 +54,23 @@
 COMMANDS = list(action_descriptions.keys())
 
 
-def execute_cmd(msg: Message, log: LogManager) -> bool:
+def execute_cmd(msg: Message, log: LogManager, confirm: ConfirmFunc) -> bool:
     """Executes any user-command, returns True if command was executed."""
     assert msg.role == "user"
     # if message starts with ., treat as command
     # when command has been run,
     if msg.content[:1] in ["/"]:
-        for resp in handle_cmd(msg.content, log, no_confirm=True):
+        for resp in handle_cmd(msg.content, log, confirm):
            log.append(resp)
         return True
     return False
 
 
 def handle_cmd(
-    cmd: str, manager: LogManager, no_confirm: bool
+    cmd: str,
+    manager: LogManager,
+    confirm: ConfirmFunc,
 ) -> Generator[Message, None, None]:
     """Handles a command."""
     cmd = cmd.lstrip("/")
@@ -85,7 +87,7 @@ def handle_cmd(
             # rename the conversation
             print("Renaming conversation (enter empty name to auto-generate)")
             new_name = args[0] if args else input("New name: ")
-            rename(manager, new_name, ask=not no_confirm)
+            rename(manager, new_name, confirm)
         case "fork":
             # fork the conversation
             new_name = args[0] if args else input("New name: ")
@@ -116,13 +118,13 @@ def handle_cmd(
             print("Replaying conversation...")
             for msg in manager.log:
                 if msg.role == "assistant":
-                    for reply_msg in execute_msg(msg, ask=True):
+                    for reply_msg in execute_msg(msg, confirm):
                         print_msg(reply_msg, oneline=False)
         case "impersonate":
             content = full_args if full_args else input("[impersonate] Assistant: ")
             msg = Message("assistant", content)
             yield msg
-            yield from execute_msg(msg, ask=not no_confirm)
+            yield from execute_msg(msg, confirm)
         case "tokens":
             manager.undo(1, quiet=True)
             n_tokens = len_tokens(manager.log.messages)
@@ -146,7 +148,7 @@ def handle_cmd(
             # the case for python, shell, and other block_types supported by tools
             tooluse = ToolUse(name, [], full_args)
             if tooluse.is_runnable:
-                yield from tooluse.execute(ask=not no_confirm)
+                yield from tooluse.execute(confirm)
             else:
                 if manager.log[-1].content.strip() == "/help":
                     # undo the '/help' command itself
@@ -176,16 +178,14 @@ def edit(manager: LogManager) -> Generator[Message, None, None]:  # pragma: no cover
     print("Applied edited messages, write /log to see the result")
 
 
-def rename(manager: LogManager, new_name: str, ask: bool = True):
+def rename(manager: LogManager, new_name: str, confirm: ConfirmFunc) -> None:
     if new_name in ["", "auto"]:
         new_name = llm.generate_name(prepare_messages(manager.log.messages))
         assert " " not in new_name
         print(f"Generated name: {new_name}")
-        if ask:
-            confirm = ask_execute("Confirm?")
-            if not confirm:
-                print("Aborting")
-                return
+        if not confirm("Confirm?"):
+            print("Aborting")
+            return
         manager.rename(new_name, keep_date=True)
     else:
         manager.rename(new_name, keep_date=False)
diff --git a/gptme/message.py b/gptme/message.py
index 1246d9fb..6e771f7c 100644
--- a/gptme/message.py
+++ b/gptme/message.py
@@ -180,6 +180,9 @@ def to_xml(self) -> str:
     def format(self, oneline: bool = False, highlight: bool = False) -> str:
         return format_msgs([self], oneline=oneline, highlight=highlight)[0]
 
+    def print(self, oneline: bool = False, highlight: bool = True) -> None:
+        print_msg(self, oneline=oneline, highlight=highlight)
+
     def to_toml(self) -> str:
         """Converts a message to a TOML string, for easy editing by hand in editor to then be parsed back."""
         flags = []
diff --git a/gptme/server/api.py b/gptme/server/api.py
index e0c6450c..fda5538e 100644
--- a/gptme/server/api.py
+++ b/gptme/server/api.py
@@ -83,6 +83,11 @@ def api_conversation_post(logfile: str):
     return {"status": "ok"}
 
 
+# TODO: add support for confirmation
+def confirm_func(msg: str) -> bool:
+    return True
+
+
 # generate response
 @api.route("/api/conversations/<string:logfile>/generate", methods=["POST"])
 def api_conversation_generate(logfile: str):
@@ -100,7 +105,7 @@ def api_conversation_generate(logfile: str):
         f = io.StringIO()
         print("Begin capturing stdout, to pass along command output.")
         with redirect_stdout(f):
-            resp = execute_cmd(manager.log[-1], manager)
+            resp = execute_cmd(manager.log[-1], manager, confirm_func)
         print("Done capturing stdout.")
         if resp:
             manager.write()
@@ -121,7 +126,7 @@ def api_conversation_generate(logfile: str):
     resp_msgs = []
     manager.append(msg)
     resp_msgs.append(msg)
-    for reply_msg in execute_msg(msg, ask=False):
+    for reply_msg in execute_msg(msg, confirm_func):
         manager.append(reply_msg)
         resp_msgs.append(reply_msg)
diff --git a/gptme/tools/__init__.py b/gptme/tools/__init__.py
index 81063662..8912ed6d 100644
--- a/gptme/tools/__init__.py
+++ b/gptme/tools/__init__.py
@@ -3,7 +3,7 @@
 from functools import lru_cache
 
 from ..message import Message
-from .base import ToolSpec, ToolUse
+from .base import ConfirmFunc, ToolSpec, ToolUse
 from .browser import tool as browser_tool
 from .chats import tool as chats_tool
 from .gh import tool as gh_tool
@@ -12,12 +12,12 @@
 from .python import tool as python_tool
 from .read import tool as tool_read
 from .save import tool_append, tool_save
+from .screenshot import tool as screenshot_tool
 from .shell import tool as shell_tool
 from .subagent import tool as subagent_tool
 from .tmux import tool as tmux_tool
 from .vision import tool as vision_tool
 from .youtube import tool as youtube_tool
-from .screenshot import tool as screenshot_tool
 
 logger = logging.getLogger(__name__)
 
@@ -82,12 +82,12 @@ def load_tool(tool: ToolSpec) -> None:
     loaded_tools.append(tool)
 
 
-def execute_msg(msg: Message, ask: bool) -> Generator[Message, None, None]:
+def execute_msg(msg: Message, confirm: ConfirmFunc) -> Generator[Message, None, None]:
     """Uses any tools called in a message and returns the response."""
     assert msg.role == "assistant", "Only assistant messages can be executed"
 
     for tooluse in ToolUse.iter_from_content(msg.content):
-        yield from tooluse.execute(ask)
+        yield from tooluse.execute(confirm)
 
 
 # Called often when checking streaming output for executable blocks,
diff --git a/gptme/tools/base.py b/gptme/tools/base.py
index d5caa060..961a7dc2 100644
--- a/gptme/tools/base.py
+++ b/gptme/tools/base.py
@@ -20,9 +20,18 @@
 exclusive_mode = False
 
 
+class ConfirmFunc(Protocol):
+    def __call__(self, msg: str) -> bool: ...
+
+
+def ask_confirm(msg: str) -> bool:
+    """Asks the user for confirmation."""
+    return input(f"{msg} [y/n] ").lower().startswith("y")
+
+
 class ExecuteFunc(Protocol):
     def __call__(
-        self, code: str, ask: bool, args: list[str]
+        self, code: str, args: list[str], confirm: ConfirmFunc
     ) -> Generator[Message, None, None]: ...
 
 
@@ -88,7 +97,7 @@ class ToolUse:
     content: str
     start: int | None = None
 
-    def execute(self, ask: bool) -> Generator[Message, None, None]:
+    def execute(self, confirm: ConfirmFunc) -> Generator[Message, None, None]:
         """Executes a tool-use tag and returns the output."""
         # noreorder
         from . import get_tool  # fmt: skip
@@ -96,7 +105,7 @@ def execute(self, ask: bool) -> Generator[Message, None, None]:
         tool = get_tool(self.tool)
         if tool and tool.execute:
             try:
-                yield from tool.execute(self.content, ask, self.args)
+                yield from tool.execute(self.content, self.args, confirm)
             except Exception as e:
                 # if we are testing, raise the exception
                 if "pytest" in globals():
diff --git a/gptme/tools/patch.py b/gptme/tools/patch.py
index 09cfc2ee..0b0a5c69 100644
--- a/gptme/tools/patch.py
+++ b/gptme/tools/patch.py
@@ -9,8 +9,8 @@
 from pathlib import Path
 
 from ..message import Message
-from ..util import ask_execute, print_preview
-from .base import ToolSpec, ToolUse
+from ..util import print_preview
+from .base import ConfirmFunc, ToolSpec, ToolUse
 
 instructions = f"""
 To patch/modify files, we use an adapted version of git conflict markers.
@@ -153,7 +153,9 @@ def apply(codeblock: str, content: str) -> str:
 
 
 def execute_patch(
-    code: str, ask: bool, args: list[str]
+    code: str,
+    args: list[str],
+    confirm: ConfirmFunc,
 ) -> Generator[Message, None, None]:
     """
     Applies the patch.
@@ -175,13 +177,13 @@ def execute_patch(
         yield Message("system", f"Patch failed: {e.args[0]}")
         return
 
+    # TODO: display minimal patches
+    # TODO: include patch headers to delimit multiple patches
     print_preview(patches_str, lang="diff")
-    if ask:
-        # TODO: display minimal patches
-        confirm = ask_execute(f"Apply patch to {fn}?")
-        if not confirm:
-            print("Patch not applied")
-            return
+
+    if not confirm(f"Apply patch to {fn}?"):
+        print("Patch not applied")
+        return
 
     try:
         with open(path) as f:
diff --git a/gptme/tools/python.py b/gptme/tools/python.py
index 80a1e863..36fadcbf 100644
--- a/gptme/tools/python.py
+++ b/gptme/tools/python.py
@@ -19,8 +19,8 @@
 )
 
 from ..message import Message
-from ..util import ask_execute, print_preview
-from .base import ToolSpec, ToolUse
+from ..util import print_preview
+from .base import ConfirmFunc, ToolSpec, ToolUse
 
 if TYPE_CHECKING:
     from IPython.terminal.embed import InteractiveShellEmbed  # fmt: skip
@@ -93,19 +93,16 @@ def _get_ipython():
     return _ipython
 
 
-def execute_python(code: str, ask: bool, args=None) -> Generator[Message, None, None]:
+def execute_python(
+    code: str, args: list[str], confirm: ConfirmFunc = lambda _: True
+) -> Generator[Message, None, None]:
     """Executes a python codeblock and returns the output."""
     code = code.strip()
-    if ask:
-        print_preview(code, "python")
-        confirm = ask_execute()
-        print()
-        if not confirm:
-            # early return
-            yield Message("system", "Aborted, user chose not to run command.")
-            return
-    else:
-        print("Skipping confirmation")
+    print_preview(code, "python")
+    if not confirm(f"{code}\n\nExecute this code?"):
+        # early return
+        yield Message("system", "Aborted, user chose not to run command.")
+        return
 
     # Create an IPython instance if it doesn't exist yet
     _ipython = _get_ipython()
diff --git a/gptme/tools/save.py b/gptme/tools/save.py
index 192a4896..3c967fc8 100644
--- a/gptme/tools/save.py
+++ b/gptme/tools/save.py
@@ -7,7 +7,7 @@
 
 from ..message import Message
 from ..util import ask_execute, print_preview
-from .base import ToolSpec, ToolUse
+from .base import ConfirmFunc, ToolSpec, ToolUse
 from .patch import Patch
 
 # FIXME: this is markdown-specific instructions, thus will confuse the XML mode
@@ -38,7 +38,9 @@ def execute_save(
-    code: str, ask: bool, args: list[str]
+    code: str,
+    args: list[str],
+    confirm: ConfirmFunc,
 ) -> Generator[Message, None, None]:
     """Save code to a file."""
     fn = " ".join(args)
@@ -53,20 +55,13 @@ def execute_save(
 
     # TODO: add check that it doesn't try to write a file with placeholders!
-    if ask:
-        if Path(fn).exists():
-            current = Path(fn).read_text()
-            p = Patch(current, code)
-            # TODO: if inefficient save, replace request with patch (and vice versa), or even append
-            print_preview(p.diff_minimal(), "diff")
+    if Path(fn).exists():
+        current = Path(fn).read_text()
+        p = Patch(current, code)
+        # TODO: if inefficient save, replace request with patch (and vice versa), or even append
+        print_preview(p.diff_minimal(), "diff")
 
-        confirm = ask_execute(f"Save to {fn}?")
-        print()
-    else:
-        confirm = True
-        print("Skipping confirmation.")
-
-    if ask and not confirm:
+    if not confirm(f"Save to {fn}?"):
         # early return
         yield Message("system", "Save cancelled.")
         return
@@ -75,31 +70,18 @@ def execute_save(
 
     # if the file exists, ask to overwrite
     if path.exists():
-        if ask:
-            overwrite = ask_execute("File exists, overwrite?")
-            print()
-        else:
-            overwrite = True
-            print("Skipping overwrite confirmation.")
-        if not overwrite:
+        if not confirm("File exists, overwrite?"):
             # early return
             yield Message("system", "Save cancelled.")
             return
 
     # if the folder doesn't exist, ask to create it
     if not path.parent.exists():
-        if ask:
-            create = ask_execute("Folder doesn't exist, create it?")
-            print()
-        else:
-            create = True
-            print("Skipping folder creation confirmation.")
-        if create:
-            path.parent.mkdir(parents=True)
-        else:
+        if not ask_execute("Folder doesn't exist, create it?"):
             # early return
             yield Message("system", "Save cancelled.")
             return
+        path.parent.mkdir(parents=True)
 
     print("Saving to " + fn)
     with open(path, "w") as f:
@@ -108,7 +90,7 @@ def execute_append(
-    code: str, ask: bool, args: list[str]
+    code: str, args: list[str], confirm: ConfirmFunc
 ) -> Generator[Message, None, None]:
     """Append code to a file."""
     fn = " ".join(args)
@@ -119,14 +101,7 @@ def execute_append(
     if not code.endswith("\n"):
         code += "\n"
 
-    if ask:
-        confirm = ask_execute(f"Append to {fn}?")
-        print()
-    else:
-        confirm = True
-        print("Skipping append confirmation.")
-
-    if ask and not confirm:
+    if not confirm(f"Append to {fn}?"):
         # early return
         yield Message("system", "Append cancelled.")
         return
diff --git a/gptme/tools/shell.py b/gptme/tools/shell.py
index ebec6c68..6525ac10 100644
--- a/gptme/tools/shell.py
+++ b/gptme/tools/shell.py
@@ -16,8 +16,8 @@
 import bashlex
 
 from ..message import Message
-from ..util import ask_execute, get_tokenizer, print_preview
-from .base import ToolSpec, ToolUse
+from ..util import get_tokenizer, print_preview
+from .base import ConfirmFunc, ToolSpec, ToolUse
 
 logger = logging.getLogger(__name__)
 
@@ -238,7 +238,7 @@ def set_shell(shell: ShellSession) -> None:
 
 
 def execute_shell(
-    code: str, ask: bool, args: list[str]
+    code: str, args: list[str], confirm: ConfirmFunc
 ) -> Generator[Message, None, None]:
     """Executes a shell command and returns the output."""
     shell = get_shell()
@@ -248,32 +248,30 @@ def execute_shell(
     if cmd.startswith("$ "):
         cmd = cmd[len("$ ") :]
 
-    confirm = True
-    if ask:
-        print_preview(f"$ {cmd}", "bash")
-        confirm = ask_execute()
-        print()
-
-    if not ask or confirm:
-        try:
-            returncode, stdout, stderr = shell.run(cmd)
-        except Exception as e:
-            yield Message("system", f"Error: {e}")
-            return
-        stdout = _shorten_stdout(stdout.strip(), pre_tokens=2000, post_tokens=8000)
-        stderr = _shorten_stdout(stderr.strip(), pre_tokens=2000, post_tokens=2000)
-
-        msg = _format_block_smart("Ran command", cmd, lang="bash") + "\n\n"
-        if stdout:
-            msg += _format_block_smart("", stdout, "stdout") + "\n\n"
-        if stderr:
-            msg += _format_block_smart("", stderr, "stderr") + "\n\n"
"stderr") + "\n\n" - if not stdout and not stderr: - msg += "No output\n" - if returncode: - msg += f"Return code: {returncode}" - - yield Message("system", msg) + print_preview(f"$ {cmd}", "bash") + if not confirm(f"Run command `{cmd}`?"): + yield Message("system", "Command not run") + return + + try: + returncode, stdout, stderr = shell.run(cmd) + except Exception as e: + yield Message("system", f"Error: {e}") + return + stdout = _shorten_stdout(stdout.strip(), pre_tokens=2000, post_tokens=8000) + stderr = _shorten_stdout(stderr.strip(), pre_tokens=2000, post_tokens=2000) + + msg = _format_block_smart("Ran command", cmd, lang="bash") + "\n\n" + if stdout: + msg += _format_block_smart("", stdout, "stdout") + "\n\n" + if stderr: + msg += _format_block_smart("", stderr, "stderr") + "\n\n" + if not stdout and not stderr: + msg += "No output\n" + if returncode: + msg += f"Return code: {returncode}" + + yield Message("system", msg) def _format_block_smart(header: str, cmd: str, lang="") -> str: diff --git a/gptme/tools/tmux.py b/gptme/tools/tmux.py index 33a0096a..15749eb7 100644 --- a/gptme/tools/tmux.py +++ b/gptme/tools/tmux.py @@ -13,8 +13,8 @@ from time import sleep from ..message import Message -from ..util import ask_execute, print_preview -from .base import ToolSpec, ToolUse +from ..util import print_preview +from .base import ConfirmFunc, ToolSpec, ToolUse logger = logging.getLogger(__name__) @@ -149,19 +149,18 @@ def list_sessions() -> Message: def execute_tmux( - code: str, ask: bool, args: list[str] + code: str, + args: list[str], + confirm: ConfirmFunc, ) -> Generator[Message, None, None]: """Executes a command in tmux and returns the output.""" assert not args cmd = code.strip() - if ask: - print_preview(f"Command: {cmd}", "bash") - confirm = ask_execute() - print() - if not confirm: - yield Message("system", "Command execution cancelled.") - return + print_preview(f"Command: {cmd}", "bash") + if not confirm(f"Execute command: {cmd}?"): + yield Message("system", "Command execution cancelled.") + return parts = cmd.split(maxsplit=1) command = parts[0] diff --git a/scripts/treeofthoughts.py b/scripts/treeofthoughts.py index 74b6d367..dfcb685e 100644 --- a/scripts/treeofthoughts.py +++ b/scripts/treeofthoughts.py @@ -4,8 +4,14 @@ The idea is to evaluate if we are on the right track by checking if the current branch is "good"/making progress, and otherwise backtracking to the last good branch and trying a different prompt/approach. The goal is to have a more autonomous agent which can self-supervise and make several branching attempts to find the right path to the solution. 
+
+TODO:
+- [ ] add a "feedback" action which lets the critic give feedback as a user to the agent
+- [ ] fix so that we undo to a meaningful point
+- [ ] ask the agent whether changes are good before applying (tricky)
 """
 
+import logging
 import subprocess
 import sys
 from typing import Literal
@@ -17,6 +23,12 @@
 from gptme.prompts import get_prompt
 from lxml import etree
 
+# Set up logging
+logging.basicConfig(
+    level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
+)
+
+
 EvalAction = Literal["continue", "undo", "done"]
 
 
@@ -26,17 +38,18 @@ def project_files() -> list[str]:
     return p.stdout.splitlines()
 
 
-def changed_files() -> list[str]:
-    # Returns a list of changed files in the project
-    p = subprocess.run(
-        ["git", "diff", "--name-only", "HEAD"], capture_output=True, text=True
-    )
-    return p.stdout.splitlines()
-
-
-def unstaged_files() -> list[str]:
-    # Returns a list of unstaged files in the project
-    p = subprocess.run(["git", "diff", "--name-only"], capture_output=True, text=True)
+def git_diff_files(diff_type: str = "HEAD") -> list[str]:
+    # Returns a list of files based on the git diff type
+    try:
+        p = subprocess.run(
+            ["git", "diff", "--name-only", diff_type],
+            capture_output=True,
+            text=True,
+            check=True,
+        )
+    except subprocess.CalledProcessError as e:
+        print(f"Error getting git diff files: {e}")
+        return []
     return p.stdout.splitlines()
 
 
@@ -54,9 +67,14 @@ def context_from_files(files: list[str]) -> str:
     return context
 
 
+def llm_confirm(msg: str) -> bool:
+    # TODO: ask an LLM whether we should confirm
+    return True
+
+
 def step(log: Log) -> Log:
     # Steps the conversation forward
-    for msg in _step(log, no_confirm=True):
+    for msg in _step(log, stream=True, confirm=llm_confirm):
         log = log.append(msg)
     return log
 
@@ -94,11 +112,12 @@ def recommendation(log: Log) -> EvalAction:
 
 def lint_format(log: Log) -> Log:
     # Lint, format, and fix the conversation by calling "make format"
-    p = subprocess.run(["make", "format"], capture_output=True, text=True)
+    cmd = "pre-commit run --files $(git ls-files -m)"
+    p = subprocess.run(cmd, shell=True, capture_output=True, text=True)
     if p.returncode == 0:
         return log
 
-    changed_files = [f for f in unstaged_files() if f in p.stdout or f in p.stderr]
+    changed_files = [f for f in git_diff_files() if f in p.stdout or f in p.stderr]
     files_str = f"""Files:
 {context_from_files(changed_files)}
 """
@@ -106,7 +125,7 @@ def lint_format(log: Log) -> Log:
     system_msg = Message(
         "system",
         f"""
-Linting and formatting the code with "make format"...
+Running checks with `{cmd}`
 
 stdout:
 {p.stdout}
@@ -117,28 +136,7 @@ def lint_format(log: Log) -> Log:
 {files_str}
 """.strip(),
     )
-    log = log.append(system_msg)
-    return log
-
-
-def typecheck(log: Log) -> Log:
-    # Typecheck the code by calling "make typecheck"
-    p = subprocess.run(["make", "typecheck"], capture_output=True, text=True)
-    if p.returncode == 0:
-        return log
-
-    system_msg = Message(
-        "system",
-        f"""
-Typechecking the code with "make typecheck"...
-
-stdout:
-{p.stdout}
-
-stderr:
-{p.stderr}
-""",
-    )
+    system_msg.print()
     log = log.append(system_msg)
     return log
 
 
@@ -148,7 +146,7 @@ def typecheck(log: Log) -> Log:
 
 
 def gather_context() -> Message:
     # Dynamically gather context from changed files
-    files = changed_files()
+    files = git_diff_files()
     return Message("system", context_header + context_from_files(files))
 
 
@@ -161,7 +159,7 @@ def update_context(log: Log) -> Log:
 def main():
     print("Initializing the autonomous agent...")
     init(
-        model="openai/gpt-4o",
+        model="anthropic",
         interactive=False,
         tool_allowlist=["python", "shell", "save", "patch"],
     )
@@ -173,10 +171,14 @@ def main():
     log = Log(initial_msgs + prompts)
 
     # Main loop for autonomous operation
-    while True:
+    max_iterations = 50
+    iteration = 0
+    progress = 0
+    while iteration < max_iterations:
+        iteration += 1
         # Gather and update context
         log = update_context(log)
-        print("Context updated.")
+        print(f"Context updated. Iteration: {iteration}/{max_iterations}")
 
         # Step the conversation forward
         log = step(log)
@@ -191,7 +193,6 @@ def main():
         ):
             print("Changes detected, performing lint and typecheck.")
             log = lint_format(log)
-            log = typecheck(log)
 
         # Get recommendation for next action
         action = recommendation(log)
@@ -199,18 +200,21 @@ def main():
 
         # Execute the recommended action
         if action == "continue":
+            progress += 1
+            print(f"Progress: {progress}")
             continue
         elif action == "undo":
             log = log.pop()
-            print("Undoing last step.")
+            progress = max(0, progress - 1)
+            print(f"Undoing last step. Progress: {progress}")
         elif action == "done":
-            print("Task completed successfully.")
+            print(f"Task completed successfully. Total iterations: {iteration}")
             break
         else:
             print(f"Unexpected action: {action}")
             break
 
-    print("Exiting")
+    print(f"Exiting. Final progress: {progress}/{iteration}")
 
 
 if __name__ == "__main__":
diff --git a/tests/test_tools_python.py b/tests/test_tools_python.py
index 787ec5c1..57a58d03 100644
--- a/tests/test_tools_python.py
+++ b/tests/test_tools_python.py
@@ -4,7 +4,7 @@
 
 
 def run(code):
-    return next(execute_python(code, ask=False)).content
+    return next(execute_python(code, [])).content
 
 
 def test_execute_python():
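
Note (not part of the patch): a minimal usage sketch of the ConfirmFunc callback introduced above, assuming tools have already been loaded elsewhere (e.g. via gptme's init); the auto_confirm helper is a hypothetical stand-in for gptme's ask_execute or the server's confirm_func stub.

from gptme.message import Message
from gptme.tools import execute_msg


def auto_confirm(msg: str) -> bool:
    # Satisfies the ConfirmFunc protocol: receives a prompt such as
    # "Run command `ls`?" and returns True to proceed, False to abort.
    print(f"[confirm] {msg}")
    return True


# An assistant message containing a runnable codeblock; execute_msg parses it
# into ToolUse instances and calls confirm before executing each one.
assistant_msg = Message("assistant", "```shell\necho hello\n```")
for reply in execute_msg(assistant_msg, confirm=auto_confirm):
    print(reply.content)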