diff --git a/autogpts/autogpt/autogpt/agents/base.py b/autogpts/autogpt/autogpt/agents/base.py
index 774070e8c6e9..f71d8c7a8bae 100644
--- a/autogpts/autogpt/autogpt/agents/base.py
+++ b/autogpts/autogpt/autogpt/agents/base.py
@@ -40,7 +40,11 @@
 from autogpt.core.runner.client_lib.logging.helpers import dump_prompt
 from autogpt.file_storage.base import FileStorage
 from autogpt.llm.providers.openai import get_openai_command_specs
-from autogpt.models.action_history import ActionResult, EpisodicActionHistory
+from autogpt.models.action_history import (
+    ActionResult,
+    ActionSuccessResult,
+    EpisodicActionHistory,
+)
 from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT
 
 logger = logging.getLogger(__name__)
@@ -173,6 +177,14 @@ def __init__(
         self.legacy_config = legacy_config
         """LEGACY: Monolithic application configuration."""
 
+        # In case the agent is resumed, cursor is set to the last episode
+        if self.event_history:
+            # To prevent errors, when the last action is "finish", we register a result
+            # And move cursor to the next action
+            if self.event_history.current_episode.action.name == "finish":
+                self.event_history.register_result(ActionSuccessResult())
+                self.event_history.cursor = len(self.event_history)
+
         self.llm_provider = llm_provider
         self.prompt_strategy = prompt_strategy
 
diff --git a/autogpts/autogpt/autogpt/app/agent_protocol_server.py b/autogpts/autogpt/autogpt/app/agent_protocol_server.py
index 9cc0603b2d3f..5118782d157e 100644
--- a/autogpts/autogpt/autogpt/app/agent_protocol_server.py
+++ b/autogpts/autogpt/autogpt/app/agent_protocol_server.py
@@ -311,12 +311,16 @@ async def execute_step(self, task_id: str, step_request: StepRequestBody) -> Ste
                         "name": execute_command,
                         "args": execute_command_args,
                         "result": (
-                            orjson.loads(execute_result.json())
-                            if not isinstance(execute_result, ActionErrorResult)
-                            else {
-                                "error": str(execute_result.error),
-                                "reason": execute_result.reason,
-                            }
+                            ""
+                            if execute_result is None
+                            else (
+                                orjson.loads(execute_result.json())
+                                if not isinstance(execute_result, ActionErrorResult)
+                                else {
+                                    "error": str(execute_result.error),
+                                    "reason": execute_result.reason,
+                                }
+                            )
                         ),
                     },
                 }
diff --git a/autogpts/autogpt/autogpt/app/main.py b/autogpts/autogpt/autogpt/app/main.py
index 50c9bfff26a9..ad04bad8fb21 100644
--- a/autogpts/autogpt/autogpt/app/main.py
+++ b/autogpts/autogpt/autogpt/app/main.py
@@ -190,16 +190,14 @@ async def run_auto_gpt(
     # Resume an Existing Agent #
     ############################
     if load_existing_agent:
-        agent_state = agent_manager.load_agent_state(load_existing_agent)
+        agent_state = None
         while True:
             answer = await clean_input(config, "Resume? [Y/n]")
-            if answer.lower() == "y":
+            if answer == "" or answer.lower() == "y":
+                agent_state = agent_manager.load_agent_state(load_existing_agent)
                 break
             elif answer.lower() == "n":
-                agent_state = None
                 break
-            else:
-                print("Please respond with 'y' or 'n'")
 
         if agent_state:
             agent = configure_agent_with_state(