Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Update one_shot.py #8098

Closed
wants to merge 1 commit into from
Closed
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
124 changes: 62 additions & 62 deletions autogpt/autogpt/agents/prompt_strategies/one_shot.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,17 +28,29 @@

class AssistantThoughts(ModelWithSummary):
    """Structured chain-of-thought the agent must emit on every step."""

    # Relevant observations from the last executed action, if any.
    observations: str = Field(
        description="Relevant observations from your last action (if any)"
    )
    text: str = Field(description="Thoughts")
    reasoning: str = Field(description="Reasoning behind the thoughts")
    self_criticism: str = Field(description="Constructive self-criticism")
    plan: list[str] = Field(description="Short list that conveys the long-term plan")
    speak: str = Field(description="Summary of thoughts, to say to user")

    def summary(self) -> str:
        """Return the main thought text as this model's summary."""
        return self.text

    def summaryv2(self) -> str:
        """Prefix the standard task preamble onto the thought text and return it.

        NOTE(review): in the diff under review the preamble was written as
        unparenthesized adjacent string literals on separate statements, so the
        bare "## Your Task\n" line and both continuation lines were discarded
        no-op expressions and only the first sentence was kept. The literals
        are parenthesized here so the whole preamble is a single string (the
        same text is correctly concatenated in build_system_prompt).

        Side effect: mutates ``self.text`` in place before returning it.
        """
        preamble = (
            "## Your Task\n"
            "The user will specify a task for you to execute, in triple quotes,"
            " in the next message. Your job is to complete the task while following"
            " your directives as given above, and terminate when your task is done."
        )
        self.text = preamble + self.text
        return self.text


class OneShotAgentActionProposal(ActionProposal):
thoughts: AssistantThoughts # type: ignore
Expand Down Expand Up @@ -89,30 +101,28 @@
)

def __init__(
self,
configuration: OneShotAgentPromptConfiguration,
logger: Logger,
self,
configuration: OneShotAgentPromptConfiguration,
logger: Logger,
):
self.config = configuration
self.response_schema = JSONSchema.from_dict(
OneShotAgentActionProposal.model_json_schema()
)
self.response_schema = JSONSchema.from_dict(OneShotAgentActionProposal.schema())

Check warning on line 109 in autogpt/autogpt/agents/prompt_strategies/one_shot.py

View check run for this annotation

Codecov / codecov/patch

autogpt/autogpt/agents/prompt_strategies/one_shot.py#L109

Added line #L109 was not covered by tests
self.logger = logger

@property
def llm_classification(self) -> LanguageModelClassification:
def model_classification(self) -> LanguageModelClassification:
return LanguageModelClassification.FAST_MODEL # FIXME: dynamic switching

def build_prompt(
self,
*,
messages: list[ChatMessage],
task: str,
ai_profile: AIProfile,
ai_directives: AIDirectives,
commands: list[CompletionModelFunction],
include_os_info: bool,
**extras,
self,
*,
messages: list[ChatMessage],
task: str,
ai_profile: AIProfile,
ai_directives: AIDirectives,
commands: list[CompletionModelFunction],
include_os_info: bool,
**extras,
) -> ChatPrompt:
"""Constructs and returns a prompt with the following structure:
1. System prompt
Expand All @@ -139,40 +149,33 @@
)

def build_system_prompt(
self,
ai_profile: AIProfile,
ai_directives: AIDirectives,
commands: list[CompletionModelFunction],
include_os_info: bool,
self,
ai_profile: AIProfile,
ai_directives: AIDirectives,
commands: list[CompletionModelFunction],
include_os_info: bool,
) -> tuple[str, str]:
"""
Builds the system prompt.

Returns:
str: The system prompt body
str: The desired start for the LLM's response; used to steer the output
"""
response_fmt_instruction, response_prefill = self.response_format_instruction(
self.config.use_functions_api
)
system_prompt_parts = (
self._generate_intro_prompt(ai_profile)
+ (self._generate_os_info() if include_os_info else [])
+ [
self.config.body_template.format(
constraints=format_numbered_list(ai_directives.constraints),
resources=format_numbered_list(ai_directives.resources),
commands=self._generate_commands_list(commands),
best_practices=format_numbered_list(ai_directives.best_practices),
)
]
+ [
"## Your Task\n"
"The user will specify a task for you to execute, in triple quotes,"
" in the next message. Your job is to complete the task while following"
" your directives as given above, and terminate when your task is done."
]
+ ["## RESPONSE FORMAT\n" + response_fmt_instruction]
self._generate_intro_prompt(ai_profile)
+ (self._generate_os_info() if include_os_info else [])
+ [
self.config.body_template.format(
constraints=format_numbered_list(ai_directives.constraints),
resources=format_numbered_list(ai_directives.resources),
commands=self._generate_commands_list(commands),
best_practices=format_numbered_list(ai_directives.best_practices),
)
]
+ [
"## Your Task\n"
"The user will specify a task for you to execute, in triple quotes,"
" in the next message. Your job is to complete the task while following"
" your directives as given above, and terminate when your task is done."
]
+ ["## RESPONSE FORMAT\n" + response_fmt_instruction]
)

# Join non-empty parts together into paragraph format
Expand All @@ -182,7 +185,7 @@
)

def response_format_instruction(self, use_functions_api: bool) -> tuple[str, str]:
response_schema = self.response_schema.model_copy(deep=True)
response_schema = self.response_schema.copy(deep=True)

Check warning on line 188 in autogpt/autogpt/agents/prompt_strategies/one_shot.py

View check run for this annotation

Codecov / codecov/patch

autogpt/autogpt/agents/prompt_strategies/one_shot.py#L188

Added line #L188 was not covered by tests
assert response_schema.properties
if use_functions_api and "use_tool" in response_schema.properties:
del response_schema.properties["use_tool"]
Expand All @@ -197,9 +200,9 @@

return (
(
f"YOU MUST ALWAYS RESPOND WITH A JSON OBJECT OF THE FOLLOWING TYPE:\n"
f"{response_format}"
+ ("\n\nYOU MUST ALSO INVOKE A TOOL!" if use_functions_api else "")
f"YOU MUST ALWAYS RESPOND WITH A JSON OBJECT OF THE FOLLOWING TYPE:\n"
f"{response_format}"
+ ("\n\nYOU MUST ALSO INVOKE A TOOL!" if use_functions_api else "")
),
response_prefill,
)
Expand Down Expand Up @@ -250,13 +253,13 @@
raise

def parse_response_content(
self,
response: AssistantChatMessage,
self,
response: AssistantChatMessage,
) -> OneShotAgentActionProposal:
if not response.content:
raise InvalidAgentResponseError("Assistant response has no text content")

self.logger.debug(
self.logger.info(

Check warning on line 262 in autogpt/autogpt/agents/prompt_strategies/one_shot.py

View check run for this annotation

Codecov / codecov/patch

autogpt/autogpt/agents/prompt_strategies/one_shot.py#L262

Added line #L262 was not covered by tests
"LLM response content:"
+ (
f"\n{response.content}"
Expand All @@ -274,8 +277,5 @@
raise InvalidAgentResponseError("Assistant did not use a tool")
assistant_reply_dict["use_tool"] = response.tool_calls[0].function

parsed_response = OneShotAgentActionProposal.model_validate(
assistant_reply_dict
)
parsed_response.raw_message = response.copy()
parsed_response = OneShotAgentActionProposal.parse_obj(assistant_reply_dict)

Check warning on line 280 in autogpt/autogpt/agents/prompt_strategies/one_shot.py

View check run for this annotation

Codecov / codecov/patch

autogpt/autogpt/agents/prompt_strategies/one_shot.py#L280

Added line #L280 was not covered by tests
return parsed_response
Loading