AutoGPT/v2: First pass with small fixes
* Typing fixes & improvements

* Improved console output formatting

* Added support for all OpenAI GPT-3.5-turbo and GPT-4 model versions

* Added token counting functions to ModelProviders
Pwuts committed Sep 17, 2023
1 parent f4d319c commit 11920b8
Showing 18 changed files with 449 additions and 149 deletions.
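Two of the commit-message items above (support for all GPT-3.5-turbo and GPT-4 model versions, and token counting functions on ModelProviders) live in model-provider files that are not expanded in the diffs below. As a rough, hypothetical sketch only — the helper name, signature, and use of tiktoken are assumptions, not this commit's actual code — a token-counting helper for OpenAI models might look like this:

import tiktoken


def count_tokens(text: str, model_name: str = "gpt-3.5-turbo") -> int:
    """Count tokens for a given OpenAI model (illustrative helper, not from this commit)."""
    try:
        encoding = tiktoken.encoding_for_model(model_name)
    except KeyError:
        # Unknown or very new model names fall back to the encoding shared by
        # the GPT-3.5-turbo / GPT-4 families.
        encoding = tiktoken.get_encoding("cl100k_base")
    return len(encoding.encode(text))


print(count_tokens("Hello, world!"))  # prints the token count for the default model
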
2 changes: 1 addition & 1 deletion autogpts/autogpt/autogpt/core/ability/simple.py
@@ -48,7 +48,7 @@ def __init__(
         self._memory = memory
         self._workspace = workspace
         self._model_providers = model_providers
-        self._abilities = []
+        self._abilities: list[Ability] = []
         for (
             ability_name,
             ability_configuration,
1 change: 0 additions & 1 deletion autogpts/autogpt/autogpt/core/planning/__init__.py
@@ -2,7 +2,6 @@
 from autogpt.core.planning.schema import (
     LanguageModelClassification,
     LanguageModelPrompt,
-    LanguageModelResponse,
     Task,
     TaskStatus,
     TaskType,
39 changes: 19 additions & 20 deletions autogpts/autogpt/autogpt/core/planning/schema.py
@@ -1,12 +1,12 @@
 import enum
+from typing import Optional
 
 from pydantic import BaseModel, Field
 
 from autogpt.core.ability.schema import AbilityResult
 from autogpt.core.resource.model_providers.schema import (
     LanguageModelFunction,
     LanguageModelMessage,
-    LanguageModelProviderModelResponse,
 )
@@ -19,43 +19,42 @@ class LanguageModelClassification(str, enum.Enum):
     """
 
-    FAST_MODEL: str = "fast_model"
-    SMART_MODEL: str = "smart_model"
+    FAST_MODEL = "fast_model"
+    SMART_MODEL = "smart_model"
 
 
 class LanguageModelPrompt(BaseModel):
     messages: list[LanguageModelMessage]
     functions: list[LanguageModelFunction] = Field(default_factory=list)
 
     def __str__(self):
-        return "\n\n".join([f"{m.role.value}: {m.content}" for m in self.messages])
-
-
-class LanguageModelResponse(LanguageModelProviderModelResponse):
-    """Standard response struct for a response from a language model."""
+        return "\n\n".join(
+            f"{m.role.value.upper()}: {m.content}"
+            for m in self.messages
+        )
 
 
 class TaskType(str, enum.Enum):
-    RESEARCH: str = "research"
-    WRITE: str = "write"
-    EDIT: str = "edit"
-    CODE: str = "code"
-    DESIGN: str = "design"
-    TEST: str = "test"
-    PLAN: str = "plan"
+    RESEARCH = "research"
+    WRITE = "write"
+    EDIT = "edit"
+    CODE = "code"
+    DESIGN = "design"
+    TEST = "test"
+    PLAN = "plan"
 
 
 class TaskStatus(str, enum.Enum):
-    BACKLOG: str = "backlog"
-    READY: str = "ready"
-    IN_PROGRESS: str = "in_progress"
-    DONE: str = "done"
+    BACKLOG = "backlog"
+    READY = "ready"
+    IN_PROGRESS = "in_progress"
+    DONE = "done"
 
 
 class TaskContext(BaseModel):
     cycle_count: int = 0
     status: TaskStatus = TaskStatus.BACKLOG
-    parent: "Task" = None
+    parent: Optional["Task"] = None
     prior_actions: list[AbilityResult] = Field(default_factory=list)
     memories: list = Field(default_factory=list)
     user_input: list[str] = Field(default_factory=list)
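The `parent: Optional["Task"] = None` fix above is the standard way to annotate a field whose default is None. A minimal, self-contained sketch of the same pattern for a self-referencing pydantic model (v1-style forward-ref handling assumed; this is not code from the commit):

from typing import Optional

from pydantic import BaseModel


class Task(BaseModel):
    name: str
    # The default is None, so the annotation must allow None explicitly.
    parent: Optional["Task"] = None


# Resolve the string forward reference "Task" used inside the class body.
Task.update_forward_refs()

root = Task(name="root")
child = Task(name="child", parent=root)
print(child.parent.name)  # -> root
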
5 changes: 3 additions & 2 deletions autogpts/autogpt/autogpt/core/planning/simple.py
@@ -14,14 +14,15 @@
 from autogpt.core.planning.base import PromptStrategy
 from autogpt.core.planning.schema import (
     LanguageModelClassification,
-    LanguageModelResponse,
     Task,
 )
 from autogpt.core.resource.model_providers import (
     LanguageModelProvider,
+    LanguageModelResponse,
     ModelProviderName,
     OpenAIModelName,
 )
+from autogpt.core.runner.client_lib.logging.helpers import dump_prompt
 from autogpt.core.workspace import Workspace
 
 
@@ -153,7 +154,7 @@ async def chat_with_model(
         template_kwargs.update(kwargs)
         prompt = prompt_strategy.build_prompt(**template_kwargs)
 
-        self._logger.debug(f"Using prompt:\n{prompt}\n\n")
+        self._logger.debug(f"Using prompt:\n{dump_prompt(prompt)}\n")
         response = await provider.create_language_completion(
             model_prompt=prompt.messages,
             functions=prompt.functions,
18 changes: 13 additions & 5 deletions autogpts/autogpt/autogpt/core/planning/strategies/initial_plan.py
@@ -1,3 +1,5 @@
+import logging
+
 from autogpt.core.configuration import SystemConfiguration, UserConfigurable
 from autogpt.core.planning.base import PromptStrategy
 from autogpt.core.planning.schema import (
@@ -13,6 +15,8 @@
     MessageRole,
 )
 
+logger = logging.getLogger(__name__)
+
 
 class InitialPlanConfiguration(SystemConfiguration):
     model_classification: LanguageModelClassification = UserConfigurable()
@@ -98,7 +102,7 @@ class InitialPlan(PromptStrategy):
         },
     }
 
-    default_configuration = InitialPlanConfiguration(
+    default_configuration: InitialPlanConfiguration = InitialPlanConfiguration(
         model_classification=LanguageModelClassification.SMART_MODEL,
         system_prompt_template=DEFAULT_SYSTEM_PROMPT_TEMPLATE,
         system_info=DEFAULT_SYSTEM_INFO,
@@ -183,8 +187,12 @@ def parse_response_content(
             The parsed response.
 
         """
-        parsed_response = json_loads(response_content["function_call"]["arguments"])
-        parsed_response["task_list"] = [
-            Task.parse_obj(task) for task in parsed_response["task_list"]
-        ]
+        try:
+            parsed_response = json_loads(response_content["function_call"]["arguments"])
+            parsed_response["task_list"] = [
+                Task.parse_obj(task) for task in parsed_response["task_list"]
+            ]
+        except KeyError:
+            logger.debug(f"Failed to parse this response content: {response_content}")
+            raise
         return parsed_response
autogpts/autogpt/autogpt/core/planning/strategies/name_and_goals.py
@@ -1,3 +1,5 @@
+import logging
+
 from autogpt.core.configuration import SystemConfiguration, UserConfigurable
 from autogpt.core.planning.base import PromptStrategy
 from autogpt.core.planning.schema import (
@@ -11,6 +13,7 @@
     MessageRole,
 )
 
+logger = logging.getLogger(__name__)
 
 class NameAndGoalsConfiguration(SystemConfiguration):
     model_classification: LanguageModelClassification = UserConfigurable()
@@ -21,12 +24,16 @@ class NameAndGoalsConfiguration(SystemConfiguration):
 
 class NameAndGoals(PromptStrategy):
     DEFAULT_SYSTEM_PROMPT = (
-        "Your job is to respond to a user-defined task by invoking the `create_agent` function "
-        "to generate an autonomous agent to complete the task. You should supply a role-based "
-        "name for the agent, an informative description for what the agent does, and 1 to 5 "
-        "goals that are optimally aligned with the successful completion of its assigned task.\n\n"
+        "Your job is to respond to a user-defined task, given in triple quotes, by "
+        "invoking the `create_agent` function to generate an autonomous agent to "
+        "complete the task. "
+        "You should supply a role-based name for the agent, "
+        "an informative description for what the agent does, and "
+        "1 to 5 goals that are optimally aligned with the successful completion of "
+        "its assigned task.\n"
+        "\n"
         "Example Input:\n"
-        "Help me with marketing my business\n\n"
+        '"""Help me with marketing my business"""\n\n'
         "Example Function Call:\n"
         "create_agent(name='CMOGPT', "
         "description='A professional digital marketer AI that assists Solopreneurs in "
@@ -43,7 +50,7 @@ class NameAndGoals(PromptStrategy):
         "remains on track.'])"
     )
 
-    DEFAULT_USER_PROMPT_TEMPLATE = "'{user_objective}'"
+    DEFAULT_USER_PROMPT_TEMPLATE = '"""{user_objective}"""'
 
     DEFAULT_CREATE_AGENT_FUNCTION = {
         "name": "create_agent",
@@ -77,7 +84,7 @@ class NameAndGoals(PromptStrategy):
         },
     }
 
-    default_configuration = NameAndGoalsConfiguration(
+    default_configuration: NameAndGoalsConfiguration = NameAndGoalsConfiguration(
         model_classification=LanguageModelClassification.SMART_MODEL,
         system_prompt=DEFAULT_SYSTEM_PROMPT,
         user_prompt_template=DEFAULT_USER_PROMPT_TEMPLATE,
@@ -135,5 +142,9 @@ def parse_response_content(
             The parsed response.
 
         """
-        parsed_response = json_loads(response_content["function_call"]["arguments"])
+        try:
+            parsed_response = json_loads(response_content["function_call"]["arguments"])
+        except KeyError:
+            logger.debug(f"Failed to parse this response content: {response_content}")
+            raise
         return parsed_response
28 changes: 18 additions & 10 deletions autogpts/autogpt/autogpt/core/planning/strategies/next_ability.py
@@ -1,3 +1,5 @@
+import logging
+
 from autogpt.core.configuration import SystemConfiguration, UserConfigurable
 from autogpt.core.planning.base import PromptStrategy
 from autogpt.core.planning.schema import (
@@ -12,6 +14,8 @@
     MessageRole,
 )
 
+logger = logging.getLogger(__name__)
+
 
 class NextAbilityConfiguration(SystemConfiguration):
     model_classification: LanguageModelClassification = UserConfigurable()
@@ -61,7 +65,7 @@ class NextAbility(PromptStrategy):
         },
     }
 
-    default_configuration = NextAbilityConfiguration(
+    default_configuration: NextAbilityConfiguration = NextAbilityConfiguration(
         model_classification=LanguageModelClassification.SMART_MODEL,
         system_prompt_template=DEFAULT_SYSTEM_PROMPT_TEMPLATE,
         system_info=DEFAULT_SYSTEM_INFO,
@@ -171,13 +175,17 @@ def parse_response_content(
             The parsed response.
 
         """
-        function_name = response_content["function_call"]["name"]
-        function_arguments = json_loads(response_content["function_call"]["arguments"])
-        parsed_response = {
-            "motivation": function_arguments.pop("motivation"),
-            "self_criticism": function_arguments.pop("self_criticism"),
-            "reasoning": function_arguments.pop("reasoning"),
-            "next_ability": function_name,
-            "ability_arguments": function_arguments,
-        }
+        try:
+            function_name = response_content["function_call"]["name"]
+            function_arguments = json_loads(response_content["function_call"]["arguments"])
+            parsed_response = {
+                "motivation": function_arguments.pop("motivation"),
+                "self_criticism": function_arguments.pop("self_criticism"),
+                "reasoning": function_arguments.pop("reasoning"),
+                "next_ability": function_name,
+                "ability_arguments": function_arguments,
+            }
+        except KeyError:
+            logger.debug(f"Failed to parse this response content: {response_content}")
+            raise
        return parsed_response
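All three prompt strategies above now wrap response parsing in the same guard: a missing `function_call` key is logged at debug level and the KeyError is re-raised. A standalone sketch of that behaviour, using the stdlib `json` module in place of the repo's `json_loads` helper (the response shape is the OpenAI-style function-call payload the strategies expect):

import json
import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)


def parse_function_call(response_content: dict) -> dict:
    """Parse the arguments of a function-call response, logging the raw payload on failure."""
    try:
        return json.loads(response_content["function_call"]["arguments"])
    except KeyError:
        logger.debug(f"Failed to parse this response content: {response_content}")
        raise


# A well-formed function-call response parses normally:
ok = {"function_call": {"name": "create_agent", "arguments": '{"agent_name": "CMOGPT"}'}}
print(parse_function_call(ok))

# A plain-text response has no "function_call" key: the payload is logged, then the error re-raised.
try:
    parse_function_call({"content": "Sorry, I can't do that."})
except KeyError:
    pass
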
(The remaining changed files are not expanded here.)
