fix(agent, forge): Silence Pydantic v2 protected namespace model_ warning #7340

Merged 2 commits on Jul 12, 2024
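Why this change is needed: Pydantic v2 reserves every attribute name beginning with `model_` as a "protected namespace" for its own API (`model_dump`, `model_validate`, `model_config`, ...), so defining a field such as `model_name` on a `BaseModel` subclass emits a `UserWarning` at class-definition time. This PR renames the offending Pydantic fields (`model_name` → `llm_name`, `model_info` → `llm_info`) and the matching non-Pydantic attributes (`model_classification` → `llm_classification`) for consistency. A minimal sketch of the behavior, independent of this codebase (`NoisyConfig`/`QuietConfig` are illustrative names, not part of the PR):

```python
# Minimal reproduction of the warning this PR silences (Pydantic v2).
from pydantic import BaseModel


class NoisyConfig(BaseModel):
    # Any field whose name starts with "model_" collides with Pydantic v2's
    # protected namespace and triggers, roughly:
    #   UserWarning: Field "model_name" has conflict with protected
    #   namespace "model_".
    model_name: str = "gpt-3.5-turbo"


class QuietConfig(BaseModel):
    # Renaming the field, as this PR does throughout, avoids the collision.
    llm_name: str = "gpt-3.5-turbo"
```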
10 changes: 5 additions & 5 deletions autogpt/autogpt/agent_factory/profile_generator.py
```diff
@@ -19,7 +19,7 @@


 class AgentProfileGeneratorConfiguration(SystemConfiguration):
-    model_classification: LanguageModelClassification = UserConfigurable(
+    llm_classification: LanguageModelClassification = UserConfigurable(
         default=LanguageModelClassification.SMART_MODEL
     )
     _example_call: object = {
@@ -148,21 +148,21 @@ class AgentProfileGenerator(PromptStrategy):

     def __init__(
         self,
-        model_classification: LanguageModelClassification,
+        llm_classification: LanguageModelClassification,
         system_prompt: str,
         user_prompt_template: str,
         create_agent_function: dict,
     ):
-        self._model_classification = model_classification
+        self._llm_classification = llm_classification
         self._system_prompt_message = system_prompt
         self._user_prompt_template = user_prompt_template
         self._create_agent_function = CompletionModelFunction.model_validate(
             create_agent_function
         )

     @property
-    def model_classification(self) -> LanguageModelClassification:
-        return self._model_classification
+    def llm_classification(self) -> LanguageModelClassification:
+        return self._llm_classification

     def build_prompt(self, user_objective: str = "", **kwargs) -> ChatPrompt:
         system_message = ChatMessage.system(self._system_prompt_message)
```
2 changes: 1 addition & 1 deletion autogpt/autogpt/agents/agent.py
```diff
@@ -119,7 +119,7 @@ def __init__(
                 lambda x: self.llm_provider.count_tokens(x, self.llm.name),
                 llm_provider,
                 ActionHistoryConfiguration(
-                    model_name=app_config.fast_llm, max_tokens=self.send_token_limit
+                    llm_name=app_config.fast_llm, max_tokens=self.send_token_limit
                 ),
             )
             .run_after(WatchdogComponent)
```
2 changes: 1 addition & 1 deletion autogpt/autogpt/agents/prompt_strategies/one_shot.py
```diff
@@ -100,7 +100,7 @@ def __init__(
         self.logger = logger

     @property
-    def model_classification(self) -> LanguageModelClassification:
+    def llm_classification(self) -> LanguageModelClassification:
         return LanguageModelClassification.FAST_MODEL  # FIXME: dynamic switching

     def build_prompt(
```
6 changes: 3 additions & 3 deletions docs/content/forge/components/built-in-components.md
```diff
@@ -40,7 +40,7 @@ Necessary for saving and loading agent's state (preserving session).

 | Config variable  | Details                                | Type  | Default                            |
 | ---------------- | -------------------------------------- | ----- | ---------------------------------- |
-| `storage_path`   | Path to agent files, e.g. state        | `str` | `agents/{agent_id}/`[^1]           |
+| `storage_path`   | Path to agent files, e.g. state        | `str` | `agents/{agent_id}/`[^1]           |
 | `workspace_path` | Path to files that agent has access to | `str` | `agents/{agent_id}/workspace/`[^1] |

 [^1] This option is set dynamically during component construction as opposed to by default inside the configuration model, `{agent_id}` is replaced with the agent's unique identifier.
@@ -84,7 +84,7 @@ Keeps track of agent's actions and their outcomes. Provides their summary to the

 | Config variable        | Details                                                  | Type        | Default            |
 | ---------------------- | -------------------------------------------------------- | ----------- | ------------------ |
-| `model_name`           | Name of the llm model used to compress the history       | `ModelName` | `"gpt-3.5-turbo"`  |
+| `llm_name`             | Name of the llm model used to compress the history       | `ModelName` | `"gpt-3.5-turbo"`  |
 | `max_tokens`           | Maximum number of tokens to use for the history summary  | `int`       | `1024`             |
 | `spacy_language_model` | Language model used for summary chunking using spacy     | `str`       | `"en_core_web_sm"` |
 | `full_message_count`   | Number of cycles to include unsummarized in the prompt   | `int`       | `4`                |
@@ -178,7 +178,7 @@ Allows agent to read websites using Selenium.

 | Config variable | Details                                     | Type                                          | Default           |
 | --------------- | ------------------------------------------- | --------------------------------------------- | ----------------- |
-| `model_name`    | Name of the llm model used to read websites | `ModelName`                                   | `"gpt-3.5-turbo"` |
+| `llm_name`      | Name of the llm model used to read websites | `ModelName`                                   | `"gpt-3.5-turbo"` |
 | `web_browser`   | Web browser used by Selenium                | `"chrome" \| "firefox" \| "safari" \| "edge"` | `"chrome"`        |
 | `headless`      | Run browser in headless mode                | `bool`                                        | `True`            |
 | `user_agent`    | User agent used by the browser              | `str`                                         | `"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"` |
```
4 changes: 2 additions & 2 deletions forge/forge/components/README.md
```diff
@@ -116,7 +116,7 @@ You can set sensitive variables in the `.json` file as well but it's recommended
         "github_username": null
     },
     "ActionHistoryConfiguration": {
-        "model_name": "gpt-3.5-turbo",
+        "llm_name": "gpt-3.5-turbo",
         "max_tokens": 1024,
         "spacy_language_model": "en_core_web_sm"
     },
@@ -129,7 +129,7 @@
         "duckduckgo_max_attempts": 3
     },
     "WebSeleniumConfiguration": {
-        "model_name": "gpt-3.5-turbo",
+        "llm_name": "gpt-3.5-turbo",
         "web_browser": "chrome",
         "headless": true,
         "user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36",
```
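Because these renamed keys live in user-editable component configuration files like the one above, a config saved before this change would still carry the old `model_name` keys. A hypothetical one-off migration sketch (the file path, and the assumption that the config is plain JSON, are mine, not part of the PR):

```python
import json
from pathlib import Path

# Hypothetical one-off migration: rename the old "model_name" keys to
# "llm_name" in a previously saved component config file.
path = Path("config.json")  # assumed location of the saved config
config = json.loads(path.read_text())

for section in ("ActionHistoryConfiguration", "WebSeleniumConfiguration"):
    entry = config.get(section)
    if isinstance(entry, dict) and "model_name" in entry:
        entry["llm_name"] = entry.pop("model_name")

path.write_text(json.dumps(config, indent=4))
```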
4 changes: 2 additions & 2 deletions forge/forge/components/action_history/action_history.py
```diff
@@ -16,7 +16,7 @@


 class ActionHistoryConfiguration(BaseModel):
-    model_name: ModelName = OpenAIModelName.GPT3
+    llm_name: ModelName = OpenAIModelName.GPT3
     """Name of the llm model used to compress the history"""
     max_tokens: int = 1024
     """Maximum number of tokens to use up with generated history messages"""
@@ -97,7 +97,7 @@
     async def after_execute(self, result: ActionResult) -> None:
         self.event_history.register_result(result)
         await self.event_history.handle_compression(
-            self.llm_provider, self.config.model_name, self.config.spacy_language_model
+            self.llm_provider, self.config.llm_name, self.config.spacy_language_model
         )

     @staticmethod
```
8 changes: 4 additions & 4 deletions forge/forge/components/web/selenium.py
```diff
@@ -55,7 +55,7 @@ class BrowsingError(CommandExecutionError):


 class WebSeleniumConfiguration(BaseModel):
-    model_name: ModelName = OpenAIModelName.GPT3
+    llm_name: ModelName = OpenAIModelName.GPT3
     """Name of the llm model used to read websites"""
     web_browser: Literal["chrome", "firefox", "safari", "edge"] = "chrome"
     """Web browser used by Selenium"""
@@ -164,7 +164,7 @@ async def read_webpage(
         elif get_raw_content:
             if (
                 output_tokens := self.llm_provider.count_tokens(
-                    text, self.config.model_name
+                    text, self.config.llm_name
                 )
             ) > MAX_RAW_CONTENT_LENGTH:
                 oversize_factor = round(output_tokens / MAX_RAW_CONTENT_LENGTH, 1)
@@ -382,7 +382,7 @@ async def summarize_webpage(
                 text,
                 topics_of_interest=topics_of_interest,
                 llm_provider=self.llm_provider,
-                model_name=self.config.model_name,
+                model_name=self.config.llm_name,
                 spacy_model=self.config.browse_spacy_language_model,
             )
             return "\n".join(f"* {i}" for i in information)
@@ -391,7 +391,7 @@ async def summarize_webpage(
                 text,
                 question=question,
                 llm_provider=self.llm_provider,
-                model_name=self.config.model_name,
+                model_name=self.config.llm_name,
                 spacy_model=self.config.browse_spacy_language_model,
             )
             return result
```
2 changes: 1 addition & 1 deletion forge/forge/llm/prompting/base.py
```diff
@@ -10,7 +10,7 @@
 class PromptStrategy(abc.ABC):
     @property
     @abc.abstractmethod
-    def model_classification(self) -> LanguageModelClassification:
+    def llm_classification(self) -> LanguageModelClassification:
         ...

     @abc.abstractmethod
```
4 changes: 2 additions & 2 deletions forge/forge/llm/providers/_openai_base.py
```diff
@@ -224,7 +224,7 @@ async def create_chat_completion(
                 tool_calls=tool_calls or None,
             ),
             parsed_result=parsed_result,
-            model_info=self.CHAT_MODELS[model_name],
+            llm_info=self.CHAT_MODELS[model_name],
            prompt_tokens_used=t_input,
            completion_tokens_used=t_output,
        )
@@ -457,7 +457,7 @@ async def create_embedding(

         return EmbeddingModelResponse(
             embedding=embedding_parser(response.data[0].embedding),
-            model_info=self.EMBEDDING_MODELS[model_name],
+            llm_info=self.EMBEDDING_MODELS[model_name],
             prompt_tokens_used=response.usage.prompt_tokens,
         )
```
2 changes: 1 addition & 1 deletion forge/forge/llm/providers/anthropic.py
```diff
@@ -309,7 +309,7 @@ async def create_chat_completion(
         return ChatModelResponse(
             response=assistant_msg,
             parsed_result=parsed_result,
-            model_info=ANTHROPIC_CHAT_MODELS[model_name],
+            llm_info=ANTHROPIC_CHAT_MODELS[model_name],
             prompt_tokens_used=t_input,
             completion_tokens_used=t_output,
         )
```
2 changes: 1 addition & 1 deletion forge/forge/llm/providers/schema.py
```diff
@@ -186,7 +186,7 @@ class ModelResponse(BaseModel):

     prompt_tokens_used: int
     completion_tokens_used: int
-    model_info: ModelInfo
+    llm_info: ModelInfo


 class ModelProviderConfiguration(SystemConfiguration):
```
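For completeness: Pydantic v2 also offers a way to silence this warning without renaming anything, by clearing the protected namespaces on the model's config. The PR deliberately renames instead, which keeps the `model_` prefix unambiguous. A sketch of the alternative it did not take:

```python
from pydantic import BaseModel, ConfigDict


class LegacyConfig(BaseModel):
    # Emptying protected_namespaces suppresses the warning while keeping
    # the old field name; this PR opted for the rename instead.
    model_config = ConfigDict(protected_namespaces=())

    model_name: str = "gpt-3.5-turbo"
```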