Skip to content

Commit

Permalink
Rename model_* fields to llm_*
Browse files Browse the repository at this point in the history
  • Loading branch information
kcze committed Jul 8, 2024
1 parent cc77248 commit 63da9c4
Show file tree
Hide file tree
Showing 11 changed files with 25 additions and 34 deletions.
13 changes: 5 additions & 8 deletions autogpt/autogpt/agent_factory/profile_generator.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,15 +12,14 @@
)
from forge.models.config import SystemConfiguration, UserConfigurable
from forge.models.json_schema import JSONSchema
from pydantic import ConfigDict

from autogpt.app.config import AppConfig

logger = logging.getLogger(__name__)


class AgentProfileGeneratorConfiguration(SystemConfiguration):
model_classification: LanguageModelClassification = UserConfigurable(
llm_classification: LanguageModelClassification = UserConfigurable(
default=LanguageModelClassification.SMART_MODEL
)
_example_call: object = {
Expand Down Expand Up @@ -141,8 +140,6 @@ class AgentProfileGeneratorConfiguration(SystemConfiguration):
).model_dump()
)

model_config = ConfigDict(protected_namespaces=())


class AgentProfileGenerator(PromptStrategy):
default_configuration: AgentProfileGeneratorConfiguration = (
Expand All @@ -151,21 +148,21 @@ class AgentProfileGenerator(PromptStrategy):

def __init__(
self,
model_classification: LanguageModelClassification,
llm_classification: LanguageModelClassification,
system_prompt: str,
user_prompt_template: str,
create_agent_function: dict,
):
self._model_classification = model_classification
self._llm_classification = llm_classification
self._system_prompt_message = system_prompt
self._user_prompt_template = user_prompt_template
self._create_agent_function = CompletionModelFunction.model_validate(
create_agent_function
)

@property
def model_classification(self) -> LanguageModelClassification:
return self._model_classification
def llm_classification(self) -> LanguageModelClassification:
return self._llm_classification

def build_prompt(self, user_objective: str = "", **kwargs) -> ChatPrompt:
system_message = ChatMessage.system(self._system_prompt_message)
Expand Down
2 changes: 1 addition & 1 deletion autogpt/autogpt/agents/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -119,7 +119,7 @@ def __init__(
lambda x: self.llm_provider.count_tokens(x, self.llm.name),
llm_provider,
ActionHistoryConfiguration(
model_name=app_config.fast_llm, max_tokens=self.send_token_limit
llm_name=app_config.fast_llm, max_tokens=self.send_token_limit
),
)
.run_after(WatchdogComponent)
Expand Down
2 changes: 1 addition & 1 deletion autogpt/autogpt/agents/prompt_strategies/one_shot.py
Original file line number Diff line number Diff line change
Expand Up @@ -100,7 +100,7 @@ def __init__(
self.logger = logger

@property
def model_classification(self) -> LanguageModelClassification:
def llm_classification(self) -> LanguageModelClassification:
return LanguageModelClassification.FAST_MODEL # FIXME: dynamic switching

def build_prompt(
Expand Down
6 changes: 3 additions & 3 deletions docs/content/forge/components/built-in-components.md
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ Necessary for saving and loading agent's state (preserving session).

| Config variable | Details | Type | Default |
| ---------------- | -------------------------------------- | ----- | ---------------------------------- |
| `storage_path` | Path to agent files, e.g. state | `str` | `agents/{agent_id}/`[^1] |
| `storage_path` | Path to agent files, e.g. state | `str` | `agents/{agent_id}/`[^1] |
| `workspace_path` | Path to files that agent has access to | `str` | `agents/{agent_id}/workspace/`[^1] |

[^1] This option is set dynamically during component construction rather than by a default inside the configuration model; `{agent_id}` is replaced with the agent's unique identifier.
Expand Down Expand Up @@ -84,7 +84,7 @@ Keeps track of agent's actions and their outcomes. Provides their summary to the

| Config variable | Details | Type | Default |
| ---------------------- | ------------------------------------------------------- | ----------- | ------------------ |
| `model_name` | Name of the llm model used to compress the history | `ModelName` | `"gpt-3.5-turbo"` |
| `llm_name` | Name of the llm model used to compress the history | `ModelName` | `"gpt-3.5-turbo"` |
| `max_tokens` | Maximum number of tokens to use for the history summary | `int` | `1024` |
| `spacy_language_model` | Language model used for summary chunking using spacy | `str` | `"en_core_web_sm"` |
| `full_message_count` | Number of cycles to include unsummarized in the prompt | `int` | `4` |
Expand Down Expand Up @@ -178,7 +178,7 @@ Allows agent to read websites using Selenium.

| Config variable | Details | Type | Default |
| ----------------------------- | ------------------------------------------- | --------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------- |
| `model_name` | Name of the llm model used to read websites | `ModelName` | `"gpt-3.5-turbo"` |
| `llm_name` | Name of the llm model used to read websites | `ModelName` | `"gpt-3.5-turbo"` |
| `web_browser` | Web browser used by Selenium | `"chrome" \| "firefox" \| "safari" \| "edge"` | `"chrome"` |
| `headless` | Run browser in headless mode | `bool` | `True` |
| `user_agent` | User agent used by the browser | `str` | `"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"` |
Expand Down
4 changes: 2 additions & 2 deletions forge/forge/components/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -116,7 +116,7 @@ You can set sensitive variables in the `.json` file as well but it's recommended
"github_username": null
},
"ActionHistoryConfiguration": {
"model_name": "gpt-3.5-turbo",
"llm_name": "gpt-3.5-turbo",
"max_tokens": 1024,
"spacy_language_model": "en_core_web_sm"
},
Expand All @@ -129,7 +129,7 @@ You can set sensitive variables in the `.json` file as well but it's recommended
"duckduckgo_max_attempts": 3
},
"WebSeleniumConfiguration": {
"model_name": "gpt-3.5-turbo",
"llm_name": "gpt-3.5-turbo",
"web_browser": "chrome",
"headless": true,
"user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36",
Expand Down
8 changes: 3 additions & 5 deletions forge/forge/components/action_history/action_history.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@

from typing import Callable, Iterator, Optional

from pydantic import BaseModel, ConfigDict
from pydantic import BaseModel

from forge.agent.components import ConfigurableComponent
from forge.agent.protocols import AfterExecute, AfterParse, MessageProvider
Expand All @@ -16,7 +16,7 @@


class ActionHistoryConfiguration(BaseModel):
model_name: ModelName = OpenAIModelName.GPT3
llm_name: ModelName = OpenAIModelName.GPT3

Check warning on line 19 in forge/forge/components/action_history/action_history.py

View check run for this annotation

Codecov / codecov/patch

forge/forge/components/action_history/action_history.py#L19

Added line #L19 was not covered by tests
"""Name of the llm model used to compress the history"""
max_tokens: int = 1024
"""Maximum number of tokens to use up with generated history messages"""
Expand All @@ -25,8 +25,6 @@ class ActionHistoryConfiguration(BaseModel):
full_message_count: int = 4
"""Number of latest non-summarized messages to include in the history"""

model_config = ConfigDict(protected_namespaces=())


class ActionHistoryComponent(
MessageProvider,
Expand Down Expand Up @@ -99,7 +97,7 @@ def after_parse(self, result: AnyProposal) -> None:
async def after_execute(self, result: ActionResult) -> None:
self.event_history.register_result(result)
await self.event_history.handle_compression(
self.llm_provider, self.config.model_name, self.config.spacy_language_model
self.llm_provider, self.config.llm_name, self.config.spacy_language_model
)

@staticmethod
Expand Down
12 changes: 5 additions & 7 deletions forge/forge/components/web/selenium.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
from urllib.request import urlretrieve

from bs4 import BeautifulSoup
from pydantic import BaseModel, ConfigDict
from pydantic import BaseModel
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.chrome.options import Options as ChromeOptions
from selenium.webdriver.chrome.service import Service as ChromeDriverService
Expand Down Expand Up @@ -55,7 +55,7 @@ class BrowsingError(CommandExecutionError):


class WebSeleniumConfiguration(BaseModel):
model_name: ModelName = OpenAIModelName.GPT3
llm_name: ModelName = OpenAIModelName.GPT3
"""Name of the llm model used to read websites"""
web_browser: Literal["chrome", "firefox", "safari", "edge"] = "chrome"
"""Web browser used by Selenium"""
Expand All @@ -69,8 +69,6 @@ class WebSeleniumConfiguration(BaseModel):
browse_spacy_language_model: str = "en_core_web_sm"
"""Spacy language model used for chunking text"""

model_config = ConfigDict(protected_namespaces=())


class WebSeleniumComponent(
DirectiveProvider, CommandProvider, ConfigurableComponent[WebSeleniumConfiguration]
Expand Down Expand Up @@ -166,7 +164,7 @@ async def read_webpage(
elif get_raw_content:
if (
output_tokens := self.llm_provider.count_tokens(
text, self.config.model_name
text, self.config.llm_name
)
) > MAX_RAW_CONTENT_LENGTH:
oversize_factor = round(output_tokens / MAX_RAW_CONTENT_LENGTH, 1)
Expand Down Expand Up @@ -384,7 +382,7 @@ async def summarize_webpage(
text,
topics_of_interest=topics_of_interest,
llm_provider=self.llm_provider,
model_name=self.config.model_name,
model_name=self.config.llm_name,
spacy_model=self.config.browse_spacy_language_model,
)
return "\n".join(f"* {i}" for i in information)
Expand All @@ -393,7 +391,7 @@ async def summarize_webpage(
text,
question=question,
llm_provider=self.llm_provider,
model_name=self.config.model_name,
model_name=self.config.llm_name,
spacy_model=self.config.browse_spacy_language_model,
)
return result
2 changes: 1 addition & 1 deletion forge/forge/llm/prompting/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
class PromptStrategy(abc.ABC):
@property
@abc.abstractmethod
def model_classification(self) -> LanguageModelClassification:
def llm_classification(self) -> LanguageModelClassification:
...

@abc.abstractmethod
Expand Down
4 changes: 2 additions & 2 deletions forge/forge/llm/providers/_openai_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -224,7 +224,7 @@ async def create_chat_completion(
tool_calls=tool_calls or None,
),
parsed_result=parsed_result,
model_info=self.CHAT_MODELS[model_name],
llm_info=self.CHAT_MODELS[model_name],
prompt_tokens_used=t_input,
completion_tokens_used=t_output,
)
Expand Down Expand Up @@ -457,7 +457,7 @@ async def create_embedding(

return EmbeddingModelResponse(
embedding=embedding_parser(response.data[0].embedding),
model_info=self.EMBEDDING_MODELS[model_name],
llm_info=self.EMBEDDING_MODELS[model_name],
prompt_tokens_used=response.usage.prompt_tokens,
)

Expand Down
2 changes: 1 addition & 1 deletion forge/forge/llm/providers/anthropic.py
Original file line number Diff line number Diff line change
Expand Up @@ -309,7 +309,7 @@ async def create_chat_completion(
return ChatModelResponse(
response=assistant_msg,
parsed_result=parsed_result,
model_info=ANTHROPIC_CHAT_MODELS[model_name],
llm_info=ANTHROPIC_CHAT_MODELS[model_name],
prompt_tokens_used=t_input,
completion_tokens_used=t_output,
)
Expand Down
4 changes: 1 addition & 3 deletions forge/forge/llm/providers/schema.py
Original file line number Diff line number Diff line change
Expand Up @@ -186,9 +186,7 @@ class ModelResponse(BaseModel):

prompt_tokens_used: int
completion_tokens_used: int
model_info: ModelInfo

model_config = ConfigDict(protected_namespaces=())
llm_info: ModelInfo


class ModelProviderConfiguration(SystemConfiguration):
Expand Down

0 comments on commit 63da9c4

Please sign in to comment.