
Adding pyautogen as optional dependency in pyproject.toml #613

Closed · wants to merge 11 commits
docs/autogen.md (2 changes: 1 addition & 1 deletion)
@@ -8,7 +8,7 @@

The MemGPT+AutoGen integration was last tested using AutoGen version v0.2.0.

-If you are having issues, please first try installing the specific version of AutoGen using `pip install pyautogen==0.2.0`
+You can run `poetry install -E autogen` to install all the dependencies related to the autogen module. If you are having issues, then try installing the specific version of AutoGen using `pip install pyautogen==0.2.0`.

## Overview

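As background for the change this PR makes, here is a minimal sketch of how pyautogen could be declared as an optional dependency behind a Poetry extra in pyproject.toml. The exact version constraint and table layout are assumptions for illustration, not the PR's literal diff:

```toml
[tool.poetry.dependencies]
# Hypothetical pin; the docs above say the integration was last
# tested against AutoGen v0.2.0.
pyautogen = { version = "0.2.0", optional = true }

[tool.poetry.extras]
# Makes `poetry install -E autogen` pull in pyautogen;
# a plain `poetry install` skips it.
autogen = ["pyautogen"]
```

Declaring the dependency as optional keeps the AutoGen stack out of default installs while still letting Poetry resolve and lock it for users who request the extra.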
memgpt/agent.py (80 changes: 68 additions & 12 deletions)
@@ -6,10 +6,22 @@

from memgpt.persistence_manager import LocalStateManager
from memgpt.config import AgentConfig, MemGPTConfig
-from memgpt.system import get_login_event, package_function_response, package_summarize_message, get_initial_boot_messages
+from memgpt.system import (
+    get_login_event,
+    package_function_response,
+    package_summarize_message,
+    get_initial_boot_messages,
+)
from memgpt.memory import CoreMemory as Memory, summarize_messages
from memgpt.openai_tools import create, is_context_overflow_error
-from memgpt.utils import get_local_time, parse_json, united_diff, printd, count_tokens, get_schema_diff
+from memgpt.utils import (
+    get_local_time,
+    parse_json,
+    united_diff,
+    printd,
+    count_tokens,
+    get_schema_diff,
+)
from memgpt.constants import (
FIRST_MESSAGE_ATTEMPTS,
MESSAGE_SUMMARY_WARNING_FRAC,
@@ -29,13 +41,23 @@ def initialize_memory(ai_notes, human_notes):
raise ValueError(ai_notes)
if human_notes is None:
raise ValueError(human_notes)
-memory = Memory(human_char_limit=CORE_MEMORY_HUMAN_CHAR_LIMIT, persona_char_limit=CORE_MEMORY_PERSONA_CHAR_LIMIT)
+memory = Memory(
+    human_char_limit=CORE_MEMORY_HUMAN_CHAR_LIMIT,
+    persona_char_limit=CORE_MEMORY_PERSONA_CHAR_LIMIT,
+)
memory.edit_persona(ai_notes)
memory.edit_human(human_notes)
return memory


-def construct_system_with_memory(system, memory, memory_edit_timestamp, archival_memory=None, recall_memory=None, include_char_count=True):
+def construct_system_with_memory(
+    system,
+    memory,
+    memory_edit_timestamp,
+    archival_memory=None,
+    recall_memory=None,
+    include_char_count=True,
+):
full_system_message = "\n".join(
[
system,
@@ -68,7 +90,11 @@ def initialize_message_sequence(
memory_edit_timestamp = get_local_time()

full_system_message = construct_system_with_memory(
-    system, memory, memory_edit_timestamp, archival_memory=archival_memory, recall_memory=recall_memory
+    system,
+    memory,
+    memory_edit_timestamp,
+    archival_memory=archival_memory,
+    recall_memory=recall_memory,
)
first_user_message = get_login_event() # event letting MemGPT know the user just logged in

@@ -486,7 +512,11 @@ def handle_ai_response(self, response_message):
}
) # extend conversation with function response
self.interface.function_message(f"Error: {error_msg}")
-return messages, None, True  # force a heartbeat to allow agent to handle error
+return (
+    messages,
+    None,
+    True,
+)  # force a heartbeat to allow agent to handle error

# Failure case 2: function name is OK, but function args are bad JSON
try:
@@ -503,7 +533,11 @@ def handle_ai_response(self, response_message):
}
) # extend conversation with function response
self.interface.function_message(f"Error: {error_msg}")
-return messages, None, True  # force a heartbeat to allow agent to handle error
+return (
+    messages,
+    None,
+    True,
+)  # force a heartbeat to allow agent to handle error

# (Still parsing function args)
# Handle requests for immediate heartbeat
@@ -538,7 +572,11 @@ def handle_ai_response(self, response_message):
}
) # extend conversation with function response
self.interface.function_message(f"Error: {error_msg}")
-return messages, None, True  # force a heartbeat to allow agent to handle error
+return (
+    messages,
+    None,
+    True,
+)  # force a heartbeat to allow agent to handle error

# If no failures happened along the way: ...
# Step 4: send the info on the function call and function response to GPT
@@ -560,7 +598,13 @@ def handle_ai_response(self, response_message):

return messages, heartbeat_request, function_failed

-def step(self, user_message, first_message=False, first_message_retry_limit=FIRST_MESSAGE_ATTEMPTS, skip_verify=False):
+def step(
+    self,
+    user_message,
+    first_message=False,
+    first_message_retry_limit=FIRST_MESSAGE_ATTEMPTS,
+    skip_verify=False,
+):
"""Top-level event message handler for the MemGPT agent"""

try:
@@ -611,7 +655,11 @@ def step(self, user_message, first_message=False, first_message_retry_limit=FIRS
# (if yes) Step 4: send the info on the function call and function response to LLM
response_message = response.choices[0].message
response_message_copy = response_message.copy()
-all_response_messages, heartbeat_request, function_failed = self.handle_ai_response(response_message)
+(
+    all_response_messages,
+    heartbeat_request,
+    function_failed,
+) = self.handle_ai_response(response_message)

# Add the extra metadata to the assistant response
# (e.g. enough metadata to enable recreating the API call)
@@ -657,7 +705,12 @@ def step(self, user_message, first_message=False, first_message_retry_limit=FIRS
)

self.append_to_messages(all_new_messages)
-return all_new_messages, heartbeat_request, function_failed, active_memory_warning
+return (
+    all_new_messages,
+    heartbeat_request,
+    function_failed,
+    active_memory_warning,
+)

except Exception as e:
printd(f"step() failed\nuser_message = {user_message}\nerror = {e}")
@@ -735,7 +788,10 @@ def summarize_messages_inplace(self, cutoff=None, preserve_last_N_messages=True)
if (self.model is not None and self.model in LLM_MAX_TOKENS)
else str(LLM_MAX_TOKENS["DEFAULT"])
)
-summary = summarize_messages(agent_config=self.config, message_sequence_to_summarize=message_sequence_to_summarize)
+summary = summarize_messages(
+    agent_config=self.config,
+    message_sequence_to_summarize=message_sequence_to_summarize,
+)
printd(f"Got summary: {summary}")

# Metadata that's useful for the agent to see
memgpt/autogen/examples/agent_docs.py (7 changes: 6 additions & 1 deletion)
@@ -157,7 +157,12 @@
memgpt_agent.load_and_attach("memgpt_research_paper", "directory")

# Initialize the group chat between the agents
-groupchat = autogen.GroupChat(agents=[user_proxy, memgpt_agent], messages=[], max_round=12, speaker_selection_method="round_robin")
+groupchat = autogen.GroupChat(
+    agents=[user_proxy, memgpt_agent],
+    messages=[],
+    max_round=12,
+    speaker_selection_method="round_robin",
+)
manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config)

# Begin the group chat with a message from the user
memgpt/autogen/memgpt_agent.py (16 changes: 14 additions & 2 deletions)
@@ -1,4 +1,10 @@
-from autogen.agentchat import Agent, ConversableAgent, UserProxyAgent, GroupChat, GroupChatManager
+from autogen.agentchat import (
+    Agent,
+    ConversableAgent,
+    UserProxyAgent,
+    GroupChat,
+    GroupChatManager,
+)
from memgpt.agent import Agent as _Agent

from typing import Callable, Optional, List, Dict, Union, Any, Tuple
@@ -11,7 +17,13 @@
import memgpt.presets.presets as presets
from memgpt.config import AgentConfig, MemGPTConfig
from memgpt.cli.cli import attach
-from memgpt.cli.cli_load import load_directory, load_webpage, load_index, load_database, load_vector_database
+from memgpt.cli.cli_load import (
+    load_directory,
+    load_webpage,
+    load_index,
+    load_database,
+    load_vector_database,
+)
from memgpt.connectors.storage import StorageConnector


memgpt/cli/cli.py (30 changes: 24 additions & 6 deletions)
@@ -30,10 +30,16 @@ def run(
model_wrapper: str = typer.Option(None, help="Specify the LLM model wrapper"),
model_endpoint: str = typer.Option(None, help="Specify the LLM model endpoint"),
model_endpoint_type: str = typer.Option(None, help="Specify the LLM model endpoint type"),
-context_window: int = typer.Option(None, help="The context window of the LLM you are using (e.g. 8k for most Mistral 7B variants)"),
+context_window: int = typer.Option(
+    None,
+    help="The context window of the LLM you are using (e.g. 8k for most Mistral 7B variants)",
+),
# other
first: bool = typer.Option(False, "--first", help="Use --first to send the first message in the sequence"),
-strip_ui: bool = typer.Option(False, help="Remove all the bells and whistles in CLI output (helpful for testing)"),
+strip_ui: bool = typer.Option(
+    False,
+    help="Remove all the bells and whistles in CLI output (helpful for testing)",
+),
debug: bool = typer.Option(False, "--debug", help="Use --debug to enable debugging output"),
no_verify: bool = typer.Option(False, help="Bypass message verification"),
yes: bool = typer.Option(False, "-y", help="Skip confirmation prompt and use defaults"),
@@ -68,7 +74,10 @@ def run(

# force re-configuration if config is from old version
if config.memgpt_version is None: # TODO: eventually add checks for older versions, if config changes again
typer.secho("MemGPT has been updated to a newer version, so re-running configuration.", fg=typer.colors.YELLOW)
typer.secho(
"MemGPT has been updated to a newer version, so re-running configuration.",
fg=typer.colors.YELLOW,
)
configure()
config = MemGPTConfig.load()

@@ -107,17 +116,26 @@ def run(
# persistence_manager = LocalStateManager(agent_config).load() # TODO: implement load
# TODO: load prior agent state
if persona and persona != agent_config.persona:
typer.secho(f"{CLI_WARNING_PREFIX}Overriding existing persona {agent_config.persona} with {persona}", fg=typer.colors.YELLOW)
typer.secho(
f"{CLI_WARNING_PREFIX}Overriding existing persona {agent_config.persona} with {persona}",
fg=typer.colors.YELLOW,
)
agent_config.persona = persona
# raise ValueError(f"Cannot override {agent_config.name} existing persona {agent_config.persona} with {persona}")
if human and human != agent_config.human:
typer.secho(f"{CLI_WARNING_PREFIX}Overriding existing human {agent_config.human} with {human}", fg=typer.colors.YELLOW)
typer.secho(
f"{CLI_WARNING_PREFIX}Overriding existing human {agent_config.human} with {human}",
fg=typer.colors.YELLOW,
)
agent_config.human = human
# raise ValueError(f"Cannot override {agent_config.name} existing human {agent_config.human} with {human}")

# Allow overriding model specifics (model, model wrapper, model endpoint IP + type, context_window)
if model and model != agent_config.model:
typer.secho(f"{CLI_WARNING_PREFIX}Overriding existing model {agent_config.model} with {model}", fg=typer.colors.YELLOW)
typer.secho(
f"{CLI_WARNING_PREFIX}Overriding existing model {agent_config.model} with {model}",
fg=typer.colors.YELLOW,
)
agent_config.model = model
if context_window is not None and int(context_window) != agent_config.context_window:
typer.secho(