Fixes bugs with AutoGen implementation and examples (#498)
* patched bugs in the AutoGen agent examples, updated AutoGen agent creation to follow the AgentConfig paradigm

* more fixes

* black

* fix bug in autoreply

* black

* pass the default auto-reply through to the MemGPT AutoGen ConversableAgent subclass so that it doesn't leave empty messages, which can trigger errors in local LLM backends like LM Studio
cpacker authored Nov 22, 2023
1 parent ea09bcc commit cd9fca5
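
The last commit bullet is the substantive fix. A minimal sketch of the idea, assuming stock pyautogen; the class name here is illustrative and is not the actual wrapper defined in memgpt/autogen/memgpt_agent.py:

import autogen

# Illustrative stand-in for MemGPT's AutoGen wrapper, not the real class.
class MemGPTConversableAgent(autogen.ConversableAgent):
    def __init__(self, name, default_auto_reply="...", **kwargs):
        # Forward a non-empty default auto-reply to the AutoGen base class.
        # When the agent has nothing to reply, AutoGen then emits "..."
        # instead of an empty message; empty messages can trigger errors in
        # local backends such as LM Studio.
        super().__init__(name, default_auto_reply=default_auto_reply, **kwargs)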
Showing 5 changed files with 241 additions and 149 deletions.
72 changes: 65 additions & 7 deletions memgpt/autogen/examples/agent_autoreply.py
@@ -12,20 +12,74 @@
import os
import autogen
from memgpt.autogen.memgpt_agent import create_memgpt_autogen_agent_from_config
from memgpt.presets.presets import DEFAULT_PRESET

# USE_OPENAI = True
USE_OPENAI = False
if USE_OPENAI:
    # This config is for autogen agents that are not powered by MemGPT
    config_list = [
        {
            "model": "gpt-4-1106-preview",  # gpt-4-turbo (https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo)
            "api_key": os.getenv("OPENAI_API_KEY"),
        }
    ]

    # This config is for autogen agents that are powered by MemGPT
    config_list_memgpt = [
        {
            "model": "gpt-4-1106-preview",  # gpt-4-turbo (https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo)
            "preset": "memgpt_docs",
            "model_wrapper": None,
            "model_endpoint_type": None,
            "model_endpoint": None,
            "context_window": 128000,  # gpt-4-turbo
        },
    ]

else:
    # Example using LM Studio on a local machine
    # You will have to change the parameters based on your setup

    # Non-MemGPT agents will still use local LLMs, but they will use the ChatCompletions endpoint
    config_list = [
        {
            "model": "NULL",  # not needed
            "api_base": "http://localhost:1234/v1",  # ex. "http://127.0.0.1:5001/v1" if you are using webui, "http://localhost:1234/v1/" if you are using LM Studio
            "api_key": "NULL",  # not needed
            "api_type": "open_ai",
        },
    ]

    # MemGPT-powered agents will also use local LLMs, but they need additional setup (they also use the Completions endpoint)
    config_list_memgpt = [
        {
            "preset": DEFAULT_PRESET,
            "model": None,  # only required for Ollama, see: https://memgpt.readthedocs.io/en/latest/ollama/
            "model_wrapper": "airoboros-l2-70b-2.1",  # airoboros is the default wrapper and should work for most models
            "model_endpoint_type": "lmstudio",  # can use webui, ollama, llamacpp, etc.
            "model_endpoint": "http://localhost:1234",  # the IP address of your LLM backend
            "context_window": 8192,  # the context window of your model (for Mistral 7B-based models, it's likely 8192)
        },
    ]

config_list = [
    {
        "model": "gpt-4",
        "api_key": os.getenv("OPENAI_API_KEY"),
    },
]

# If USE_MEMGPT is False, then this example will be the same as the official AutoGen repo
# (https://github.com/microsoft/autogen/blob/main/notebook/agentchat_groupchat.ipynb)
# If USE_MEMGPT is True, then we swap out the "coder" agent with a MemGPT agent
USE_MEMGPT = True

llm_config = {"config_list": config_list, "seed": 42}
llm_config_memgpt = {"config_list": config_list_memgpt, "seed": 42}

# Set to True if you want to print MemGPT's inner workings.
DEBUG = False
interface_kwargs = {
    "debug": DEBUG,
    "show_inner_thoughts": DEBUG,
    "show_function_outputs": DEBUG,
}

# The user agent
user_proxy = autogen.UserProxyAgent(
@@ -45,17 +99,21 @@
system_message=f"I am a 10x engineer, trained in Python. I was the first engineer at Uber "
f"(which I make sure to tell everyone I work with).",
human_input_mode="TERMINATE",
default_auto_reply="...", # Set a default auto-reply message here (non-empty auto-reply is required for LM Studio)
)

else:
    # In our example, we swap this AutoGen agent with a MemGPT agent
    # This MemGPT agent will have all the benefits of MemGPT, i.e. persistent memory, etc.
    coder = create_memgpt_autogen_agent_from_config(
        "MemGPT_coder",
        llm_config=llm_config_memgpt,
        nonmemgpt_llm_config=llm_config,
        system_message=f"I am a 10x engineer, trained in Python. I was the first engineer at Uber "
        f"(which I make sure to tell everyone I work with).",
        human_input_mode="TERMINATE",
        interface_kwargs=interface_kwargs,
        default_auto_reply="...",  # Set a default auto-reply message here (non-empty auto-reply is required for LM Studio)
    )

# Begin the group chat with a message from the user
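A distinction that is easy to miss in the comments above: non-MemGPT AutoGen agents call an OpenAI-style ChatCompletions route, while MemGPT's local-LLM path sends wrapper-formatted prompts to a raw Completions route. A minimal sketch of the two request shapes, assuming an OpenAI-compatible server (such as LM Studio) at the address from the config; the routes and payload fields are assumptions about the backend, not code from this commit:

import requests

BASE = "http://localhost:1234/v1"  # matches api_base / model_endpoint above

# Shape used by non-MemGPT AutoGen agents (ChatCompletions):
chat = requests.post(
    f"{BASE}/chat/completions",
    json={"model": "NULL", "messages": [{"role": "user", "content": "Hello!"}]},
    timeout=60,
)
print(chat.json()["choices"][0]["message"]["content"])

# Shape used by MemGPT local backends (Completions): the model wrapper
# (e.g. airoboros-l2-70b-2.1) flattens the conversation into a single prompt.
comp = requests.post(
    f"{BASE}/completions",
    json={"prompt": "USER: Hello!\nASSISTANT:", "max_tokens": 64},
    timeout=60,
)
print(comp.json()["choices"][0]["text"])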
102 changes: 63 additions & 39 deletions memgpt/autogen/examples/agent_docs.py
@@ -16,22 +16,55 @@
import autogen
from memgpt.autogen.memgpt_agent import create_autogen_memgpt_agent, create_memgpt_autogen_agent_from_config

# This config is for autogen agents that are not powered by MemGPT
config_list = [
    {
        "model": "gpt-4",
        "api_key": os.getenv("OPENAI_API_KEY"),
    }
]

# This config is for autogen agents that are powered by MemGPT
config_list_memgpt = [
    {
        "model": "gpt-4",
    },
]

USE_AUTOGEN_WORKFLOW = True
# USE_OPENAI = True
USE_OPENAI = False
if USE_OPENAI:
    # This config is for autogen agents that are not powered by MemGPT
    config_list = [
        {
            "model": "gpt-4-1106-preview",  # gpt-4-turbo (https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo)
            "api_key": os.getenv("OPENAI_API_KEY"),
        }
    ]

    # This config is for autogen agents that are powered by MemGPT
    config_list_memgpt = [
        {
            "model": "gpt-4-1106-preview",  # gpt-4-turbo (https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo)
            "preset": "memgpt_docs",
            "model_wrapper": None,
            "model_endpoint_type": None,
            "model_endpoint": None,
            "context_window": 128000,  # gpt-4-turbo
        },
    ]

else:
    # Example using LM Studio on a local machine
    # You will have to change the parameters based on your setup

    # Non-MemGPT agents will still use local LLMs, but they will use the ChatCompletions endpoint
    config_list = [
        {
            "model": "NULL",  # not needed
            "api_base": "http://localhost:1234/v1",  # ex. "http://127.0.0.1:5001/v1" if you are using webui, "http://localhost:1234/v1/" if you are using LM Studio
            "api_key": "NULL",  # not needed
            "api_type": "open_ai",
        },
    ]

    # MemGPT-powered agents will also use local LLMs, but they need additional setup (they also use the Completions endpoint)
    config_list_memgpt = [
        {
            "preset": "memgpt_docs",
            "model": None,  # only required for Ollama, see: https://memgpt.readthedocs.io/en/latest/ollama/
            "model_wrapper": "airoboros-l2-70b-2.1",  # airoboros is the default wrapper and should work for most models
            "model_endpoint_type": "lmstudio",  # can use webui, ollama, llamacpp, etc.
            "model_endpoint": "http://localhost:1234",  # the IP address of your LLM backend
            "context_window": 8192,  # the context window of your model (for Mistral 7B-based models, it's likely 8192)
        },
    ]

# Set to True if you want to print MemGPT's inner workings.
DEBUG = False
@@ -56,30 +89,21 @@

# In our example, we swap this AutoGen agent with a MemGPT agent
# This MemGPT agent will have all the benefits of MemGPT, i.e. persistent memory, etc.
if not USE_AUTOGEN_WORKFLOW:
    coder = create_autogen_memgpt_agent(
        "MemGPT_coder",
        persona_description="I am a 10x engineer, trained in Python. I was the first engineer at Uber "
        "(which I make sure to tell everyone I work with).",
        user_description=f"You are participating in a group chat with a user ({user_proxy.name}) " f"and a product manager ({pm.name}).",
        model=config_list_memgpt[0]["model"],
        interface_kwargs=interface_kwargs,
    )
else:
    coder = create_memgpt_autogen_agent_from_config(
        "MemGPT_coder",
        llm_config=llm_config_memgpt,
        system_message=f"I am a 10x engineer, trained in Python. I was the first engineer at Uber "
        f"(which I make sure to tell everyone I work with).\n"
        f"You are participating in a group chat with a user ({user_proxy.name}).",
        interface_kwargs=interface_kwargs,
    )
coder.attach("memgpt_research_paper")  # See https://memgpt.readthedocs.io/en/latest/autogen/#loading-documents

# Initialize the group chat between the user and two LLM agents (PM and coder)
groupchat = autogen.GroupChat(agents=[user_proxy, coder], messages=[], max_round=12)
manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config)
memgpt_agent = create_memgpt_autogen_agent_from_config(
    "MemGPT_agent",
    llm_config=llm_config_memgpt,
    system_message=f"I am a 10x engineer, trained in Python. I was the first engineer at Uber "
    f"(which I make sure to tell everyone I work with).\n"
    f"You are participating in a group chat with a user ({user_proxy.name}).",
    interface_kwargs=interface_kwargs,
    default_auto_reply="...",  # Set a default auto-reply message here (non-empty auto-reply is required for LM Studio)
)
# NOTE: you need to follow the steps to load the document first, see https://memgpt.readthedocs.io/en/latest/autogen/#loading-documents
memgpt_agent.load_and_attach("memgpt_research_paper", "directory")

# Initialize the group chat between the agents
groupchat = autogen.GroupChat(agents=[user_proxy, memgpt_agent], messages=[], max_round=12)
manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config)

# Begin the group chat with a message from the user
user_proxy.initiate_chat(
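Note that load_and_attach above assumes the memgpt_research_paper data source has already been created (see the linked docs). A minimal sketch of a guard that fails with a clearer message when the documents were never loaded; the wrapping is an assumption, not part of this commit:

try:
    memgpt_agent.load_and_attach("memgpt_research_paper", "directory")
except Exception as err:
    raise RuntimeError(
        "Data source 'memgpt_research_paper' not found; load your documents "
        "first (https://memgpt.readthedocs.io/en/latest/autogen/#loading-documents)"
    ) from err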
124 changes: 61 additions & 63 deletions memgpt/autogen/examples/agent_groupchat.py
@@ -13,55 +13,63 @@
import os
import autogen
from memgpt.autogen.memgpt_agent import create_autogen_memgpt_agent, create_memgpt_autogen_agent_from_config
from memgpt.presets.presets import DEFAULT_PRESET
from memgpt.constants import LLM_MAX_TOKENS

USE_OPENAI = True
if USE_OPENAI:
    # This config is for autogen agents that are not powered by MemGPT
    config_list = [
        {
            "model": "gpt-4-1106-preview",  # gpt-4-turbo (https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo)
            "api_key": os.getenv("OPENAI_API_KEY"),
        }
    ]

    # This config is for autogen agents that are powered by MemGPT
    config_list_memgpt = [
        {
            "model": "gpt-4-1106-preview",  # gpt-4-turbo (https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo)
            "preset": DEFAULT_PRESET,
            "model_wrapper": None,
            "model_endpoint_type": None,
            "model_endpoint": None,
            "context_window": 128000,  # gpt-4-turbo
        },
    ]

# This config is for autogen agents that are not powered by MemGPT
config_list = [
    {
        "model": "gpt-4",
        "api_key": os.getenv("OPENAI_API_KEY"),
    }
]

# This config is for autogen agents that are powered by MemGPT
config_list_memgpt = [
    {
        "model": "gpt-4",
    },
]

# Uncomment and fill in the following for local LLM deployment:
# # This config is for autogen agents that are not powered by MemGPT
# # See https://github.com/oobabooga/text-generation-webui/tree/main/extensions/openai
config_list = [
    {
        "model": "YOUR_MODEL",  # this is the model name, not the wrapper
        "api_base": "YOUR_URL",  # ex. "http://127.0.0.1:5001/v1" if you are using webui, "http://localhost:1234/v1/" if you are using LM Studio
        "api_key": "NULL",  # this is a placeholder
        "api_type": "open_ai",
    },
]

# # This config is for autogen agents that are powered by MemGPT
# # For this to work, you need to have your environment variables set correctly, e.g.
# # For web UI:
# #   OPENAI_API_BASE=http://127.0.0.1:5000
# #   BACKEND_TYPE=webui
# # For LM Studio:
# #   OPENAI_API_BASE=http://127.0.0.1:1234
# #   BACKEND_TYPE=lmstudio
# # "model" here specifies the "wrapper" that will be used; setting it to "gpt-4" uses the default
config_list_memgpt = [
    {"model": "airoboros-l2-70b-2.1"},  # if you set this to gpt-4, it will fall back to the default wrapper
]

else:
    # Example using LM Studio on a local machine
    # You will have to change the parameters based on your setup

    # Non-MemGPT agents will still use local LLMs, but they will use the ChatCompletions endpoint
    config_list = [
        {
            "model": "NULL",  # not needed
            "api_base": "http://localhost:1234/v1",  # ex. "http://127.0.0.1:5001/v1" if you are using webui, "http://localhost:1234/v1/" if you are using LM Studio
            "api_key": "NULL",  # not needed
            "api_type": "open_ai",
        },
    ]

    # MemGPT-powered agents will also use local LLMs, but they need additional setup (they also use the Completions endpoint)
    config_list_memgpt = [
        {
            "preset": DEFAULT_PRESET,
            "model": None,  # only required for Ollama, see: https://memgpt.readthedocs.io/en/latest/ollama/
            "model_wrapper": "airoboros-l2-70b-2.1",  # airoboros is the default wrapper and should work for most models
            "model_endpoint_type": "lmstudio",  # can use webui, ollama, llamacpp, etc.
            "model_endpoint": "http://localhost:1234",  # the IP address of your LLM backend
            "context_window": 8192,  # the context window of your model (for Mistral 7B-based models, it's likely 8192)
        },
    ]

# If USE_MEMGPT is False, then this example will be the same as the official AutoGen repo
# (https://github.com/microsoft/autogen/blob/main/notebook/agentchat_groupchat.ipynb)
# If USE_MEMGPT is True, then we swap out the "coder" agent with a MemGPT agent
USE_MEMGPT = True

USE_AUTOGEN_WORKFLOW = True

# Set to True if you want to print MemGPT's inner workings.
DEBUG = False

@@ -101,26 +109,16 @@
else:
    # In our example, we swap this AutoGen agent with a MemGPT agent
    # This MemGPT agent will have all the benefits of MemGPT, i.e. persistent memory, etc.
    if not USE_AUTOGEN_WORKFLOW:
        coder = create_autogen_memgpt_agent(
            "MemGPT_coder",
            persona_description="I am a 10x engineer, trained in Python. I was the first engineer at Uber "
            "(which I make sure to tell everyone I work with).",
            user_description=f"You are participating in a group chat with a user ({user_proxy.name}) "
            f"and a product manager ({pm.name}).",
            model=config_list_memgpt[0]["model"],
            interface_kwargs=interface_kwargs,
        )
    else:
        coder = create_memgpt_autogen_agent_from_config(
            "MemGPT_coder",
            llm_config=llm_config_memgpt,
            system_message=f"I am a 10x engineer, trained in Python. I was the first engineer at Uber "
            f"(which I make sure to tell everyone I work with).\n"
            f"You are participating in a group chat with a user ({user_proxy.name}) "
            f"and a product manager ({pm.name}).",
            interface_kwargs=interface_kwargs,
        )
    coder = create_memgpt_autogen_agent_from_config(
        "MemGPT_coder",
        llm_config=llm_config_memgpt,
        system_message=f"I am a 10x engineer, trained in Python. I was the first engineer at Uber "
        f"(which I make sure to tell everyone I work with).\n"
        f"You are participating in a group chat with a user ({user_proxy.name}) "
        f"and a product manager ({pm.name}).",
        interface_kwargs=interface_kwargs,
        default_auto_reply="...",  # Set a default auto-reply message here (non-empty auto-reply is required for LM Studio)
    )

# Initialize the group chat between the user and two LLM agents (PM and coder)
groupchat = autogen.GroupChat(agents=[user_proxy, pm, coder], messages=[], max_round=12)
@@ -129,5 +127,5 @@
# Begin the group chat with a message from the user
user_proxy.initiate_chat(
    manager,
    message="I want to design an app to make me one million dollars in one month. Yes, you heard that right.",
)
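
As in the other examples, the local branch assumes an OpenAI-compatible server is already running at http://localhost:1234. A minimal sketch for checking that before starting the group chat, assuming the server exposes the standard /v1/models route (LM Studio does):

import requests

# Should list the model(s) the local server has loaded; adjust the address
# to match model_endpoint in config_list_memgpt.
resp = requests.get("http://localhost:1234/v1/models", timeout=5)
resp.raise_for_status()
print(resp.json())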