Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

chore: add CLI CI test #1858

Merged
merged 9 commits into from
Oct 9, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
46 changes: 46 additions & 0 deletions .github/workflows/test_cli.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,46 @@
# CI workflow: runs the interactive CLI test (tests/test_cli.py) on pushes
# and pull requests targeting main.
name: Run CLI tests

# The CLI test drives `letta run`, which talks to a live OpenAI endpoint,
# so the API key must be available to the whole job.
env:
  OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}

on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]

jobs:
  test:
    runs-on: ubuntu-latest
    # Hard cap so a hung pexpect interaction cannot stall CI indefinitely.
    timeout-minutes: 15

    # Qdrant vector store exposed on its default port.
    # NOTE(review): nothing below references Qdrant explicitly — presumably
    # the app connects to localhost:6333 by default; confirm it is needed.
    services:
      qdrant:
        image: qdrant/qdrant
        ports:
          - 6333:6333

    steps:
      - name: Checkout
        uses: actions/checkout@v4

      # NOTE(review): presumably starts the Postgres container that the
      # LETTA_PG_* variables below point at — confirm against the script.
      - name: Build and run container
        run: bash db/run_postgres.sh

      # Installs Python 3.12 + Poetry with dependency caching.
      - name: "Setup Python, Poetry and Dependencies"
        uses: packetcoders/action-setup-cache-python-poetry@main
        with:
          python-version: "3.12"
          poetry-version: "1.8.2"
          install-args: "-E dev -E postgres -E tests"

      # Runs only the single interactive CLI test; the main test workflow
      # (tests.yml) excludes test_cli.py to avoid running it twice.
      - name: Test `letta run` up until first message
        env:
          LETTA_PG_PORT: 8888
          LETTA_PG_USER: letta
          LETTA_PG_PASSWORD: letta
          LETTA_PG_DB: letta
          LETTA_PG_HOST: localhost
          LETTA_SERVER_PASS: test_server_token
        run: |
          poetry run pytest -s -vv tests/test_cli.py::test_letta_run_create_new_agent
2 changes: 1 addition & 1 deletion .github/workflows/tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -69,4 +69,4 @@ jobs:
LETTA_SERVER_PASS: test_server_token
PYTHONPATH: ${{ github.workspace }}:${{ env.PYTHONPATH }}
run: |
poetry run pytest -s -vv -k "not test_tools.py and not test_concurrent_connections.py and not test_quickstart and not test_endpoints and not test_storage and not test_server and not test_openai_client and not test_providers" tests
poetry run pytest -s -vv -k "not test_cli.py and not test_tools.py and not test_concurrent_connections.py and not test_quickstart and not test_endpoints and not test_storage and not test_server and not test_openai_client and not test_providers" tests
3 changes: 2 additions & 1 deletion letta/cli/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
from letta.agent import Agent, save_agent
from letta.config import LettaConfig
from letta.constants import CLI_WARNING_PREFIX, LETTA_DIR
from letta.local_llm.constants import ASSISTANT_MESSAGE_CLI_SYMBOL
from letta.log import get_logger
from letta.metadata import MetadataStore
from letta.schemas.enums import OptionState
Expand Down Expand Up @@ -276,7 +277,7 @@ def run(
memory = ChatMemory(human=human_obj.value, persona=persona_obj.value, limit=core_memory_limit)
metadata = {"human": human_obj.name, "persona": persona_obj.name}

typer.secho(f"-> 🤖 Using persona profile: '{persona_obj.name}'", fg=typer.colors.WHITE)
typer.secho(f"-> {ASSISTANT_MESSAGE_CLI_SYMBOL} Using persona profile: '{persona_obj.name}'", fg=typer.colors.WHITE)
typer.secho(f"-> 🧑 Using human profile: '{human_obj.name}'", fg=typer.colors.WHITE)

# add tools
Expand Down
9 changes: 7 additions & 2 deletions letta/client/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,11 @@

from IPython.display import HTML, display

from letta.local_llm.constants import (
ASSISTANT_MESSAGE_CLI_SYMBOL,
INNER_THOUGHTS_CLI_SYMBOL,
)


def pprint(messages):
"""Utility function for pretty-printing the output of client.send_message in notebooks"""
Expand Down Expand Up @@ -47,13 +52,13 @@ def pprint(messages):
html_content += f"<p><strong>🛠️ [{date_formatted}] Function Return ({return_status}):</strong></p>"
html_content += f"<p class='function-return'>{return_string}</p>"
elif "internal_monologue" in message:
html_content += f"<p><strong>💭 [{date_formatted}] Internal Monologue:</strong></p>"
html_content += f"<p><strong>{INNER_THOUGHTS_CLI_SYMBOL} [{date_formatted}] Internal Monologue:</strong></p>"
html_content += f"<p class='internal-monologue'>{message['internal_monologue']}</p>"
elif "function_call" in message:
html_content += f"<p><strong>🛠️ [[{date_formatted}] Function Call:</strong></p>"
html_content += f"<p class='function-call'>{message['function_call']}</p>"
elif "assistant_message" in message:
html_content += f"<p><strong>🤖 [{date_formatted}] Assistant Message:</strong></p>"
html_content += f"<p><strong>{ASSISTANT_MESSAGE_CLI_SYMBOL} [{date_formatted}] Assistant Message:</strong></p>"
html_content += f"<p class='assistant-message'>{message['assistant_message']}</p>"
html_content += "<br>"
html_content += "</div>"
Expand Down
8 changes: 6 additions & 2 deletions letta/interface.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,10 @@
from colorama import Fore, Style, init

from letta.constants import CLI_WARNING_PREFIX
from letta.local_llm.constants import (
ASSISTANT_MESSAGE_CLI_SYMBOL,
INNER_THOUGHTS_CLI_SYMBOL,
)
from letta.schemas.message import Message
from letta.utils import json_loads, printd

Expand Down Expand Up @@ -79,14 +83,14 @@ def warning_message(msg: str):
@staticmethod
def internal_monologue(msg: str, msg_obj: Optional[Message] = None):
    """Print the agent's inner monologue to the CLI.

    Renders ``msg`` in dim italics prefixed with the inner-thoughts symbol;
    when STRIP_UI is set, prints the raw message with no decoration.
    ``msg_obj`` is accepted for interface compatibility and is not used here.
    """
    # ANSI escape code for italic is '\x1B[3m'
    # NOTE: the diff residue contained a dead first assignment (hard-coded
    # emoji) immediately overwritten by this one; only the final value is kept.
    fstr = f"\x1B[3m{Fore.LIGHTBLACK_EX}{INNER_THOUGHTS_CLI_SYMBOL} {{msg}}{Style.RESET_ALL}"
    if STRIP_UI:
        fstr = "{msg}"
    print(fstr.format(msg=msg))

@staticmethod
def assistant_message(msg: str, msg_obj: Optional[Message] = None):
    """Print an assistant-facing message to the CLI.

    Renders ``msg`` in bright yellow prefixed with the assistant symbol;
    when STRIP_UI is set, prints the raw message with no decoration.
    ``msg_obj`` is accepted for interface compatibility and is not used here.
    """
    # NOTE: the diff residue contained a dead first assignment (hard-coded
    # emoji) immediately overwritten by this one; only the final value is kept.
    fstr = f"{Fore.YELLOW}{Style.BRIGHT}{ASSISTANT_MESSAGE_CLI_SYMBOL} {Fore.YELLOW}{{msg}}{Style.RESET_ALL}"
    if STRIP_UI:
        fstr = "{msg}"
    print(fstr.format(msg=msg))
Expand Down
3 changes: 3 additions & 0 deletions letta/local_llm/constants.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,3 +29,6 @@

INNER_THOUGHTS_KWARG = "inner_thoughts"
INNER_THOUGHTS_KWARG_DESCRIPTION = "Deep inner monologue private to you only."
# Emoji prefixed to the agent's inner-monologue output in the CLI
# (used by letta.interface and letta.streaming_interface).
INNER_THOUGHTS_CLI_SYMBOL = "💭"

# Emoji prefixed to assistant-visible messages in the CLI output.
ASSISTANT_MESSAGE_CLI_SYMBOL = "🤖"
12 changes: 8 additions & 4 deletions letta/streaming_interface.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,10 @@
from rich.markup import escape

from letta.interface import CLIInterface
from letta.local_llm.constants import (
ASSISTANT_MESSAGE_CLI_SYMBOL,
INNER_THOUGHTS_CLI_SYMBOL,
)
from letta.schemas.message import Message
from letta.schemas.openai.chat_completion_response import (
ChatCompletionChunkResponse,
Expand Down Expand Up @@ -296,15 +300,15 @@ def update_output(self, content: str):
def process_refresh(self, response: ChatCompletionResponse):
"""Process the response to rewrite the current output buffer."""
if not response.choices:
self.update_output("💭 [italic]...[/italic]")
self.update_output(f"{INNER_THOUGHTS_CLI_SYMBOL} [italic]...[/italic]")
return # Early exit if there are no choices

choice = response.choices[0]
inner_thoughts = choice.message.content if choice.message.content else ""
tool_calls = choice.message.tool_calls if choice.message.tool_calls else []

if self.fancy:
message_string = f"💭 [italic]{inner_thoughts}[/italic]" if inner_thoughts else ""
message_string = f"{INNER_THOUGHTS_CLI_SYMBOL} [italic]{inner_thoughts}[/italic]" if inner_thoughts else ""
else:
message_string = "[inner thoughts] " + inner_thoughts if inner_thoughts else ""

Expand All @@ -326,7 +330,7 @@ def process_refresh(self, response: ChatCompletionResponse):
message = function_args[len(prefix) :]
else:
message = function_args
message_string += f"🤖 [bold yellow]{message}[/bold yellow]"
message_string += f"{ASSISTANT_MESSAGE_CLI_SYMBOL} [bold yellow]{message}[/bold yellow]"
else:
message_string += f"{function_name}({function_args})"

Expand All @@ -336,7 +340,7 @@ def stream_start(self):
if self.streaming:
print()
self.live.start() # Start the Live display context and keep it running
self.update_output("💭 [italic]...[/italic]")
self.update_output(f"{INNER_THOUGHTS_CLI_SYMBOL} [italic]...[/italic]")

def stream_end(self):
if self.streaming:
Expand Down
91 changes: 60 additions & 31 deletions tests/test_cli.py
Original file line number Diff line number Diff line change
@@ -1,44 +1,73 @@
import subprocess
import os
import shutil
import sys

subprocess.check_call([sys.executable, "-m", "pip", "install", "pexpect"])
from prettytable.colortable import ColorTable
import pexpect
import pytest

from letta.cli.cli_config import ListChoice, add, delete
from letta.cli.cli_config import list as list_command
from letta.local_llm.constants import (
ASSISTANT_MESSAGE_CLI_SYMBOL,
INNER_THOUGHTS_CLI_SYMBOL,
)

# def test_configure_letta():
# configure_letta()
original_letta_path = os.path.expanduser("~/.letta")
backup_letta_path = os.path.expanduser("~/.letta_backup")

options = [ListChoice.agents, ListChoice.sources, ListChoice.humans, ListChoice.personas]

@pytest.fixture
def swap_letta_config():
    """Run the test against a pristine ``~/.letta`` directory.

    Moves any existing user config to ``~/.letta_backup`` before the test
    and restores it afterwards, even if the test fails.

    NOTE: the scraped diff interleaved lines of the removed tests
    (``test_cli_list`` body, a stray ``def test_cli_config():`` header) into
    this span; this reconstruction keeps only the added fixture lines.
    """
    # Drop any stale backup left by a previously aborted run.
    if os.path.exists(backup_letta_path):
        print("\nDelete the backup ~/.letta directory\n")
        shutil.rmtree(backup_letta_path)

    # Stash the user's real config while the test runs.
    if os.path.exists(original_letta_path):
        print("\nBackup the original ~/.letta directory\n")
        shutil.move(original_letta_path, backup_letta_path)

    try:
        # Run the test
        yield
    finally:
        # Ensure this runs no matter what
        print("\nClean up ~/.letta and restore the original directory\n")
        if os.path.exists(original_letta_path):
            shutil.rmtree(original_letta_path)

        if os.path.exists(backup_letta_path):
            shutil.move(backup_letta_path, original_letta_path)

# test add
for option in ["human", "persona"]:

# create initial
add(option=option, name="test", text="test data")
def test_letta_run_create_new_agent(swap_letta_config):
    """Drive ``letta run`` with pexpect up to the first agent response.

    Checks the transcript contains exactly one inner-thoughts block, exactly
    one assistant message, and that the default user name ("Chad") is echoed
    back at least once.

    NOTE: the scraped diff interleaved removed-file statements (the old
    ``test_cli_config`` loop) into this span; this reconstruction keeps only
    the added test lines. Placeholder-free ``f"..."`` literals were also
    stripped of their useless ``f`` prefix (ruff F541).
    """
    # Start the letta run command
    child = pexpect.spawn("poetry run letta run", encoding="utf-8")
    child.logfile = sys.stdout  # mirror the CLI transcript into the pytest log

    child.expect("Creating new agent", timeout=10)

    # Optional: LLM model selection (five "down" arrows select the sixth entry)
    try:
        child.expect("Select LLM model:", timeout=10)
        child.sendline("\033[B\033[B\033[B\033[B\033[B")
    except (pexpect.TIMEOUT, pexpect.EOF):
        print("[WARNING] LLM model selection step was skipped.")

    # Optional: Embedding model selection
    try:
        child.expect("Select embedding model:", timeout=10)
        child.sendline("text-embedding-ada-002")
    except (pexpect.TIMEOUT, pexpect.EOF):
        print("[WARNING] Embedding model selection step was skipped.")

    child.expect("Created new agent", timeout=10)
    child.sendline("")

    # Get initial response
    child.expect("Enter your message:", timeout=60)
    # Capture the output up to this point
    full_output = child.before

    # Count occurrences of inner thoughts
    cloud_emoji_count = full_output.count(INNER_THOUGHTS_CLI_SYMBOL)
    assert cloud_emoji_count == 1, "It appears that there are multiple instances of inner thought outputted."

    # Count occurrences of assistant messages
    robot = full_output.count(ASSISTANT_MESSAGE_CLI_SYMBOL)
    assert robot == 1, "It appears that there are multiple instances of assistant messages outputted."

    # Make sure the user name was repeated back at least once
    assert full_output.count("Chad") > 0, "Chad was not mentioned...please manually inspect the outputs."
Loading