
feat: migrate readline to prompt_toolkit, with many features and fixes #244

Merged 11 commits on Dec 15, 2024
56 changes: 37 additions & 19 deletions gptme/chat.py
@@ -6,10 +6,12 @@
import termios
import urllib.parse
from collections.abc import Generator
from functools import lru_cache
from pathlib import Path

from gptme.constants import PROMPT_USER

from .commands import action_descriptions, execute_cmd
from .constants import PROMPT_USER
from .init import init
from .llm import reply
from .llm.models import get_model
@@ -25,17 +27,12 @@
)
from .tools.base import ConfirmFunc
from .tools.browser import read_url
from .util import (
console,
path_with_tilde,
print_bell,
rich_to_str,
)
from .util import console, path_with_tilde, print_bell
from .util.ask_execute import ask_execute
from .util.context import use_fresh_context
from .util.cost import log_costs
from .util.interrupt import clear_interruptible, set_interruptible
from .util.readline import add_history
from .util.prompt import add_history, get_input

logger = logging.getLogger(__name__)

@@ -255,27 +252,48 @@ def prompt_user(value=None) -> str: # pragma: no cover
try:
set_interruptible()
response = prompt_input(PROMPT_USER, value)
if response:
add_history(response)
except KeyboardInterrupt:
print("\nInterrupted. Press Ctrl-D to exit.")
except EOFError:
print("\nGoodbye!")
sys.exit(0)
clear_interruptible()
if response:
add_history(response) # prompt_toolkit history
return response


def prompt_input(prompt: str, value=None) -> str: # pragma: no cover
"""Get input using prompt_toolkit with fish-style suggestions."""
prompt = prompt.strip() + ": "
if value:
console.print(prompt + value)
else:
prompt = rich_to_str(prompt, color_system="256")

# https://stackoverflow.com/a/53260487/965332
original_stdout = sys.stdout
sys.stdout = sys.__stdout__
value = input(prompt.strip() + " ")
sys.stdout = original_stdout
return value
return value

return get_input(prompt, llm_suggest_callback=get_suggestions)


# TODO: Implement LLM suggestions
@lru_cache
def get_suggestions(text: str) -> list[str]:
enabled = False
if enabled:
response = reply(
messages=[
Message(
"system",
"""You are to tab-complete the user prompt with a relevant query.
Respond with one entry per line.
No preamble, greeting, or postamble.
Only 10 lines.""",
),
Message("user", text),
],
model=get_model().model,
stream=False,
)
return response.content.split("\n")
return []


def _include_paths(msg: Message, workspace: Path | None = None) -> Message:
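Note: the llm_suggest_callback that chat.py passes to get_input is a plain str -> list[str] function, so the gated LLM-backed get_suggestions above can be swapped for any cheaper source without touching the completer. A minimal sketch of an alternative callback with the same contract (the candidate list is hypothetical, not part of this PR):

from functools import lru_cache

@lru_cache
def static_suggestions(text: str) -> list[str]:
    # Same contract as get_suggestions: take the typed text, return up to 10 completions
    candidates = [
        "/help",
        "/tools",
        "summarize the current file",
        "write tests for this module",
    ]
    return [c for c in candidates if c.startswith(text)][:10]
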
4 changes: 2 additions & 2 deletions gptme/cli.py
@@ -31,7 +31,7 @@
from .util import epoch_to_age
from .util.generate_name import generate_name
from .util.interrupt import handle_keyboard_interrupt, set_interruptible
from .util.readline import add_history
from .util.prompt import add_history

logger = logging.getLogger(__name__)

@@ -222,7 +222,7 @@ def main(
"Failed to switch to interactive mode, continuing in non-interactive mode"
)

# add prompts to readline history
# add prompts to prompt-toolkit history
for prompt in prompts:
if prompt and len(prompt) > 1000:
# skip adding long prompts to history (slows down startup, unlikely to be useful)
6 changes: 2 additions & 4 deletions gptme/constants.py
@@ -5,6 +5,7 @@
# Optimized for code
# Discussion here: https://community.openai.com/t/cheat-sheet-mastering-temperature-and-top-p-in-chatgpt-api-a-few-tips-and-tricks-on-controlling-the-creativity-deterministic-output-of-prompt-responses/172683
# TODO: make these configurable

TEMPERATURE = 0
TOP_P = 0.1

@@ -21,10 +22,7 @@
"system": "grey42",
}

# colors wrapped in \001 and \002 to inform readline about non-printable characters
PROMPT_USER = (
f"\001[bold {ROLE_COLOR['user']}]\002User\001[/bold {ROLE_COLOR['user']}]\002"
)
PROMPT_USER = f"[bold {ROLE_COLOR['user']}]User[/bold {ROLE_COLOR['user']}]"
PROMPT_ASSISTANT = (
f"[bold {ROLE_COLOR['assistant']}]Assistant[/bold {ROLE_COLOR['assistant']}]"
)
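
Note: readline needs ANSI escapes wrapped in \001/\002 to exclude them from its prompt-width calculation; prompt_toolkit measures width on its own formatted-text objects, so the plain rich markup can be kept and converted where the prompt is rendered (see rich_to_str + ANSI in gptme/util/prompt.py below). A self-contained sketch of that conversion path, assuming rich and prompt_toolkit are installed:

from prompt_toolkit import print_formatted_text
from prompt_toolkit.formatted_text import ANSI
from rich.console import Console

# rich markup -> ANSI escape string (what gptme's rich_to_str helper does) -> prompt_toolkit text
console = Console()
with console.capture() as capture:
    console.print("[bold green]User[/bold green]:", end="")
print_formatted_text(ANSI(capture.get()))
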
4 changes: 4 additions & 0 deletions gptme/dirs.py
@@ -13,6 +13,10 @@ def get_readline_history_file() -> Path:
return get_config_dir() / "history"


def get_pt_history_file() -> Path:
return get_data_dir() / "history.pt"


def get_data_dir() -> Path:
# used in testing, so must take precedence
if "XDG_DATA_HOME" in os.environ:
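Note: the prompt_toolkit history lives in the data dir, separate from the readline history file in the config dir above. A quick sketch of resolving it, assuming gptme is importable (the exact path depends on platform and XDG environment variables):

from gptme.dirs import get_pt_history_file

print(get_pt_history_file())  # e.g. ~/.local/share/gptme/history.pt with default XDG dirs
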
7 changes: 0 additions & 7 deletions gptme/init.py
@@ -16,7 +16,6 @@
)
from .tools import init_tools
from .util import console
from .util.readline import load_readline_history, register_tabcomplete

logger = logging.getLogger(__name__)
_init_done = False
@@ -65,12 +64,6 @@ def init(model: str | None, interactive: bool, tool_allowlist: list[str] | None)
init_llm(provider)
set_default_model(f"{provider}/{model}")

if interactive:
load_readline_history()

# for some reason it bugs out shell tests in CI
register_tabcomplete()

init_tools(frozenset(tool_allowlist) if tool_allowlist else None)


6 changes: 4 additions & 2 deletions gptme/util/ask_execute.py
@@ -15,6 +15,7 @@
from ..tools.base import ConfirmFunc
from . import print_bell
from .clipboard import copy, set_copytext
from .prompt import create_prompt_session
from .useredit import edit_text_with_editor

console = Console(log_path=False)
@@ -104,9 +105,10 @@ def ask_execute(question="Execute code?", default=True) -> bool:
choicestr += "/?"
choicestr += "]"

session = create_prompt_session()
answer = (
console.input(
f"[bold bright_yellow on red] {question} {choicestr} [/] ",
session.prompt(
[("bold fg:ansiyellow bg:red", f" {question} {choicestr} "), ("", " ")],
)
.lower()
.strip()
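Note: session.prompt takes prompt_toolkit formatted text, here a list of (style, text) tuples, rather than rich markup, hence the style syntax change from rich's "[bold bright_yellow on red]" to "bold fg:ansiyellow bg:red". A standalone sketch of the same pattern, assuming prompt_toolkit is installed (the question text is illustrative):

from prompt_toolkit import PromptSession

session = PromptSession()
answer = (
    session.prompt(
        [
            ("bold fg:ansiyellow bg:red", " Execute code? [Y/n/?] "),  # styled segment
            ("", " "),  # unstyled trailing space
        ]
    )
    .lower()
    .strip()
)
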
132 changes: 132 additions & 0 deletions gptme/util/prompt.py
@@ -0,0 +1,132 @@
import logging
from collections.abc import Callable

from gptme.util import rich_to_str
from prompt_toolkit import PromptSession
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
from prompt_toolkit.completion import Completer, Completion, PathCompleter
from prompt_toolkit.document import Document
from prompt_toolkit.formatted_text import ANSI, HTML, to_formatted_text
from prompt_toolkit.history import FileHistory
from prompt_toolkit.lexers import PygmentsLexer
from prompt_toolkit.shortcuts import CompleteStyle
from prompt_toolkit.styles import Style
from pygments.lexer import RegexLexer
from pygments.token import Name, Text

from ..dirs import get_pt_history_file


class PathLexer(RegexLexer):
"""Simple lexer that highlights path-like patterns."""

name = "Path"
tokens = {
"root": [
(r"(?:/[^/\s]+)+/?|~/[^/\s]+|\.\.?/[^/\s]+", Name.Variable), # paths
(r".", Text), # everything else
]
}


logger = logging.getLogger(__name__)


class GptmeCompleter(Completer):
"""Completer that combines command, path and LLM suggestions."""

def __init__(self, llm_suggest_callback: Callable[[str], list[str]] | None = None):
self.path_completer = PathCompleter(expanduser=True)
self.llm_suggest_callback = llm_suggest_callback

def get_completions(self, document, complete_event):
from ..commands import COMMANDS # fmt: skip

text = document.text_before_cursor
path_seg = text.split(" ")[-1]

# Command completion
if text.startswith("/"):
cmd_text = text[1:]
for cmd in COMMANDS:
if cmd.startswith(cmd_text):
yield Completion(
cmd,
start_position=-len(cmd_text),
display=HTML(f"<blue>/{cmd}</blue>"),
)

# Path completion
elif any(path_seg.startswith(prefix) for prefix in ["../", "~/", "./"]):
yield from self.path_completer.get_completions(
Document(path_seg), complete_event
)

# LLM suggestions
elif self.llm_suggest_callback and len(text) > 2:
try:
suggestions = self.llm_suggest_callback(text)
if suggestions:
for suggestion in suggestions:
if suggestion.startswith(text):
yield Completion(
suggestion,
start_position=-len(text),
display_meta="AI suggestion",
)
except Exception:
# Fail silently if LLM suggestions timeout/fail
pass


def create_prompt_session(
llm_suggest_callback: Callable[[str], list[str]] | None = None,
) -> PromptSession:
"""Create a PromptSession with history and completion support."""
history = FileHistory(str(get_pt_history_file()))
completer = GptmeCompleter(llm_suggest_callback)

return PromptSession(
history=history,
completer=completer,
complete_while_typing=True,
auto_suggest=AutoSuggestFromHistory(),
enable_history_search=True,
complete_style=CompleteStyle.READLINE_LIKE,
)


def get_input(
prompt: str,
llm_suggest_callback: Callable[[str], list[str]] | None = None,
) -> str:
"""Get input from user with completion support."""
session = create_prompt_session(llm_suggest_callback)
try:
logger.debug(f"Original prompt: {repr(prompt)}")

result = session.prompt(
to_formatted_text(
ANSI(rich_to_str(prompt.rstrip() + " ", color_system="256"))
)[:-1],
lexer=PygmentsLexer(PathLexer),
style=Style.from_dict(
{
"pygments.name.variable": "#87afff bold", # bright blue, bold for paths
}
),
include_default_pygments_style=False,
)
return result
except (EOFError, KeyboardInterrupt) as e:
# Re-raise EOFError to handle Ctrl+D properly
if isinstance(e, EOFError):
raise
return ""


def add_history(line: str) -> None:
"""Add a line to the prompt_toolkit history."""
session = create_prompt_session()
session.history.append_string(line)
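
Note: taken together, a minimal REPL on top of this module is just a loop over get_input; completion, auto-suggest, and persistent history all come from the session it creates. A sketch assuming gptme is importable (the prompt string is illustrative):

from gptme.util.prompt import add_history, get_input

while True:
    try:
        line = get_input("[bold green]User[/bold green]: ")
    except EOFError:
        break  # Ctrl+D: get_input re-raises EOFError for the caller to handle
    if line:
        add_history(line)  # persisted to the prompt_toolkit history file
        print(f"echo: {line}")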