Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: migrate readline to prompt_toolkit, with many features and fixes #244

Merged
merged 11 commits
Dec 15, 2024
3 changes: 2 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -173,7 +173,8 @@ You can find more [Demos][docs-demos] and [Examples][docs-examples] in the [docu
- ✨ Many smaller features to ensure a great experience
- 🚰 Pipe in context via `stdin` or as arguments.
- Passing a filename as an argument will read the file and include it as context.
- → Tab completion
- → Smart completion and highlighting:
- Tab completion and highlighting for commands and paths
- 📝 Automatic naming of conversations
- 💬 Optional basic [Web UI and REST API][docs-server]

Expand Down
32 changes: 13 additions & 19 deletions gptme/chat.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,8 +8,9 @@
from collections.abc import Generator
from pathlib import Path

from gptme.constants import PROMPT_USER

from .commands import action_descriptions, execute_cmd
from .constants import PROMPT_USER
from .init import init
from .llm import reply
from .llm.models import get_model
Expand All @@ -25,17 +26,12 @@
)
from .tools.base import ConfirmFunc
from .tools.browser import read_url
from .util import (
console,
path_with_tilde,
print_bell,
rich_to_str,
)
from .util import console, path_with_tilde, print_bell
from .util.ask_execute import ask_execute
from .util.context import use_fresh_context
from .util.cost import log_costs
from .util.interrupt import clear_interruptible, set_interruptible
from .util.readline import add_history
from .util.prompt import add_history, get_input

logger = logging.getLogger(__name__)

Expand Down Expand Up @@ -255,27 +251,25 @@ def prompt_user(value=None) -> str: # pragma: no cover
try:
set_interruptible()
response = prompt_input(PROMPT_USER, value)
if response:
add_history(response)
except KeyboardInterrupt:
print("\nInterrupted. Press Ctrl-D to exit.")
except EOFError:
print("\nGoodbye!")
sys.exit(0)
clear_interruptible()
if response:
add_history(response) # readline history
return response


def prompt_input(prompt: str, value=None) -> str: # pragma: no cover
"""Get input using prompt_toolkit with fish-style suggestions."""
prompt = prompt.strip() + ": "
if value:
console.print(prompt + value)
else:
prompt = rich_to_str(prompt, color_system="256")

# https://stackoverflow.com/a/53260487/965332
original_stdout = sys.stdout
sys.stdout = sys.__stdout__
value = input(prompt.strip() + " ")
sys.stdout = original_stdout
return value
return value

return get_input(prompt)


def _include_paths(msg: Message, workspace: Path | None = None) -> Message:
Expand Down
4 changes: 2 additions & 2 deletions gptme/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@
from .util import epoch_to_age
from .util.generate_name import generate_name
from .util.interrupt import handle_keyboard_interrupt, set_interruptible
from .util.readline import add_history
from .util.prompt import add_history

logger = logging.getLogger(__name__)

Expand Down Expand Up @@ -222,7 +222,7 @@ def main(
"Failed to switch to interactive mode, continuing in non-interactive mode"
)

# add prompts to readline history
# add prompts to prompt-toolkit history
for prompt in prompts:
if prompt and len(prompt) > 1000:
# skip adding long prompts to history (slows down startup, unlikely to be useful)
Expand Down
6 changes: 2 additions & 4 deletions gptme/constants.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
# Optimized for code
# Discussion here: https://community.openai.com/t/cheat-sheet-mastering-temperature-and-top-p-in-chatgpt-api-a-few-tips-and-tricks-on-controlling-the-creativity-deterministic-output-of-prompt-responses/172683
# TODO: make these configurable

TEMPERATURE = 0
TOP_P = 0.1

Expand All @@ -21,10 +22,7 @@
"system": "grey42",
}

# colors wrapped in \001 and \002 to inform readline about non-printable characters
PROMPT_USER = (
f"\001[bold {ROLE_COLOR['user']}]\002User\001[/bold {ROLE_COLOR['user']}]\002"
)
PROMPT_USER = f"[bold {ROLE_COLOR['user']}]User[/bold {ROLE_COLOR['user']}]"
PROMPT_ASSISTANT = (
f"[bold {ROLE_COLOR['assistant']}]Assistant[/bold {ROLE_COLOR['assistant']}]"
)
4 changes: 4 additions & 0 deletions gptme/dirs.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,10 @@ def get_readline_history_file() -> Path:
return get_config_dir() / "history"


def get_pt_history_file() -> Path:
    """Return the path of the prompt_toolkit history file, located in the data dir."""
    return get_data_dir().joinpath("history.pt")


def get_data_dir() -> Path:
# used in testing, so must take precedence
if "XDG_DATA_HOME" in os.environ:
Expand Down
7 changes: 0 additions & 7 deletions gptme/init.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,6 @@
)
from .tools import init_tools
from .util import console
from .util.readline import load_readline_history, register_tabcomplete

logger = logging.getLogger(__name__)
_init_done = False
Expand Down Expand Up @@ -65,12 +64,6 @@ def init(model: str | None, interactive: bool, tool_allowlist: list[str] | None)
init_llm(provider)
set_default_model(f"{provider}/{model}")

if interactive:
load_readline_history()

# for some reason it bugs out shell tests in CI
register_tabcomplete()

init_tools(frozenset(tool_allowlist) if tool_allowlist else None)


Expand Down
11 changes: 8 additions & 3 deletions gptme/message.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,8 @@

from .codeblock import Codeblock
from .constants import ROLE_COLOR
from .util import console, get_tokenizer, rich_to_str
from .util import console, get_tokenizer
from .util.prompt import rich_to_str

logger = logging.getLogger(__name__)

Expand Down Expand Up @@ -222,8 +223,12 @@ def format_msgs(
output += textwrap.indent(block, prefix=indent * " ")
continue
elif highlight:
lang = block.split("\n")[0]
block = rich_to_str(Syntax(block.rstrip(), lang))
lang = block.split("\n", 1)[0]
content = block.split("\n", 1)[-1]
fmt = "underline blue"
block = f"[{fmt}]{lang}\n[/{fmt}]" + rich_to_str(
Syntax(content.rstrip(), lang)
)
output += f"```{block.rstrip()}\n```"
outputs.append(f"{userprefix}: {output.rstrip()}")
return outputs
Expand Down
8 changes: 0 additions & 8 deletions gptme/util/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,6 @@
"""

import functools
import io
import logging
import re
import shutil
Expand All @@ -13,7 +12,6 @@
from datetime import datetime, timedelta
from functools import lru_cache
from pathlib import Path
from typing import Any

from rich import print
from rich.console import Console
Expand Down Expand Up @@ -203,12 +201,6 @@ def decorator(func): # pragma: no cover
return decorator


def rich_to_str(s: Any, **kwargs) -> str:
    """Render any rich renderable (or markup string) into a plain string.

    Extra keyword arguments are forwarded to the ``rich.Console`` constructor
    (e.g. ``color_system``).
    """
    buffer = io.StringIO()
    console = Console(file=buffer, **kwargs)
    console.print(s)
    return buffer.getvalue()


def path_with_tilde(path: Path) -> str:
home = str(Path.home())
path_str = str(path)
Expand Down
6 changes: 4 additions & 2 deletions gptme/util/ask_execute.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@
from ..tools.base import ConfirmFunc
from . import print_bell
from .clipboard import copy, set_copytext
from .prompt import get_prompt_session
from .useredit import edit_text_with_editor

console = Console(log_path=False)
Expand Down Expand Up @@ -104,9 +105,10 @@ def ask_execute(question="Execute code?", default=True) -> bool:
choicestr += "/?"
choicestr += "]"

session = get_prompt_session()
answer = (
console.input(
f"[bold bright_yellow on red] {question} {choicestr} [/] ",
session.prompt(
[("bold fg:ansiyellow bg:red", f" {question} {choicestr} "), ("", " ")],
)
.lower()
.strip()
Expand Down
Loading
Loading