Skip to content

Commit

Permalink
llm models default command, plus refactored env variables
Browse files Browse the repository at this point in the history
Closes #76
Closes #31
  • Loading branch information
simonw committed Jul 10, 2023
1 parent 031979c commit 566573c
Show file tree
Hide file tree
Showing 9 changed files with 152 additions and 56 deletions.
12 changes: 11 additions & 1 deletion docs/help.md
Original file line number Diff line number Diff line change
Expand Up @@ -179,14 +179,24 @@ Options:
--help Show this message and exit.
Commands:
default  Show or set the default model
list     List available models
```
#### llm models list --help
```
Usage: llm models list [OPTIONS]
List available models
Options:
--help Show this message and exit.
```
#### llm models default --help
```
Usage: llm models default [OPTIONS] [MODEL]
Show or set the default model
Options:
--help Show this message and exit.
```
Expand Down
2 changes: 0 additions & 2 deletions docs/logging.md
Original file line number Diff line number Diff line change
Expand Up @@ -18,8 +18,6 @@ On my Mac that outputs:
```
This will differ for other operating systems.

(You can customize the location of this file by setting a path in the `LLM_LOG_PATH` environment variable.)

Once that SQLite database has been created any prompts you run will be logged to that database.

To avoid logging a prompt, pass `--no-log` or `-n` to the command:
Expand Down
14 changes: 14 additions & 0 deletions docs/setup.md
Original file line number Diff line number Diff line change
Expand Up @@ -89,3 +89,17 @@ The environment variable will be used only if no `--key` option is passed to the
If no environment variable is found, the tool will fall back to checking `keys.json`.

You can force the tool to use the key from `keys.json` even if an environment variable has also been set using `llm "prompt" --key openai`.

## Custom directory location

This tool stores various files - prompt templates, stored keys, preferences, a database of logs - in a directory on your computer.

On macOS this is `~/Library/Application Support/io.datasette.llm/`.

On Linux it may be something like `~/.config/io.datasette.llm/`.

You can set a custom location for this directory by setting the `LLM_USER_PATH` environment variable:

```bash
export LLM_USER_PATH=/path/to/my/custom/directory
```
37 changes: 37 additions & 0 deletions docs/usage.md
Original file line number Diff line number Diff line change
Expand Up @@ -61,3 +61,40 @@ This is useful for piping content to standard input, for example:

curl -s 'https://simonwillison.net/2023/May/15/per-interpreter-gils/' | \
llm -s 'Suggest topics for this post as a JSON array'

## Listing available models

The `llm models list` command lists every model that can be used with LLM, along with any aliases:

```
llm models list
```
Example output:
```
OpenAI Chat: gpt-3.5-turbo (aliases: 3.5, chatgpt)
OpenAI Chat: gpt-3.5-turbo-16k (aliases: chatgpt-16k, 3.5-16k)
OpenAI Chat: gpt-4 (aliases: 4, gpt4)
OpenAI Chat: gpt-4-32k (aliases: 4-32k)
PaLM 2: chat-bison-001 (aliases: palm, palm2)
```
You can pass the full model name or any of the aliases to the `-m/--model` option:

```
llm -m chatgpt-16k 'As many names for cheesecakes as you can think of, with detailed descriptions'
```
Models that have been installed using plugins will be shown here as well.

## Setting a custom model

The model used when calling `llm` without the `-m/--model` option defaults to `gpt-3.5-turbo` - the fastest and least expensive OpenAI model, and the same model family that powers ChatGPT.

You can use the `llm models default` command to set a different default model. For GPT-4 (slower and more expensive, but more capable) run this:

```bash
llm models default gpt-4
```
You can view the current model by running this:
```
llm models default
```
Any of the supported aliases for a model can be passed to this command.
69 changes: 47 additions & 22 deletions llm/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,13 @@
import json
from llm import Template
from .migrations import migrate
from .plugins import pm, get_plugins, get_model_aliases, get_models_with_aliases
from .plugins import (
pm,
get_plugins,
get_model,
get_model_aliases,
get_models_with_aliases,
)
import openai
import os
import pathlib
Expand Down Expand Up @@ -180,7 +186,7 @@ def prompt(

# Figure out which model we are using
if model_id is None:
model_id = history_model or DEFAULT_MODEL
model_id = history_model or get_default_model()

# Now resolve the model
try:
Expand Down Expand Up @@ -255,7 +261,7 @@ def init_db():
All subsequent prompts will be logged to this database.
"""
path = log_db_path()
path = logs_db_path()
if path.exists():
return
# Ensure directory exists
Expand All @@ -276,11 +282,7 @@ def keys_path_command():


def keys_path():
    # Keys now always live inside the user directory; the old
    # LLM_KEYS_PATH override was removed in favour of LLM_USER_PATH.
    return user_dir() / "keys.json"


@keys.command(name="set")
Expand Down Expand Up @@ -321,7 +323,7 @@ def logs():
@logs.command(name="path")
def logs_path():
"Output the path to the logs.db file"
click.echo(log_db_path())
click.echo(logs_db_path())


@logs.command(name="list")
Expand All @@ -340,7 +342,7 @@ def logs_path():
@click.option("-t", "--truncate", is_flag=True, help="Truncate long strings in output")
def logs_list(count, path, truncate):
"Show recent logged prompts and their responses"
path = pathlib.Path(path or log_db_path())
path = pathlib.Path(path or logs_db_path())
if not path.exists():
raise click.ClickException("No log database found at {}".format(path))
db = sqlite_utils.Database(path)
Expand Down Expand Up @@ -369,6 +371,21 @@ def models_list():
click.echo(output)


@models.command(name="default")
@click.argument("model", required=False)
def models_default(model):
    "Show or set the default model"
    if not model:
        # No argument: just display the current default
        click.echo(get_default_model())
        return
    # Validate the name before persisting it; keep the try body minimal
    # so an unexpected KeyError from set_default_model() is not
    # misreported as an unknown model.
    try:
        model_obj = get_model(model)
    except KeyError:
        # get_model() raises UnknownModelError, a KeyError subclass
        raise click.ClickException("Unknown model: {}".format(model))
    set_default_model(model_obj.model_id)


@cli.group()
def templates():
"Manage stored prompt templates"
Expand Down Expand Up @@ -473,11 +490,7 @@ def uninstall(packages, yes):


def template_dir():
    # Templates live inside the user directory; the old
    # LLM_TEMPLATES_PATH override was removed in favour of LLM_USER_PATH.
    path = user_dir() / "templates"
    # Create it on first use so callers can write into it immediately
    path.mkdir(parents=True, exist_ok=True)
    return path

Expand Down Expand Up @@ -514,15 +527,27 @@ def load_keys():


def user_dir():
    "Directory where LLM stores keys, templates, logs and preferences."
    custom = os.environ.get("LLM_USER_PATH")
    if custom:
        # Explicit override takes precedence over the platform default
        return pathlib.Path(custom)
    # Platform-appropriate application directory, e.g.
    # ~/Library/Application Support/io.datasette.llm/ on macOS
    return pathlib.Path(click.get_app_dir("io.datasette.llm"))


def log_db_path():
llm_log_path = os.environ.get("LLM_LOG_PATH")
if llm_log_path:
return pathlib.Path(llm_log_path)
def get_default_model():
    # The default model is stored as plain text in default_model.txt;
    # fall back to the built-in DEFAULT_MODEL when it has never been set.
    path = user_dir() / "default_model.txt"
    if path.exists():
        # strip() guards against a trailing newline in the saved file
        return path.read_text().strip()
    return DEFAULT_MODEL


def set_default_model(model):
    "Persist the given model ID as the default in default_model.txt."
    (user_dir() / "default_model.txt").write_text(model)


def logs_db_path():
    # logs.db lives alongside keys and templates in the LLM user directory
    return user_dir() / "logs.db"


def log(no_log, system, prompt, response, model, chat_id=None, debug=None, start=None):
Expand All @@ -532,7 +557,7 @@ def log(no_log, system, prompt, response, model, chat_id=None, debug=None, start
duration_ms = int((end - start) * 1000)
if no_log:
return
log_path = log_db_path()
log_path = logs_db_path()
if not log_path.exists():
return
db = sqlite_utils.Database(log_path)
Expand Down Expand Up @@ -574,7 +599,7 @@ def load_template(name):
def get_history(chat_id):
if chat_id is None:
return None, []
log_path = log_db_path()
log_path = logs_db_path()
if not log_path.exists():
raise click.ClickException(
"This feature requires logging. Run `llm init-db` to create logs.db"
Expand Down
12 changes: 12 additions & 0 deletions llm/plugins.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,3 +52,15 @@ def get_model_aliases() -> Dict[str, Model]:
model_aliases[alias] = model_with_aliases.model
model_aliases[model_with_aliases.model.model_id] = model_with_aliases.model
return model_aliases


class UnknownModelError(KeyError):
    # Subclasses KeyError so existing `except KeyError` callers keep working
    pass


def get_model(name):
    "Resolve a model ID or alias to a model; raise UnknownModelError if absent."
    registry = get_model_aliases()
    if name not in registry:
        raise UnknownModelError(name)
    return registry[name]
25 changes: 10 additions & 15 deletions tests/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,27 +8,22 @@ def pytest_configure(config):


@pytest.fixture
def user_path(tmpdir):
    # A fresh LLM user directory per test; env_setup points
    # LLM_USER_PATH at it so no real user files are touched.
    dir = tmpdir / "llm.datasette.io"
    dir.mkdir()
    return dir


@pytest.fixture
def templates_path(user_path):
    # templates/ sub-directory inside the temporary LLM user directory;
    # the standalone keys_path fixture is gone now that keys.json also
    # lives under user_path.
    dir = user_path / "templates"
    dir.mkdir()
    return dir


@pytest.fixture(autouse=True)
def env_setup(monkeypatch, user_path):
    # Single LLM_USER_PATH variable replaces the old per-file
    # LLM_KEYS_PATH / LLM_LOG_PATH / LLM_TEMPLATES_PATH overrides.
    monkeypatch.setenv("LLM_USER_PATH", str(user_path))


@pytest.fixture
Expand Down
21 changes: 12 additions & 9 deletions tests/test_keys.py
Original file line number Diff line number Diff line change
@@ -1,38 +1,41 @@
from click.testing import CliRunner
import json
from llm.cli import cli
import pathlib
import pytest


@pytest.mark.parametrize("env", ({}, {"LLM_USER_PATH": "/tmp/llm-keys-test"}))
def test_keys_in_user_path(monkeypatch, env, user_path):
    # With env set, LLM_USER_PATH overrides the autouse fixture's value;
    # without it, keys.json falls under the fixture-provided user_path.
    for key, value in env.items():
        monkeypatch.setenv(key, value)
    runner = CliRunner()
    result = runner.invoke(cli, ["keys", "path"])
    assert result.exit_code == 0
    if env:
        expected = env["LLM_USER_PATH"] + "/keys.json"
    else:
        expected = user_path + "/keys.json"
    assert result.output.strip() == expected


def test_keys_set(monkeypatch, tmpdir):
    # Point LLM_USER_PATH at a not-yet-existing nested directory to check
    # that `keys set` creates it and writes keys.json inside it.
    user_path = str(tmpdir / "user/keys")
    monkeypatch.setenv("LLM_USER_PATH", user_path)
    runner = CliRunner()
    result = runner.invoke(cli, ["keys", "set", "openai"], input="foo")
    assert result.exit_code == 0
    content = open(user_path + "/keys.json").read()
    assert json.loads(content) == {
        "// Note": "This file stores secret API credentials. Do not share!",
        "openai": "foo",
    }


def test_uses_correct_key(mocked_openai, monkeypatch, tmpdir):
keys_path = tmpdir / "keys.json"
user_dir = tmpdir / "user-dir"
pathlib.Path(user_dir).mkdir()
keys_path = user_dir / "keys.json"
keys_path.write_text(
json.dumps(
{
Expand All @@ -42,7 +45,7 @@ def test_uses_correct_key(mocked_openai, monkeypatch, tmpdir):
),
"utf-8",
)
monkeypatch.setenv("LLM_KEYS_PATH", str(keys_path))
monkeypatch.setenv("LLM_USER_PATH", str(user_dir))
monkeypatch.setenv("OPENAI_API_KEY", "from-env")

def assert_key(key):
Expand Down
Loading

0 comments on commit 566573c

Please sign in to comment.