
Commit 6deed8f

get_model() improvement, get_default_model() / set_default_model() now documented

Refs #553

simonw committed Aug 19, 2024
1 parent 24cc042 commit 6deed8f
Showing 4 changed files with 94 additions and 41 deletions.
63 changes: 49 additions & 14 deletions docs/python-api.md
@@ -7,22 +7,25 @@ Understanding this API is also important for writing {ref}`plugins`.

## Basic prompt execution

-To run a prompt against the `gpt-3.5-turbo` model, run this:
+To run a prompt against the `gpt-4o-mini` model, run this:

```python
import llm

model = llm.get_model("gpt-3.5-turbo")
model.key = 'YOUR_API_KEY_HERE'
model = llm.get_model("gpt-4o-mini")
# Optional, you can configure the key in other ways:
model.key = "sk-..."
response = model.prompt("Five surprising names for a pet pelican")
print(response.text())
```
-The `llm.get_model()` function accepts model names or aliases - so `chatgpt` would work here too.
+The `llm.get_model()` function accepts model names or aliases. You can also omit it to use the currently configured default model, which is `gpt-4o-mini` if you have not changed the default.

In this example the key is set by Python code. You can also provide the key using the `OPENAI_API_KEY` environment variable, or use the `llm keys set openai` command to store it in a `keys.json` file, see {ref}`api-keys`.

The `__str__()` method of `response` also returns the text of the response, so you can do this instead:

```python
print(response)
+print(llm.get_model().prompt("Five surprising names for a pet pelican"))
```

You can run this command to see a list of available models and their aliases:
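That command (collapsed in this diff) is the CLI's model-listing command:

```bash
llm models
```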
@@ -52,27 +55,28 @@ response = model.prompt(
For models that support options (view those with `llm models --options`) you can pass options as keyword arguments to the `.prompt()` method:

```python
model = llm.get_model("gpt-3.5-turbo")
model.key = "... key here ..."
model = llm.get_model()
print(model.prompt("Names for otters", temperature=0.2))
```

### Models from plugins

-Any models you have installed as plugins will also be available through this mechanism, for example to use Google's PaLM 2 model with [llm-palm](https://github.com/simonw/llm-palm)
+Any models you have installed as plugins will also be available through this mechanism, for example to use Anthropic's Claude 3.5 Sonnet model with [llm-claude-3](https://github.com/simonw/llm-claude-3):

```bash
-pip install llm-palm
+pip install llm-claude-3
```
Then in your Python code:
```python
import llm

model = llm.get_model("palm")
model = llm.get_model("claude-3.5-sonnet")
# Use this if you have not set the key using 'llm keys set claude':
model.key = 'YOUR_API_KEY_HERE'
response = model.prompt("Five surprising names for a pet pelican")
print(response.text())
```
-You can omit the `model.key = ` line for models that do not use an API key
+Some models do not use API keys at all.
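For example, with a local-model plugin installed no key is needed at all. A sketch, where the model name is a hypothetical placeholder; substitute one listed by `llm models`:

```python
import llm

# Hypothetical model ID - substitute one listed by `llm models`:
model = llm.get_model("my-local-model")
print(model.prompt("Five surprising names for a pet pelican"))
```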

## Streaming responses
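The body of this section is collapsed in the diff. As a sketch of the pattern it documents, a response streams by iterating over it as chunks arrive:

```python
import llm

model = llm.get_model("gpt-4o-mini")
response = model.prompt("Five diabolical names for a pet goose")
# Chunks print as they arrive from the model:
for chunk in response:
    print(chunk, end="")
```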

@@ -94,8 +98,7 @@ LLM supports *conversations*, where you ask follow-up questions of a model as part of an ongoing conversation.
To start a new conversation, use the `model.conversation()` method:

```python
model = llm.get_model("gpt-3.5-turbo")
model.key = 'YOUR_API_KEY_HERE'
model = llm.get_model()
conversation = model.conversation()
```
You can then use the `conversation.prompt()` method to execute prompts against this conversation:
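The example that follows is collapsed here; a sketch of the documented usage:

```python
response = conversation.prompt("Five fun facts about pelicans")
print(response.text())
# Follow-up prompts automatically include the previous exchange as context:
response2 = conversation.prompt("Now do skunks")
print(response2.text())
```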
@@ -124,7 +127,7 @@ The `llm.set_alias()` function can be used to define a new alias:
```python
import llm

llm.set_alias("turbo", "gpt-3.5-turbo")
llm.set_alias("mini", "gpt-4o-mini")
```
The second argument can be a model identifier or another alias, in which case that alias will be resolved.
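For example, chaining off the `mini` alias defined above (a hypothetical second alias, for illustration):

```python
# "cheap" resolves through "mini" to the underlying gpt-4o-mini model ID:
llm.set_alias("cheap", "mini")
```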

@@ -141,3 +144,35 @@ import llm

llm.remove_alias("turbo")
```

### set_default_model(alias)

This sets the default model to the given model ID or alias. Any changes to defaults will be persisted in the LLM configuration folder, and will affect all programs using LLM on the system, including the `llm` CLI tool.

```python
import llm

llm.set_default_model("claude-3.5-sonnet")
```

### get_default_model()

This returns the currently configured default model, or `gpt-4o-mini` if no default has been set.

```python
import llm

model_id = llm.get_default_model()
```

To detect if no default has been set you can use this pattern:

```python
if llm.get_default_model(default=None) is None:
    print("No default has been set")
```
Here the `default=` parameter specifies the value that should be returned if there is no configured default.

### set_default_embedding_model(alias) and get_default_embedding_model()

These two methods work the same as `set_default_model()` and `get_default_model()` but for the default {ref}`embedding model <embeddings>` instead.
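A sketch of the embedding variants, assuming the `3-small` alias (OpenAI's text-embedding-3-small) is available in your installation:

```python
import llm

# Assumes the "3-small" embedding model alias exists in your installation:
llm.set_default_embedding_model("3-small")
print(llm.get_default_embedding_model())  # "3-small"
```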
28 changes: 27 additions & 1 deletion llm/__init__.py
@@ -38,6 +38,7 @@
    "ModelError",
    "NeedsKeyException",
]
DEFAULT_MODEL = "gpt-4o-mini"


def get_plugins(all=False):
@@ -144,8 +145,9 @@ class UnknownModelError(KeyError):
    pass


-def get_model(name):
+def get_model(name: Optional[str] = None) -> Model:
    aliases = get_model_aliases()
    name = name or get_default_model()
    try:
        return aliases[name]
    except KeyError:
@@ -256,3 +258,27 @@ def cosine_similarity(a, b):
    magnitude_a = sum(x * x for x in a) ** 0.5
    magnitude_b = sum(x * x for x in b) ** 0.5
    return dot_product / (magnitude_a * magnitude_b)


def get_default_model(filename="default_model.txt", default=DEFAULT_MODEL):
    path = user_dir() / filename
    if path.exists():
        return path.read_text().strip()
    else:
        return default


def set_default_model(model, filename="default_model.txt"):
    path = user_dir() / filename
    if model is None and path.exists():
        path.unlink()
    else:
        path.write_text(model)


def get_default_embedding_model():
    return get_default_model("default_embedding_model.txt", None)


def set_default_embedding_model(model):
    set_default_model(model, "default_embedding_model.txt")
30 changes: 4 additions & 26 deletions llm/cli.py
@@ -10,6 +10,8 @@
    Template,
    UnknownModelError,
    encode,
    get_default_model,
    get_default_embedding_model,
    get_embedding_models_with_aliases,
    get_embedding_model_aliases,
    get_embedding_model,
@@ -20,6 +22,8 @@
    get_models_with_aliases,
    user_dir,
    set_alias,
    set_default_model,
    set_default_embedding_model,
    remove_alias,
)

@@ -41,8 +45,6 @@

warnings.simplefilter("ignore", ResourceWarning)

DEFAULT_MODEL = "gpt-4o-mini"

DEFAULT_TEMPLATE = "prompt: "


@@ -1574,30 +1576,6 @@ def _truncate_string(s, max_length=100):
    return s


-def get_default_model(filename="default_model.txt", default=DEFAULT_MODEL):
-    path = user_dir() / filename
-    if path.exists():
-        return path.read_text().strip()
-    else:
-        return default
-
-
-def set_default_model(model, filename="default_model.txt"):
-    path = user_dir() / filename
-    if model is None and path.exists():
-        path.unlink()
-    else:
-        path.write_text(model)
-
-
-def get_default_embedding_model():
-    return get_default_model("default_embedding_model.txt", None)
-
-
-def set_default_embedding_model(model):
-    set_default_model(model, "default_embedding_model.txt")


def logs_db_path():
    return user_dir() / "logs.db"

Expand Down
14 changes: 14 additions & 0 deletions tests/test_llm.py
@@ -5,6 +5,7 @@
from llm.migrations import migrate
import json
import os
import pathlib
import pytest
import re
import sqlite_utils
@@ -556,3 +557,16 @@ def test_llm_user_dir(tmpdir, monkeypatch):
    user_dir2 = llm.user_dir()
    assert user_dir == str(user_dir2)
    assert os.path.exists(user_dir)


def test_model_defaults(tmpdir, monkeypatch):
    user_dir = str(tmpdir / "u")
    monkeypatch.setenv("LLM_USER_PATH", user_dir)
    config_path = pathlib.Path(user_dir) / "default_model.txt"
    assert not config_path.exists()
    assert llm.get_default_model() == "gpt-4o-mini"
    assert llm.get_model().model_id == "gpt-4o-mini"
    llm.set_default_model("gpt-4o")
    assert config_path.exists()
    assert llm.get_default_model() == "gpt-4o"
    assert llm.get_model().model_id == "gpt-4o"
