Skip to content

Commit

Permalink
fix: stop always using gpt-4 model metadata when reducing
Browse files Browse the repository at this point in the history
  • Loading branch information
ErikBjare committed Dec 11, 2024
1 parent c05f9c6 commit 2343ab6
Show file tree
Hide file tree
Showing 2 changed files with 6 additions and 2 deletions.
4 changes: 4 additions & 0 deletions gptme/llm/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -156,6 +156,10 @@ class _ModelDictMeta(TypedDict):
}


def get_default_model() -> ModelMeta | None:
    """Return the metadata of the currently configured default model, or None if no default has been set."""
    return DEFAULT_MODEL


def set_default_model(model: str) -> None:
modelmeta = get_model(model)
assert modelmeta
Expand Down
4 changes: 2 additions & 2 deletions gptme/util/reduce.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
from collections.abc import Generator

from ..codeblock import Codeblock
from ..llm.models import DEFAULT_MODEL, get_model
from ..llm.models import get_default_model, get_model
from ..message import Message, len_tokens

logger = logging.getLogger(__name__)
Expand All @@ -21,7 +21,7 @@ def reduce_log(
) -> Generator[Message, None, None]:
"""Reduces log until it is below `limit` tokens by continually summarizing the longest messages until below the limit."""
# get the token limit
model = DEFAULT_MODEL or get_model("gpt-4")
model = get_default_model() or get_model("gpt-4")
if limit is None:
limit = 0.9 * model.context

Expand Down

0 comments on commit 2343ab6

Please sign in to comment.