diff --git a/Makefile b/Makefile
index ae161dc5..2b31c77c 100644
--- a/Makefile
+++ b/Makefile
@@ -160,4 +160,6 @@ cloc-total:
 	cloc ${SRCFILES} --by-file
 
 bench-importtime:
-	time poetry run python -X importtime -m gptme --model openrouter --non-interactive 2>&1 | grep "import time" | cut -d'|' -f 2- | sort -n
+	time poetry run python -X importtime -m gptme --model openai --non-interactive 2>&1 | grep "import time" | cut -d'|' -f 2- | sort -n
+	@#time poetry run python -X importtime -m gptme --model openrouter --non-interactive 2>&1 | grep "import time" | cut -d'|' -f 2- | sort -n
+	@#time poetry run python -X importtime -m gptme --model anthropic --non-interactive 2>&1 | grep "import time" | cut -d'|' -f 2- | sort -n
diff --git a/gptme/llm/llm_anthropic.py b/gptme/llm/llm_anthropic.py
index 48abc213..d9610faa 100644
--- a/gptme/llm/llm_anthropic.py
+++ b/gptme/llm/llm_anthropic.py
@@ -10,9 +10,6 @@
     cast,
 )
 
-import anthropic.types
-from anthropic import NOT_GIVEN
-from anthropic.types.beta.prompt_caching import PromptCachingBetaToolParam
 from typing_extensions import Required
 
 from ..constants import TEMPERATURE, TOP_P
@@ -23,6 +20,9 @@
 
 if TYPE_CHECKING:
+    # noreorder
+    import anthropic  # fmt: skip
+    import anthropic.types.beta.prompt_caching  # fmt: skip
     from anthropic import Anthropic  # fmt: skip
 
 _anthropic: "Anthropic | None" = None
@@ -59,6 +59,8 @@ class MessagePart(TypedDict, total=False):
 
 
 def chat(messages: list[Message], model: str, tools: list[ToolSpec] | None) -> str:
+    from anthropic import NOT_GIVEN  # fmt: skip
+
     assert _anthropic, "LLM not initialized"
     messages, system_messages = _transform_system_messages(messages)
 
@@ -84,6 +86,9 @@ def chat(messages: list[Message], model: str, tools: list[ToolSpec] | None) -> s
 
 def stream(
     messages: list[Message], model: str, tools: list[ToolSpec] | None
 ) -> Generator[str, None, None]:
+    import anthropic.types  # fmt: skip
+    from anthropic import NOT_GIVEN  # fmt: skip
+
     assert _anthropic, "LLM not initialized"
     messages, system_messages = _transform_system_messages(messages)
@@ -260,7 +265,9 @@ def parameters2dict(parameters: list[Parameter]) -> dict[str, object]:
     }
 
 
-def _spec2tool(spec: ToolSpec) -> "PromptCachingBetaToolParam":
+def _spec2tool(
+    spec: ToolSpec,
+) -> "anthropic.types.beta.prompt_caching.PromptCachingBetaToolParam":
     name = spec.name
     if spec.block_types:
         name = spec.block_types[0]
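The llm_anthropic.py changes above apply a lazy-import pattern: imports needed only for type annotations move under `if TYPE_CHECKING:` (evaluated by type checkers, never by the interpreter), while imports needed at runtime move into the functions that use them, so the anthropic SDK is only loaded once a chat actually starts rather than on every `import gptme`. The `# fmt: skip` / `# noreorder` comments keep formatters and import sorters from hoisting the deferred imports back to the top of the file. A minimal, self-contained sketch of the same pattern, using the stdlib `decimal` module as a stand-in for a heavy SDK:

    import sys
    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        # Seen only by type checkers; costs nothing at interpreter startup.
        from decimal import Decimal  # stands in for e.g. anthropic.Anthropic

    def total(prices: list[str]) -> "Decimal":
        # Deferred runtime import: loaded on first call, then cached in
        # sys.modules, so later calls pay no import cost.
        from decimal import Decimal

        return sum((Decimal(p) for p in prices), Decimal(0))

    assert "decimal" not in sys.modules  # holds in a fresh interpreter
    print(total(["1.50", "2.25"]))  # 3.75; the import happens here

Note that return annotations referencing the deferred types must become strings (or the module must use `from __future__ import annotations`), which is why `_spec2tool` grows a quoted return type. The llm_openai.py diff below gives the OpenAI SDK the same treatment.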
diff --git a/gptme/llm/llm_openai.py b/gptme/llm/llm_openai.py
index 9452b6ca..55849c71 100644
--- a/gptme/llm/llm_openai.py
+++ b/gptme/llm/llm_openai.py
@@ -3,21 +3,23 @@
 from collections.abc import Generator
 from pathlib import Path
 from typing import TYPE_CHECKING, Any, cast
 
-from openai.types.chat import ChatCompletionChunk, ChatCompletionToolParam
-from openai.types.chat.chat_completion_chunk import (
-    ChoiceDeltaToolCall,
-    ChoiceDeltaToolCallFunction,
-)
-
-from ..tools.base import ToolSpec, Parameter
 from ..config import Config
 from ..constants import TEMPERATURE, TOP_P
 from ..message import Message, msgs2dicts
+from ..tools.base import Parameter, ToolSpec
 from .models import Provider, get_model
 
 if TYPE_CHECKING:
-    from openai import OpenAI
+    # noreorder
+    from openai import OpenAI  # fmt: skip
+    from openai.types.chat import (  # fmt: skip
+        ChatCompletionToolParam,
+    )
+    from openai.types.chat.chat_completion_chunk import (  # fmt: skip
+        ChoiceDeltaToolCall,
+        ChoiceDeltaToolCallFunction,
+    )
 
 openai: "OpenAI | None" = None
 logger = logging.getLogger(__name__)
@@ -174,6 +176,8 @@ def stream(
         ),
     ):
         # Cast the chunk to the correct type
+        from openai.types.chat import ChatCompletionChunk  # fmt: skip
+
         chunk = cast(ChatCompletionChunk, chunk_raw)
 
         if not chunk.choices:
@@ -293,7 +297,7 @@ def parameters2dict(parameters: list[Parameter]) -> dict[str, object]:
     }
 
 
-def _spec2tool(spec: ToolSpec) -> ChatCompletionToolParam:
+def _spec2tool(spec: ToolSpec) -> "ChatCompletionToolParam":
     name = spec.name
     if spec.block_types:
         name = spec.block_types[0]
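With both providers deferred, the `bench-importtime` target (which filters and numerically sorts the per-module timings emitted by `python -X importtime`) should no longer show the SDK modules in the startup profile. One way to guard against regressions is a test that imports the wrapper module in a clean interpreter and asserts the SDK stayed out of sys.modules; a sketch, assuming `gptme.llm.llm_openai` imports cleanly without further configuration:

    import subprocess
    import sys

    # Use a fresh interpreter so modules already imported by the test
    # runner don't mask an eager import.
    code = (
        "import sys\n"
        "import gptme.llm.llm_openai\n"
        "assert 'openai' not in sys.modules, 'openai imported eagerly'\n"
    )
    subprocess.run([sys.executable, "-c", code], check=True)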