diff --git a/libs/core/langchain_core/_api/beta_decorator.py b/libs/core/langchain_core/_api/beta_decorator.py index 4d6810dbec3473..117242bb5f8932 100644 --- a/libs/core/langchain_core/_api/beta_decorator.py +++ b/libs/core/langchain_core/_api/beta_decorator.py @@ -50,7 +50,7 @@ def beta( ``@beta`` would mess up ``__init__`` inheritance when installing its own (annotation-emitting) ``C.__init__``). - Arguments: + Args: message : str, optional Override the default beta message. The %(since)s, %(name)s, %(alternative)s, %(obj_type)s, %(addendum)s, @@ -63,8 +63,7 @@ def beta( addendum : str, optional Additional text appended directly to the final message. - Examples - -------- + Examples: .. code-block:: python diff --git a/libs/core/langchain_core/_api/deprecation.py b/libs/core/langchain_core/_api/deprecation.py index 0e254053d88199..6c9f94b1dd5646 100644 --- a/libs/core/langchain_core/_api/deprecation.py +++ b/libs/core/langchain_core/_api/deprecation.py @@ -95,7 +95,7 @@ def deprecated( defaults to 'class' if decorating a class, 'attribute' if decorating a property, and 'function' otherwise. - Arguments: + Args: since : str The release at which this API became deprecated. message : str, optional @@ -122,8 +122,7 @@ def deprecated( since. Set to other Falsy values to not schedule a removal date. Cannot be used together with pending. - Examples - -------- + Examples: .. 
code-block:: python @@ -183,7 +182,7 @@ def warning_emitting_wrapper(*args: Any, **kwargs: Any) -> Any: async def awarning_emitting_wrapper(*args: Any, **kwargs: Any) -> Any: """Same as warning_emitting_wrapper, but for async functions.""" nonlocal warned if not warned and not is_caller_internal(): warned = True diff --git a/libs/core/langchain_core/agents.py b/libs/core/langchain_core/agents.py index e49a19af2ae578..3a7fea6d328c5b 100644 --- a/libs/core/langchain_core/agents.py +++ b/libs/core/langchain_core/agents.py @@ -74,7 +74,8 @@ def is_lc_serializable(cls) -> bool: @classmethod def get_lc_namespace(cls) -> list[str]: """Get the namespace of the langchain object. - Default is ["langchain", "schema", "agent"].""" + Default is ["langchain", "schema", "agent"]. + """ return ["langchain", "schema", "agent"] @property @@ -189,7 +190,6 @@ def _convert_agent_observation_to_messages( Returns: AIMessage that corresponds to the original tool invocation. """ - if isinstance(agent_action, AgentActionMessageLog): return [_create_function_message(agent_action, observation)] else: diff --git a/libs/core/langchain_core/beta/runnables/context.py b/libs/core/langchain_core/beta/runnables/context.py index a53e8fdf579690..2be721387cbcd0 100644 --- a/libs/core/langchain_core/beta/runnables/context.py +++ b/libs/core/langchain_core/beta/runnables/context.py @@ -307,8 +307,7 @@ async def ainvoke( class Context: - """ - Context for a runnable. + """Context for a runnable. The `Context` class provides methods for creating context scopes, getters, and setters within a runnable. It allows for managing diff --git a/libs/core/langchain_core/callbacks/base.py b/libs/core/langchain_core/callbacks/base.py index b3a61aa9a2a205..98fd824ec182d2 100644 --- a/libs/core/langchain_core/callbacks/base.py +++ b/libs/core/langchain_core/callbacks/base.py @@ -131,7 +131,8 @@ def on_chain_end( outputs (Dict[str, Any]): The outputs of the chain. run_id (UUID): The run ID.
This is the ID of the current run. parent_run_id (UUID): The parent run ID. This is the ID of the parent run. - kwargs (Any): Additional keyword arguments.""" + kwargs (Any): Additional keyword arguments. + """ def on_chain_error( self, @@ -147,7 +148,8 @@ def on_chain_error( error (BaseException): The error that occurred. run_id (UUID): The run ID. This is the ID of the current run. parent_run_id (UUID): The parent run ID. This is the ID of the parent run. - kwargs (Any): Additional keyword arguments.""" + kwargs (Any): Additional keyword arguments. + """ def on_agent_action( self, @@ -163,7 +165,8 @@ def on_agent_action( action (AgentAction): The agent action. run_id (UUID): The run ID. This is the ID of the current run. parent_run_id (UUID): The parent run ID. This is the ID of the parent run. - kwargs (Any): Additional keyword arguments.""" + kwargs (Any): Additional keyword arguments. + """ def on_agent_finish( self, @@ -179,7 +182,8 @@ def on_agent_finish( finish (AgentFinish): The agent finish. run_id (UUID): The run ID. This is the ID of the current run. parent_run_id (UUID): The parent run ID. This is the ID of the parent run. - kwargs (Any): Additional keyword arguments.""" + kwargs (Any): Additional keyword arguments. + """ class ToolManagerMixin: @@ -199,7 +203,8 @@ def on_tool_end( output (Any): The output of the tool. run_id (UUID): The run ID. This is the ID of the current run. parent_run_id (UUID): The parent run ID. This is the ID of the parent run. - kwargs (Any): Additional keyword arguments.""" + kwargs (Any): Additional keyword arguments. + """ def on_tool_error( self, @@ -215,7 +220,8 @@ def on_tool_error( error (BaseException): The error that occurred. run_id (UUID): The run ID. This is the ID of the current run. parent_run_id (UUID): The parent run ID. This is the ID of the parent run. - kwargs (Any): Additional keyword arguments.""" + kwargs (Any): Additional keyword arguments. 
+ """ class CallbackManagerMixin: @@ -824,7 +830,8 @@ async def on_retriever_end( run_id (UUID): The run ID. This is the ID of the current run. parent_run_id (UUID): The parent run ID. This is the ID of the parent run. tags (Optional[List[str]]): The tags. - kwargs (Any): Additional keyword arguments.""" + kwargs (Any): Additional keyword arguments. + """ async def on_retriever_error( self, diff --git a/libs/core/langchain_core/callbacks/manager.py b/libs/core/langchain_core/callbacks/manager.py index cabe04b6a94d11..29591a8c285b77 100644 --- a/libs/core/langchain_core/callbacks/manager.py +++ b/libs/core/langchain_core/callbacks/manager.py @@ -164,6 +164,7 @@ async def atrace_as_chain_group( Defaults to None. metadata (Dict[str, Any], optional): The metadata to apply to all runs. Defaults to None. + Returns: AsyncCallbackManager: The async callback manager for the chain group. @@ -216,8 +217,7 @@ async def atrace_as_chain_group( def shielded(func: Func) -> Func: - """ - Makes so an awaitable method is always shielded from cancellation. + """Makes so an awaitable method is always shielded from cancellation. Args: func (Callable): The function to shield. @@ -1310,7 +1310,6 @@ def on_chat_model_start( List[CallbackManagerForLLMRun]: A callback manager for each list of messages as an LLM run. """ - managers = [] for message_list in messages: if run_id is not None: @@ -1729,7 +1728,6 @@ async def on_llm_start( callback managers, one for each LLM Run corresponding to each prompt. """ - inline_tasks = [] non_inline_tasks = [] inline_handlers = [handler for handler in self.handlers if handler.run_inline] diff --git a/libs/core/langchain_core/chat_history.py b/libs/core/langchain_core/chat_history.py index 4a579b4a985d21..1b5c1e7f782742 100644 --- a/libs/core/langchain_core/chat_history.py +++ b/libs/core/langchain_core/chat_history.py @@ -1,6 +1,5 @@ """**Chat message history** stores a history of the message interactions in a chat. - **Class hierarchy:** .. 
code-block:: @@ -187,10 +186,10 @@ async def aadd_messages(self, messages: Sequence[BaseMessage]) -> None: @abstractmethod def clear(self) -> None: - """Remove all messages from the store""" + """Remove all messages from the store.""" async def aclear(self) -> None: - """Async remove all messages from the store""" + """Async remove all messages from the store.""" from langchain_core.runnables.config import run_in_executor await run_in_executor(None, self.clear) diff --git a/libs/core/langchain_core/chat_sessions.py b/libs/core/langchain_core/chat_sessions.py index ededbc3155e80a..6cbc19fd1061fd 100644 --- a/libs/core/langchain_core/chat_sessions.py +++ b/libs/core/langchain_core/chat_sessions.py @@ -8,7 +8,8 @@ class ChatSession(TypedDict, total=False): """Chat Session represents a single - conversation, channel, or other group of messages.""" + conversation, channel, or other group of messages. + """ messages: Sequence[BaseMessage] """A sequence of the LangChain chat messages loaded from the source.""" diff --git a/libs/core/langchain_core/document_loaders/base.py b/libs/core/langchain_core/document_loaders/base.py index 2f458731916ce6..b2cd20038eb345 100644 --- a/libs/core/langchain_core/document_loaders/base.py +++ b/libs/core/langchain_core/document_loaders/base.py @@ -48,7 +48,6 @@ def load_and_split( Returns: List of Documents. """ - if text_splitter is None: try: from langchain_text_splitters import RecursiveCharacterTextSplitter diff --git a/libs/core/langchain_core/example_selectors/base.py b/libs/core/langchain_core/example_selectors/base.py index a70c680e83630a..e344e80551f577 100644 --- a/libs/core/langchain_core/example_selectors/base.py +++ b/libs/core/langchain_core/example_selectors/base.py @@ -15,15 +15,16 @@ def add_example(self, example: dict[str, str]) -> Any: Args: example: A dictionary with keys as input variables - and values as their values.""" + and values as their values. 
+ """ async def aadd_example(self, example: dict[str, str]) -> Any: """Async add new example to store. Args: example: A dictionary with keys as input variables - and values as their values.""" - + and values as their values. + """ return await run_in_executor(None, self.add_example, example) @abstractmethod @@ -32,13 +33,14 @@ def select_examples(self, input_variables: dict[str, str]) -> list[dict]: Args: input_variables: A dictionary with keys as input variables - and values as their values.""" + and values as their values. + """ async def aselect_examples(self, input_variables: dict[str, str]) -> list[dict]: """Async select which examples to use based on the inputs. Args: input_variables: A dictionary with keys as input variables - and values as their values.""" - + and values as their values. + """ return await run_in_executor(None, self.select_examples, input_variables) diff --git a/libs/core/langchain_core/example_selectors/length_based.py b/libs/core/langchain_core/example_selectors/length_based.py index e393e2a1dee62c..792e6317cfd8c8 100644 --- a/libs/core/langchain_core/example_selectors/length_based.py +++ b/libs/core/langchain_core/example_selectors/length_based.py @@ -50,7 +50,6 @@ async def aadd_example(self, example: dict[str, str]) -> None: example: A dictionary with keys as input variables and values as their values. """ - self.add_example(example) @model_validator(mode="after") diff --git a/libs/core/langchain_core/indexing/api.py b/libs/core/langchain_core/indexing/api.py index 3a8d859379fd8d..11343d17f71848 100644 --- a/libs/core/langchain_core/indexing/api.py +++ b/libs/core/langchain_core/indexing/api.py @@ -241,7 +241,7 @@ def index( For the time being, documents are indexed using their hashes, and users are not able to specify the uid of the document. - IMPORTANT: + Important: * In full mode, the loader should be returning the entire dataset, and not just a subset of the dataset. 
Otherwise, the auto_cleanup will remove documents that it is not @@ -546,7 +546,7 @@ async def aindex( For the time being, documents are indexed using their hashes, and users are not able to specify the uid of the document. - IMPORTANT: + Important: * In full mode, the loader should be returning the entire dataset, and not just a subset of the dataset. Otherwise, the auto_cleanup will remove documents that it is not @@ -614,7 +614,6 @@ async def aindex( * Added `scoped_full` cleanup mode. """ - if cleanup not in {"incremental", "full", "scoped_full", None}: msg = ( f"cleanup should be one of 'incremental', 'full', 'scoped_full' or None. " diff --git a/libs/core/langchain_core/indexing/base.py b/libs/core/langchain_core/indexing/base.py index d9418de7408d50..d2a7d09e58a16b 100644 --- a/libs/core/langchain_core/indexing/base.py +++ b/libs/core/langchain_core/indexing/base.py @@ -288,7 +288,6 @@ def update( ids. ValueError: If time_at_least is in the future. """ - if group_ids and len(keys) != len(group_ids): msg = "Length of keys must match length of group_ids" raise ValueError(msg) diff --git a/libs/core/langchain_core/language_models/chat_models.py b/libs/core/langchain_core/language_models/chat_models.py index 0cfaf434a3ac0a..f3d94e49f01a34 100644 --- a/libs/core/langchain_core/language_models/chat_models.py +++ b/libs/core/langchain_core/language_models/chat_models.py @@ -84,7 +84,6 @@ def generate_from_stream(stream: Iterator[ChatGenerationChunk]) -> ChatResult: Returns: ChatResult: Chat result. """ - generation = next(stream, None) if generation: generation += list(stream) @@ -112,7 +111,6 @@ async def agenerate_from_stream( Returns: ChatResult: Chat result. 
""" - chunks = [chunk async for chunk in stream] return await run_in_executor(None, generate_from_stream, iter(chunks)) @@ -521,7 +519,6 @@ def _get_ls_params( **kwargs: Any, ) -> LangSmithParams: """Get standard params for tracing.""" - # get default provider from class name default_provider = self.__class__.__name__ if default_provider.startswith("Chat"): @@ -955,7 +952,7 @@ def _generate( run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: - """Top Level call""" + """Top Level call.""" async def _agenerate( self, @@ -964,7 +961,7 @@ async def _agenerate( run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: - """Top Level call""" + """Top Level call.""" return await run_in_executor( None, self._generate, diff --git a/libs/core/langchain_core/language_models/fake.py b/libs/core/langchain_core/language_models/fake.py index 74545f3eca9291..64bb637068daba 100644 --- a/libs/core/langchain_core/language_models/fake.py +++ b/libs/core/langchain_core/language_models/fake.py @@ -42,7 +42,7 @@ def _call( run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: - """Return next response""" + """Return next response.""" response = self.responses[self.i] if self.i < len(self.responses) - 1: self.i += 1 @@ -57,7 +57,7 @@ async def _acall( run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: - """Return next response""" + """Return next response.""" response = self.responses[self.i] if self.i < len(self.responses) - 1: self.i += 1 diff --git a/libs/core/langchain_core/language_models/fake_chat_models.py b/libs/core/langchain_core/language_models/fake_chat_models.py index 2f3c74a4f5c4e2..a77a12d9d10b94 100644 --- a/libs/core/langchain_core/language_models/fake_chat_models.py +++ b/libs/core/langchain_core/language_models/fake_chat_models.py @@ -220,7 +220,7 @@ def _generate( run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) 
-> ChatResult: - """Top Level call""" + """Top Level call.""" message = next(self.messages) message_ = AIMessage(content=message) if isinstance(message, str) else message generation = ChatGeneration(message=message_) @@ -342,7 +342,7 @@ def _generate( run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: - """Top Level call""" + """Top Level call.""" return ChatResult(generations=[ChatGeneration(message=messages[-1])]) @property diff --git a/libs/core/langchain_core/language_models/llms.py b/libs/core/langchain_core/language_models/llms.py index 72073852101296..4e077b277be145 100644 --- a/libs/core/langchain_core/language_models/llms.py +++ b/libs/core/langchain_core/language_models/llms.py @@ -91,7 +91,6 @@ def create_base_retry_decorator( Raises: ValueError: If the cache is not set and cache is True. """ - _logging = before_sleep_log(logger, logging.WARNING) def _before_sleep(retry_state: RetryCallState) -> None: @@ -278,7 +277,6 @@ async def aupdate_cache( Raises: ValueError: If the cache is not set and cache is True. """ - llm_cache = _resolve_cache(cache) for i, result in enumerate(new_results.generations): existing_prompts[missing_prompt_idxs[i]] = result @@ -292,7 +290,8 @@ async def aupdate_cache( class BaseLLM(BaseLanguageModel[str], ABC): """Base LLM abstract interface. - It should take in a prompt and return a string.""" + It should take in a prompt and return a string. 
+ """ callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True) """[DEPRECATED]""" @@ -346,7 +345,6 @@ def _get_ls_params( **kwargs: Any, ) -> LangSmithParams: """Get standard params for tracing.""" - # get default provider from class name default_provider = self.__class__.__name__ if default_provider.endswith("LLM"): diff --git a/libs/core/langchain_core/load/mapping.py b/libs/core/langchain_core/load/mapping.py index d3863e65759ead..ca88faac28d615 100644 --- a/libs/core/langchain_core/load/mapping.py +++ b/libs/core/langchain_core/load/mapping.py @@ -1,5 +1,4 @@ -""" -This file contains a mapping between the lc_namespace path for a given +"""This file contains a mapping between the lc_namespace path for a given subclass that implements from Serializable to the namespace where that class is actually located. diff --git a/libs/core/langchain_core/messages/function.py b/libs/core/langchain_core/messages/function.py index 080f5807788199..2cc346de81230e 100644 --- a/libs/core/langchain_core/messages/function.py +++ b/libs/core/langchain_core/messages/function.py @@ -28,7 +28,8 @@ class FunctionMessage(BaseMessage): @classmethod def get_lc_namespace(cls) -> list[str]: """Get the namespace of the langchain object. - Default is ["langchain", "schema", "messages"].""" + Default is ["langchain", "schema", "messages"]. + """ return ["langchain", "schema", "messages"] @@ -48,7 +49,8 @@ class FunctionMessageChunk(FunctionMessage, BaseMessageChunk): @classmethod def get_lc_namespace(cls) -> list[str]: """Get the namespace of the langchain object. - Default is ["langchain", "schema", "messages"].""" + Default is ["langchain", "schema", "messages"]. 
+ """ return ["langchain", "schema", "messages"] def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore diff --git a/libs/core/langchain_core/messages/human.py b/libs/core/langchain_core/messages/human.py index 8a847e39329d04..825e62af4bcf9e 100644 --- a/libs/core/langchain_core/messages/human.py +++ b/libs/core/langchain_core/messages/human.py @@ -41,7 +41,8 @@ class HumanMessage(BaseMessage): @classmethod def get_lc_namespace(cls) -> list[str]: """Get the namespace of the langchain object. - Default is ["langchain", "schema", "messages"].""" + Default is ["langchain", "schema", "messages"]. + """ return ["langchain", "schema", "messages"] def __init__( @@ -72,5 +73,6 @@ class HumanMessageChunk(HumanMessage, BaseMessageChunk): @classmethod def get_lc_namespace(cls) -> list[str]: """Get the namespace of the langchain object. - Default is ["langchain", "schema", "messages"].""" + Default is ["langchain", "schema", "messages"]. + """ return ["langchain", "schema", "messages"] diff --git a/libs/core/langchain_core/messages/modifier.py b/libs/core/langchain_core/messages/modifier.py index a7face34fe95a6..89d33332e637d7 100644 --- a/libs/core/langchain_core/messages/modifier.py +++ b/libs/core/langchain_core/messages/modifier.py @@ -28,7 +28,8 @@ def __init__(self, id: str, **kwargs: Any) -> None: @classmethod def get_lc_namespace(cls) -> list[str]: """Get the namespace of the langchain object. - Default is ["langchain", "schema", "messages"].""" + Default is ["langchain", "schema", "messages"]. + """ return ["langchain", "schema", "messages"] diff --git a/libs/core/langchain_core/messages/system.py b/libs/core/langchain_core/messages/system.py index a767fc21af2ad7..bb04d1a93b28d4 100644 --- a/libs/core/langchain_core/messages/system.py +++ b/libs/core/langchain_core/messages/system.py @@ -35,7 +35,8 @@ class SystemMessage(BaseMessage): @classmethod def get_lc_namespace(cls) -> list[str]: """Get the namespace of the langchain object. 
- Default is ["langchain", "schema", "messages"].""" + Default is ["langchain", "schema", "messages"]. + """ return ["langchain", "schema", "messages"] def __init__( @@ -66,5 +67,6 @@ class SystemMessageChunk(SystemMessage, BaseMessageChunk): @classmethod def get_lc_namespace(cls) -> list[str]: """Get the namespace of the langchain object. - Default is ["langchain", "schema", "messages"].""" + Default is ["langchain", "schema", "messages"]. + """ return ["langchain", "schema", "messages"] diff --git a/libs/core/langchain_core/messages/tool.py b/libs/core/langchain_core/messages/tool.py index 873f872cef2685..5c14ae045af3ed 100644 --- a/libs/core/langchain_core/messages/tool.py +++ b/libs/core/langchain_core/messages/tool.py @@ -89,7 +89,8 @@ class ToolMessage(BaseMessage, ToolOutputMixin): @classmethod def get_lc_namespace(cls) -> list[str]: """Get the namespace of the langchain object. - Default is ["langchain", "schema", "messages"].""" + Default is ["langchain", "schema", "messages"]. + """ return ["langchain", "schema", "messages"] @model_validator(mode="before") diff --git a/libs/core/langchain_core/messages/utils.py b/libs/core/langchain_core/messages/utils.py index 435bd1e2f1e072..b3735383b17465 100644 --- a/libs/core/langchain_core/messages/utils.py +++ b/libs/core/langchain_core/messages/utils.py @@ -817,7 +817,6 @@ def dummy_token_counter(messages: list[BaseMessage]) -> int: AIMessage( [{"type": "text", "text": "This is the FIRST 4 token block."}], id="second"), ] """ # noqa: E501 - if start_on and strategy == "first": raise ValueError if include_system and strategy == "first": diff --git a/libs/core/langchain_core/output_parsers/list.py b/libs/core/langchain_core/output_parsers/list.py index ebaca8f8ca94f8..06c015f2931146 100644 --- a/libs/core/langchain_core/output_parsers/list.py +++ b/libs/core/langchain_core/output_parsers/list.py @@ -46,8 +46,8 @@ def parse(self, text: str) -> list[str]: Args: text: The output of an LLM call. 
- Returns: - A list of strings. + Returns: + A list of strings. """ def parse_iter(self, text: str) -> Iterator[re.Match]: @@ -135,7 +135,9 @@ class CommaSeparatedListOutputParser(ListOutputParser): @classmethod def is_lc_serializable(cls) -> bool: """Check if the langchain object is serializable. - Returns True.""" + + Returns True. + """ return True @classmethod diff --git a/libs/core/langchain_core/output_parsers/openai_functions.py b/libs/core/langchain_core/output_parsers/openai_functions.py index 66b8865a1ea2bd..118090bc69bb28 100644 --- a/libs/core/langchain_core/output_parsers/openai_functions.py +++ b/libs/core/langchain_core/output_parsers/openai_functions.py @@ -84,7 +84,6 @@ def parse_result(self, result: list[Generation], *, partial: bool = False) -> An Raises: OutputParserException: If the output is not valid JSON. """ - if len(result) != 1: msg = f"Expected exactly one result, but got {len(result)}" raise OutputParserException(msg) @@ -189,7 +188,6 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser): the provided schema. Example: - ... code-block:: python message = AIMessage( diff --git a/libs/core/langchain_core/output_parsers/openai_tools.py b/libs/core/langchain_core/output_parsers/openai_tools.py index 2944eff4223ebf..9b05ed813fb34c 100644 --- a/libs/core/langchain_core/output_parsers/openai_tools.py +++ b/libs/core/langchain_core/output_parsers/openai_tools.py @@ -168,7 +168,6 @@ def parse_result(self, result: list[Generation], *, partial: bool = False) -> An Raises: OutputParserException: If the output is not valid JSON. """ - generation = result[0] if not isinstance(generation, ChatGeneration): msg = "This output parser can only be used with a chat generation." 
diff --git a/libs/core/langchain_core/prompt_values.py b/libs/core/langchain_core/prompt_values.py index d8b19fcf23d7b4..c3092d92453691 100644 --- a/libs/core/langchain_core/prompt_values.py +++ b/libs/core/langchain_core/prompt_values.py @@ -129,7 +129,8 @@ def to_messages(self) -> list[BaseMessage]: class ChatPromptValueConcrete(ChatPromptValue): """Chat prompt value which explicitly lists out the message types it accepts. - For use in external schemas.""" + For use in external schemas. + """ messages: Sequence[AnyMessage] """Sequence of messages.""" diff --git a/libs/core/langchain_core/prompts/base.py b/libs/core/langchain_core/prompts/base.py index 803302f1898d01..bb85416d3ffa4e 100644 --- a/libs/core/langchain_core/prompts/base.py +++ b/libs/core/langchain_core/prompts/base.py @@ -98,13 +98,15 @@ def validate_variable_names(self) -> Self: @classmethod def get_lc_namespace(cls) -> list[str]: """Get the namespace of the langchain object. - Returns ["langchain", "schema", "prompt_template"].""" + Returns ["langchain", "schema", "prompt_template"]. + """ return ["langchain", "schema", "prompt_template"] @classmethod def is_lc_serializable(cls) -> bool: """Return whether this class is serializable. - Returns True.""" + Returns True. + """ return True model_config = ConfigDict( diff --git a/libs/core/langchain_core/prompts/chat.py b/libs/core/langchain_core/prompts/chat.py index 1629962ba13330..27a8755f1b4eac 100644 --- a/libs/core/langchain_core/prompts/chat.py +++ b/libs/core/langchain_core/prompts/chat.py @@ -54,7 +54,8 @@ class BaseMessagePromptTemplate(Serializable, ABC): @classmethod def is_lc_serializable(cls) -> bool: """Return whether or not the class is serializable. - Returns: True""" + Returns: True. + """ return True @classmethod @@ -392,8 +393,7 @@ async def aformat_messages(self, **kwargs: Any) -> list[BaseMessage]: @property def input_variables(self) -> list[str]: - """ - Input variables for this prompt template. 
+ """Input variables for this prompt template. Returns: List of input variable names. @@ -624,8 +624,7 @@ async def aformat_messages(self, **kwargs: Any) -> list[BaseMessage]: @property def input_variables(self) -> list[str]: - """ - Input variables for this prompt template. + """Input variables for this prompt template. Returns: List of input variable names. @@ -742,8 +741,7 @@ class BaseChatPromptTemplate(BasePromptTemplate, ABC): @property def lc_attributes(self) -> dict: - """ - Return a list of attribute names that should be included in the + """Return a list of attribute names that should be included in the serialized kwargs. These attributes must be accepted by the constructor. """ @@ -980,7 +978,6 @@ def __init__( A chat prompt template. Examples: - Instantiation from a list of message templates: .. code-block:: python @@ -1173,7 +1170,6 @@ def from_messages( """Create a chat prompt template from a variety of message formats. Examples: - Instantiation from a list of message templates: .. code-block:: python diff --git a/libs/core/langchain_core/prompts/few_shot.py b/libs/core/langchain_core/prompts/few_shot.py index 40f1d86a64cccf..5794a913325313 100644 --- a/libs/core/langchain_core/prompts/few_shot.py +++ b/libs/core/langchain_core/prompts/few_shot.py @@ -265,7 +265,6 @@ class FewShotChatMessagePromptTemplate( to dynamically select examples based on the input. 
Examples: - Prompt template with a fixed list of examples (matching the sample conversation above): diff --git a/libs/core/langchain_core/prompts/loading.py b/libs/core/langchain_core/prompts/loading.py index 3d65f8323f1b8f..d6928c72717a16 100644 --- a/libs/core/langchain_core/prompts/loading.py +++ b/libs/core/langchain_core/prompts/loading.py @@ -184,8 +184,7 @@ def _load_prompt_from_file( def _load_chat_prompt(config: dict) -> ChatPromptTemplate: - """Load chat prompt from config""" - + """Load chat prompt from config.""" messages = config.pop("messages") template = messages[0]["prompt"].pop("template") if messages else None config.pop("input_variables") diff --git a/libs/core/langchain_core/prompts/pipeline.py b/libs/core/langchain_core/prompts/pipeline.py index f316ba3d121751..6124acb4058b1f 100644 --- a/libs/core/langchain_core/prompts/pipeline.py +++ b/libs/core/langchain_core/prompts/pipeline.py @@ -23,8 +23,7 @@ def _get_inputs(inputs: dict, input_variables: list[str]) -> dict: ), ) class PipelinePromptTemplate(BasePromptTemplate): - """ - This has been deprecated in favor of chaining individual prompts together in your + """This has been deprecated in favor of chaining individual prompts together in your code. E.g. using a for loop, you could do: .. code-block:: python diff --git a/libs/core/langchain_core/prompts/prompt.py b/libs/core/langchain_core/prompts/prompt.py index 325ee067ecaf9e..37f7eda64acff4 100644 --- a/libs/core/langchain_core/prompts/prompt.py +++ b/libs/core/langchain_core/prompts/prompt.py @@ -284,7 +284,6 @@ def from_template( Returns: The prompt template loaded from the template. 
""" - input_variables = get_template_variables(template, template_format) _partial_variables = partial_variables or {} diff --git a/libs/core/langchain_core/prompts/string.py b/libs/core/langchain_core/prompts/string.py index f9128147e2b1ae..61fc1d35d4f0b4 100644 --- a/libs/core/langchain_core/prompts/string.py +++ b/libs/core/langchain_core/prompts/string.py @@ -64,8 +64,7 @@ def jinja2_formatter(template: str, /, **kwargs: Any) -> str: def validate_jinja2(template: str, input_variables: list[str]) -> None: - """ - Validate that the input variables are valid for the template. + """Validate that the input variables are valid for the template. Issues a warning if missing or extra variables are found. Args: diff --git a/libs/core/langchain_core/prompts/structured.py b/libs/core/langchain_core/prompts/structured.py index a3a01cd68f3e1d..1f38695760be6f 100644 --- a/libs/core/langchain_core/prompts/structured.py +++ b/libs/core/langchain_core/prompts/structured.py @@ -72,7 +72,6 @@ def from_messages_and_schema( """Create a chat prompt template from a variety of message formats. Examples: - Instantiation from a list of message templates: .. code-block:: python diff --git a/libs/core/langchain_core/retrievers.py b/libs/core/langchain_core/retrievers.py index e1be4588081a2e..0f70c6e492718b 100644 --- a/libs/core/langchain_core/retrievers.py +++ b/libs/core/langchain_core/retrievers.py @@ -199,7 +199,6 @@ async def _aget_relevant_documents( def _get_ls_params(self, **kwargs: Any) -> LangSmithRetrieverParams: """Get standard params for tracing.""" - default_retriever_name = self.get_name() if default_retriever_name.startswith("Retriever"): default_retriever_name = default_retriever_name[9:] @@ -342,6 +341,7 @@ def _get_relevant_documents( Args: query: String to find relevant documents for. run_manager: The callback handler to use. + Returns: List of relevant documents. 
""" diff --git a/libs/core/langchain_core/runnables/base.py b/libs/core/langchain_core/runnables/base.py index 893f393d8b1747..0f62bdb506d0ab 100644 --- a/libs/core/langchain_core/runnables/base.py +++ b/libs/core/langchain_core/runnables/base.py @@ -483,7 +483,6 @@ def config_schema( Returns: A pydantic model that can be used to validate config. """ - include = include or [] config_specs = self.config_specs configurable = ( @@ -817,8 +816,8 @@ def batch_as_completed( **kwargs: Optional[Any], ) -> Iterator[tuple[int, Union[Output, Exception]]]: """Run invoke in parallel on a list of inputs, - yielding results as they complete.""" - + yielding results as they complete. + """ if not inputs: return @@ -949,7 +948,6 @@ async def abatch_as_completed( Yields: A tuple of the index of the input and the output from the Runnable. """ - if not inputs: return @@ -981,8 +979,7 @@ def stream( config: Optional[RunnableConfig] = None, **kwargs: Optional[Any], ) -> Iterator[Output]: - """ - Default implementation of stream, which calls invoke. + """Default implementation of stream, which calls invoke. Subclasses should override this method if they support streaming output. Args: @@ -1001,8 +998,7 @@ async def astream( config: Optional[RunnableConfig] = None, **kwargs: Optional[Any], ) -> AsyncIterator[Output]: - """ - Default implementation of astream, which calls ainvoke. + """Default implementation of astream, which calls ainvoke. Subclasses should override this method if they support streaming output. Args: @@ -1064,8 +1060,7 @@ async def astream_log( exclude_tags: Optional[Sequence[str]] = None, **kwargs: Any, ) -> Union[AsyncIterator[RunLogPatch], AsyncIterator[RunLog]]: - """ - Stream all output from a Runnable, as reported to the callback system. + """Stream all output from a Runnable, as reported to the callback system. This includes all inner runs of LLMs, Retrievers, Tools, etc. 
Output is streamed as Log objects, which include a list of @@ -1392,8 +1387,8 @@ def transform( config: Optional[RunnableConfig] = None, **kwargs: Optional[Any], ) -> Iterator[Output]: - """ - Default implementation of transform, which buffers input and then calls stream. + """Default implementation of transform, which buffers input and calls stream. + Subclasses should override this method if they can start producing output while input is still being generated. @@ -1434,8 +1429,7 @@ async def atransform( config: Optional[RunnableConfig] = None, **kwargs: Optional[Any], ) -> AsyncIterator[Output]: - """ - Default implementation of atransform, which buffers input and calls astream. + """Default implementation of atransform, which buffers input and calls astream. Subclasses should override this method if they can start producing output while input is still being generated. @@ -1472,8 +1466,7 @@ async def atransform( yield output def bind(self, **kwargs: Any) -> Runnable[Input, Output]: - """ - Bind arguments to a Runnable, returning a new Runnable. + """Bind arguments to a Runnable, returning a new Runnable. Useful when a Runnable in a chain requires an argument that is not in the output of the previous Runnable or included in the user input. @@ -1520,8 +1513,7 @@ def with_config( # Sadly Unpack is not well-supported by mypy so this will have to be untyped **kwargs: Any, ) -> Runnable[Input, Output]: - """ - Bind config to a Runnable, returning a new Runnable. + """Bind config to a Runnable, returning a new Runnable. Args: config: The config to bind to the Runnable. @@ -1552,8 +1544,7 @@ def with_listeners( Union[Callable[[Run], None], Callable[[Run, RunnableConfig], None]] ] = None, ) -> Runnable[Input, Output]: - """ - Bind lifecycle listeners to a Runnable, returning a new Runnable. + """Bind lifecycle listeners to a Runnable, returning a new Runnable. on_start: Called before the Runnable starts running, with the Run object. 
on_end: Called after the Runnable finishes running, with the Run object. @@ -1620,8 +1611,7 @@ def with_alisteners( on_end: Optional[AsyncListener] = None, on_error: Optional[AsyncListener] = None, ) -> Runnable[Input, Output]: - """ - Bind asynchronous lifecycle listeners to a Runnable, returning a new Runnable. + """Bind async lifecycle listeners to a Runnable, returning a new Runnable. on_start: Asynchronously called before the Runnable starts running. on_end: Asynchronously called after the Runnable finishes running. @@ -1711,8 +1701,7 @@ def with_types( input_type: Optional[type[Input]] = None, output_type: Optional[type[Output]] = None, ) -> Runnable[Input, Output]: - """ - Bind input and output types to a Runnable, returning a new Runnable. + """Bind input and output types to a Runnable, returning a new Runnable. Args: input_type: The input type to bind to the Runnable. Defaults to None. @@ -1799,8 +1788,7 @@ def _lambda(x: int) -> None: ) def map(self) -> Runnable[list[Input], list[Output]]: - """ - Return a new Runnable that maps a list of inputs to a list of outputs, + """Return a new Runnable that maps a list of inputs to a list of outputs, by calling invoke() with each input. Returns: @@ -1906,7 +1894,8 @@ def _call_with_config( **kwargs: Optional[Any], ) -> Output: """Helper method to transform an Input value to an Output value, - with callbacks. Use this method to implement invoke() in subclasses.""" + with callbacks. Use this method to implement invoke() in subclasses. + """ config = ensure_config(config) callback_manager = get_callback_manager_for_config(config) run_manager = callback_manager.on_chain_start( @@ -1955,7 +1944,8 @@ async def _acall_with_config( **kwargs: Optional[Any], ) -> Output: """Helper method to transform an Input value to an Output value, - with callbacks. Use this method to implement ainvoke() in subclasses.""" + with callbacks. Use this method to implement ainvoke() in subclasses. 
+ """ config = ensure_config(config) callback_manager = get_async_callback_manager_for_config(config) run_manager = await callback_manager.on_chain_start( @@ -2004,7 +1994,8 @@ def _batch_with_config( **kwargs: Optional[Any], ) -> list[Output]: """Helper method to transform an Input value to an Output value, - with callbacks. Use this method to implement invoke() in subclasses.""" + with callbacks. Use this method to implement invoke() in subclasses. + """ if not input: return [] @@ -2076,7 +2067,8 @@ async def _abatch_with_config( **kwargs: Optional[Any], ) -> list[Output]: """Helper method to transform an Input value to an Output value, - with callbacks. Use this method to implement invoke() in subclasses.""" + with callbacks. Use this method to implement invoke() in subclasses. + """ if not input: return [] @@ -2149,7 +2141,8 @@ def _transform_stream_with_config( ) -> Iterator[Output]: """Helper method to transform an Iterator of Input values into an Iterator of Output values, with callbacks. - Use this to implement `stream()` or `transform()` in Runnable subclasses.""" + Use this to implement `stream()` or `transform()` in Runnable subclasses. + """ # Mixin that is used by both astream log and astream events implementation from langchain_core.tracers._streaming import _StreamingCallbackHandler @@ -2249,7 +2242,8 @@ async def _atransform_stream_with_config( ) -> AsyncIterator[Output]: """Helper method to transform an Async Iterator of Input values into an Async Iterator of Output values, with callbacks. - Use this to implement `astream()` or `atransform()` in Runnable subclasses.""" + Use this to implement `astream()` or `atransform()` in Runnable subclasses. + """ # Mixin that is used by both astream log and astream events implementation from langchain_core.tracers._streaming import _StreamingCallbackHandler @@ -5601,7 +5595,6 @@ class RunnableBinding(RunnableBindingBase[Input, Output]): - ``with_fallbacks``: Bind a fallback policy to the underlying Runnable. 
Example: - `bind`: Bind kwargs to pass to the underlying Runnable when running it. .. code-block:: python diff --git a/libs/core/langchain_core/runnables/config.py b/libs/core/langchain_core/runnables/config.py index 7c285137fafe3f..62750d7bdb4214 100644 --- a/libs/core/langchain_core/runnables/config.py +++ b/libs/core/langchain_core/runnables/config.py @@ -116,7 +116,7 @@ class RunnableConfig(TypedDict, total=False): def _set_config_context(config: RunnableConfig) -> None: - """Set the child Runnable config + tracing context + """Set the child Runnable config + tracing context. Args: config (RunnableConfig): The config to set. diff --git a/libs/core/langchain_core/runnables/configurable.py b/libs/core/langchain_core/runnables/configurable.py index 35f976340cab7b..b59d0239fb1f3e 100644 --- a/libs/core/langchain_core/runnables/configurable.py +++ b/libs/core/langchain_core/runnables/configurable.py @@ -404,7 +404,8 @@ def configurable_fields( self, **kwargs: AnyConfigurableField ) -> RunnableSerializable[Input, Output]: """Get a new RunnableConfigurableFields with the specified - configurable fields.""" + configurable fields. 
+ """ return self.default.configurable_fields(**{**self.fields, **kwargs}) def _prepare( diff --git a/libs/core/langchain_core/runnables/graph.py b/libs/core/langchain_core/runnables/graph.py index d0e69e0b481b5c..53399a52fc6c01 100644 --- a/libs/core/langchain_core/runnables/graph.py +++ b/libs/core/langchain_core/runnables/graph.py @@ -137,7 +137,7 @@ class Branch(NamedTuple): class CurveStyle(Enum): - """Enum for different curve styles supported by Mermaid""" + """Enum for different curve styles supported by Mermaid.""" BASIS = "basis" BUMP_X = "bumpX" @@ -169,7 +169,7 @@ class NodeStyles: class MermaidDrawMethod(Enum): - """Enum for different draw methods supported by Mermaid""" + """Enum for different draw methods supported by Mermaid.""" PYPPETEER = "pyppeteer" # Uses Pyppeteer to render the graph API = "api" # Uses Mermaid.INK API to render the graph @@ -306,7 +306,8 @@ def __bool__(self) -> bool: def next_id(self) -> str: """Return a new unique node - identifier that can be used to add a node to the graph.""" + identifier that can be used to add a node to the graph. + """ return uuid4().hex def add_node( @@ -422,7 +423,8 @@ def prefixed(id: str) -> str: def reid(self) -> Graph: """Return a new graph with all nodes re-identified, - using their unique, readable names where possible.""" + using their unique, readable names where possible. + """ node_name_to_ids = defaultdict(list) for node in self.nodes.values(): node_name_to_ids[node.name].append(node.id) @@ -457,18 +459,21 @@ def _get_node_id(node_id: str) -> str: def first_node(self) -> Optional[Node]: """Find the single node that is not a target of any edge. If there is no such node, or there are multiple, return None. - When drawing the graph, this node would be the origin.""" + When drawing the graph, this node would be the origin. + """ return _first_node(self) def last_node(self) -> Optional[Node]: """Find the single node that is not a source of any edge. 
If there is no such node, or there are multiple, return None. - When drawing the graph, this node would be the destination.""" + When drawing the graph, this node would be the destination. + """ return _last_node(self) def trim_first_node(self) -> None: """Remove the first node if it exists and has a single outgoing edge, - i.e., if removing it would not leave the graph without a "first" node.""" + i.e., if removing it would not leave the graph without a "first" node. + """ first_node = self.first_node() if ( first_node @@ -479,7 +484,8 @@ def trim_first_node(self) -> None: def trim_last_node(self) -> None: """Remove the last node if it exists and has a single incoming edge, - i.e., if removing it would not leave the graph without a "last" node.""" + i.e., if removing it would not leave the graph without a "last" node. + """ last_node = self.last_node() if ( last_node @@ -634,7 +640,8 @@ def _first_node(graph: Graph, exclude: Sequence[str] = ()) -> Optional[Node]: """Find the single node that is not a target of any edge. Exclude nodes/sources with ids in the exclude list. If there is no such node, or there are multiple, return None. - When drawing the graph, this node would be the origin.""" + When drawing the graph, this node would be the origin. + """ targets = {edge.target for edge in graph.edges if edge.source not in exclude} found: list[Node] = [] for node in graph.nodes.values(): @@ -647,7 +654,8 @@ def _last_node(graph: Graph, exclude: Sequence[str] = ()) -> Optional[Node]: """Find the single node that is not a source of any edge. Exclude nodes/targets with ids in the exclude list. If there is no such node, or there are multiple, return None. - When drawing the graph, this node would be the destination.""" + When drawing the graph, this node would be the destination. 
+ """ sources = {edge.source for edge in graph.edges if edge.target not in exclude} found: list[Node] = [] for node in graph.nodes.values(): diff --git a/libs/core/langchain_core/runnables/graph_ascii.py b/libs/core/langchain_core/runnables/graph_ascii.py index 96ea29424ee60f..b1cd5cb63b6aed 100644 --- a/libs/core/langchain_core/runnables/graph_ascii.py +++ b/libs/core/langchain_core/runnables/graph_ascii.py @@ -1,5 +1,6 @@ """Draws DAG in ASCII. -Adapted from https://github.com/iterative/dvc/blob/main/dvc/dagascii.py""" +Adapted from https://github.com/iterative/dvc/blob/main/dvc/dagascii.py. +""" import math import os @@ -239,7 +240,6 @@ def draw_ascii(vertices: Mapping[str, str], edges: Sequence[LangEdge]) -> str: | 1 | +---+ """ - # NOTE: coordinates might me negative, so we need to shift # everything to the positive plane before we actually draw it. xlist = [] diff --git a/libs/core/langchain_core/runnables/graph_png.py b/libs/core/langchain_core/runnables/graph_png.py index 4ce64ca6c1fc0d..98aa50dd8081e2 100644 --- a/libs/core/langchain_core/runnables/graph_png.py +++ b/libs/core/langchain_core/runnables/graph_png.py @@ -132,7 +132,6 @@ def draw(self, graph: Graph, output_path: Optional[str] = None) -> Optional[byte :param graph: The graph to draw :param output_path: The path to save the PNG. If None, PNG bytes are returned. """ - try: import pygraphviz as pgv # type: ignore[import] except ImportError as exc: diff --git a/libs/core/langchain_core/runnables/retry.py b/libs/core/langchain_core/runnables/retry.py index 0469dd961b47ab..f5e729a73c3da1 100644 --- a/libs/core/langchain_core/runnables/retry.py +++ b/libs/core/langchain_core/runnables/retry.py @@ -43,7 +43,6 @@ class RunnableRetry(RunnableBindingBase[Input, Output]): way to use it is through the `.with_retry()` method on all Runnables. Example: - Here's an example that uses a RunnableLambda to raise an exception .. 
code-block:: python diff --git a/libs/core/langchain_core/runnables/router.py b/libs/core/langchain_core/runnables/router.py index 8d353648cd9285..ba9d22ca39eb51 100644 --- a/libs/core/langchain_core/runnables/router.py +++ b/libs/core/langchain_core/runnables/router.py @@ -44,8 +44,7 @@ class RouterInput(TypedDict): class RouterRunnable(RunnableSerializable[RouterInput, Output]): - """ - Runnable that routes to a set of Runnables based on Input['key']. + """Runnable that routes to a set of Runnables based on Input['key']. Returns the output of the selected Runnable. Parameters: diff --git a/libs/core/langchain_core/runnables/utils.py b/libs/core/langchain_core/runnables/utils.py index 89e3651dcce4cf..a731e521b66217 100644 --- a/libs/core/langchain_core/runnables/utils.py +++ b/libs/core/langchain_core/runnables/utils.py @@ -458,9 +458,7 @@ def indent_lines_after_first(text: str, prefix: str) -> str: class AddableDict(dict[str, Any]): - """ - Dictionary that can be added to another dictionary. 
- """ + """Dictionary that can be added to another dictionary.""" def __add__(self, other: AddableDict) -> AddableDict: chunk = AddableDict(self) diff --git a/libs/core/langchain_core/tools/structured.py b/libs/core/langchain_core/tools/structured.py index 174e7b2f53704f..ef185b3e1e844b 100644 --- a/libs/core/langchain_core/tools/structured.py +++ b/libs/core/langchain_core/tools/structured.py @@ -162,7 +162,6 @@ def add(a: int, b: int) -> int: tool = StructuredTool.from_function(add) tool.run(1, 2) # 3 """ - if func is not None: source_function = func elif coroutine is not None: diff --git a/libs/core/langchain_core/tracers/base.py b/libs/core/langchain_core/tracers/base.py index ba8c90f2c9a9e2..f3ae965f6025cc 100644 --- a/libs/core/langchain_core/tracers/base.py +++ b/libs/core/langchain_core/tracers/base.py @@ -531,8 +531,7 @@ async def _persist_run(self, run: Run) -> None: """Persist a run.""" async def _start_trace(self, run: Run) -> None: - """ - Start a trace for a run. + """Start a trace for a run. Starting a trace will run concurrently with each _on_[run_type]_start method. No _on_[run_type]_start callback should depend on operations in _start_trace. @@ -541,8 +540,7 @@ async def _start_trace(self, run: Run) -> None: await self._on_run_create(run) async def _end_trace(self, run: Run) -> None: - """ - End a trace for a run. + """End a trace for a run. Ending a trace will run concurrently with each _on_[run_type]_end method. No _on_[run_type]_end callback should depend on operations in _end_trace. diff --git a/libs/core/langchain_core/tracers/core.py b/libs/core/langchain_core/tracers/core.py index 696d35150ff926..d3544df04e3dcf 100644 --- a/libs/core/langchain_core/tracers/core.py +++ b/libs/core/langchain_core/tracers/core.py @@ -40,8 +40,7 @@ class _TracerCore(ABC): - """ - Abstract base class for tracers. + """Abstract base class for tracers. This class provides common methods, and reusable methods for tracers. 
""" @@ -233,9 +232,7 @@ def _llm_run_with_token_event( parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> Run: - """ - Append token event to LLM run and return the run. - """ + """Append token event to LLM run and return the run.""" llm_run = self._get_run(run_id, run_type={"llm", "chat_model"}) event_kwargs: dict[str, Any] = {"token": token} if chunk: diff --git a/libs/core/langchain_core/tracers/evaluation.py b/libs/core/langchain_core/tracers/evaluation.py index d74c5d86baedf3..b7bd4508a14f70 100644 --- a/libs/core/langchain_core/tracers/evaluation.py +++ b/libs/core/langchain_core/tracers/evaluation.py @@ -35,35 +35,33 @@ def wait_for_all_evaluators() -> None: class EvaluatorCallbackHandler(BaseTracer): """Tracer that runs a run evaluator whenever a run is persisted. - Parameters - ---------- - evaluators : Sequence[RunEvaluator] - The run evaluators to apply to all top level runs. - client : LangSmith Client, optional - The LangSmith client instance to use for evaluating the runs. - If not specified, a new instance will be created. - example_id : Union[UUID, str], optional - The example ID to be associated with the runs. - project_name : str, optional - The LangSmith project name to be organize eval chain runs under. - - Attributes - ---------- - example_id : Union[UUID, None] - The example ID associated with the runs. - client : Client - The LangSmith client instance used for evaluating the runs. - evaluators : Sequence[RunEvaluator] - The sequence of run evaluators to be executed. - executor : ThreadPoolExecutor - The thread pool executor used for running the evaluators. - futures : Set[Future] - The set of futures representing the running evaluators. - skip_unfinished : bool - Whether to skip runs that are not finished or raised - an error. - project_name : Optional[str] - The LangSmith project name to be organize eval chain runs under. + Args: + evaluators : Sequence[RunEvaluator] + The run evaluators to apply to all top level runs. 
+ client : LangSmith Client, optional + The LangSmith client instance to use for evaluating the runs. + If not specified, a new instance will be created. + example_id : Union[UUID, str], optional + The example ID to be associated with the runs. + project_name : str, optional + The LangSmith project name to organize eval chain runs under. + + Attributes: + example_id : Union[UUID, None] + The example ID associated with the runs. + client : Client + The LangSmith client instance used for evaluating the runs. + evaluators : Sequence[RunEvaluator] + The sequence of run evaluators to be executed. + executor : ThreadPoolExecutor + The thread pool executor used for running the evaluators. + futures : Set[Future] + The set of futures representing the running evaluators. + skip_unfinished : bool + Whether to skip runs that are not finished or raised + an error. + project_name : Optional[str] + The LangSmith project name to organize eval chain runs under. """ name: str = "evaluator_callback_handler" diff --git a/libs/core/langchain_core/tracers/langchain.py b/libs/core/langchain_core/tracers/langchain.py index 0183adb2604026..d125494af9ac62 100644 --- a/libs/core/langchain_core/tracers/langchain.py +++ b/libs/core/langchain_core/tracers/langchain.py @@ -253,9 +253,7 @@ def _llm_run_with_token_event( parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> Run: - """ - Append token event to LLM run and return the run. 
- """ + """Append token event to LLM run and return the run.""" return super()._llm_run_with_token_event( # Drop the chunk; we don't need to save it token, diff --git a/libs/core/langchain_core/tracers/run_collector.py b/libs/core/langchain_core/tracers/run_collector.py index 9001eac38d189a..e7a7dfc7addc99 100644 --- a/libs/core/langchain_core/tracers/run_collector.py +++ b/libs/core/langchain_core/tracers/run_collector.py @@ -24,8 +24,7 @@ class RunCollectorCallbackHandler(BaseTracer): def __init__( self, example_id: Optional[Union[UUID, str]] = None, **kwargs: Any ) -> None: - """ - Initialize the RunCollectorCallbackHandler. + """Initialize the RunCollectorCallbackHandler. Parameters ---------- @@ -41,8 +40,7 @@ def __init__( self.traced_runs: list[Run] = [] def _persist_run(self, run: Run) -> None: - """ - Persist a run by adding it to the traced_runs list. + """Persist a run by adding it to the traced_runs list. Parameters ---------- diff --git a/libs/core/langchain_core/utils/__init__.py b/libs/core/langchain_core/utils/__init__.py index 7822d3b62519ef..1af695b4fb696f 100644 --- a/libs/core/langchain_core/utils/__init__.py +++ b/libs/core/langchain_core/utils/__init__.py @@ -1,5 +1,4 @@ -""" -**Utility functions** for LangChain. +"""**Utility functions** for LangChain. These functions do not depend on any other LangChain module. """ diff --git a/libs/core/langchain_core/utils/aiter.py b/libs/core/langchain_core/utils/aiter.py index 5d4ef032efaaac..b4e26915a39dc0 100644 --- a/libs/core/langchain_core/utils/aiter.py +++ b/libs/core/langchain_core/utils/aiter.py @@ -1,7 +1,6 @@ -""" -Adapted from +"""Adapted from https://github.com/maxfischer2781/asyncstdlib/blob/master/asyncstdlib/itertools.py -MIT License +MIT License. """ from collections import deque @@ -54,7 +53,6 @@ def py_anext( Raises: TypeError: If the iterator is not an async iterator. 
""" - try: __anext__ = cast( Callable[[AsyncIterator[T]], Awaitable[T]], type(iterator).__anext__ @@ -147,8 +145,7 @@ async def tee_peer( class Tee(Generic[T]): - """ - Create ``n`` separate asynchronous iterators over ``iterable``. + """Create ``n`` separate asynchronous iterators over ``iterable``. This splits a single ``iterable`` into multiple iterators, each providing the same items in the same order. diff --git a/libs/core/langchain_core/utils/function_calling.py b/libs/core/langchain_core/utils/function_calling.py index dd3468d59e5328..cdcd3dc8649647 100644 --- a/libs/core/langchain_core/utils/function_calling.py +++ b/libs/core/langchain_core/utils/function_calling.py @@ -1,4 +1,4 @@ -"""Methods for creating function specs in the style of OpenAI Functions""" +"""Methods for creating function specs in the style of OpenAI Functions.""" from __future__ import annotations @@ -342,6 +342,7 @@ def convert_to_openai_function( strict: Optional[bool] = None, ) -> dict[str, Any]: """Convert a raw function/class to an OpenAI function. + Args: function: A dictionary, Pydantic BaseModel class, TypedDict class, a LangChain diff --git a/libs/core/langchain_core/utils/html.py b/libs/core/langchain_core/utils/html.py index 805cae3cc8ed8d..b3ff3e5b30cd16 100644 --- a/libs/core/langchain_core/utils/html.py +++ b/libs/core/langchain_core/utils/html.py @@ -70,6 +70,7 @@ def extract_sub_links( exclude_prefixes: Exclude any URLs that start with one of these prefixes. continue_on_failure: If True, continue if parsing a specific link raises an exception. Otherwise, raise the exception. + Returns: List[str]: sub links. 
""" diff --git a/libs/core/langchain_core/utils/iter.py b/libs/core/langchain_core/utils/iter.py index f76ada9ddd02ca..7868119caedd2a 100644 --- a/libs/core/langchain_core/utils/iter.py +++ b/libs/core/langchain_core/utils/iter.py @@ -83,8 +83,7 @@ def tee_peer( class Tee(Generic[T]): - """ - Create ``n`` separate asynchronous iterators over ``iterable`` + """Create ``n`` separate asynchronous iterators over ``iterable``. This splits a single ``iterable`` into multiple iterators, each providing the same items in the same order. diff --git a/libs/core/langchain_core/utils/json.py b/libs/core/langchain_core/utils/json.py index 88e91899b91226..8aedfaf339b711 100644 --- a/libs/core/langchain_core/utils/json.py +++ b/libs/core/langchain_core/utils/json.py @@ -18,11 +18,10 @@ def _replace_new_line(match: re.Match[str]) -> str: def _custom_parser(multiline_string: str) -> str: - """ - The LLM response for `action_input` may be a multiline + """The LLM response for `action_input` may be a multiline string containing unescaped newlines, tabs or quotes. This function replaces those characters with their escaped counterparts. - (newlines in JSON must be double-escaped: `\\n`) + (newlines in JSON must be double-escaped: `\\n`). """ if isinstance(multiline_string, (bytes, bytearray)): multiline_string = multiline_string.decode() @@ -161,8 +160,7 @@ def _parse_json( def parse_and_check_json_markdown(text: str, expected_keys: list[str]) -> dict: - """ - Parse a JSON string from a Markdown string and check that it + """Parse a JSON string from a Markdown string and check that it contains the expected keys. Args: diff --git a/libs/core/langchain_core/utils/json_schema.py b/libs/core/langchain_core/utils/json_schema.py index 8ac177e342cc2e..38fab589909b9d 100644 --- a/libs/core/langchain_core/utils/json_schema.py +++ b/libs/core/langchain_core/utils/json_schema.py @@ -105,7 +105,6 @@ def dereference_refs( Returns: The dereferenced schema object. 
""" - full_schema = full_schema or schema_obj skip_keys = ( skip_keys diff --git a/libs/core/langchain_core/utils/mustache.py b/libs/core/langchain_core/utils/mustache.py index 89d5d9fbbf1445..d60764ad276cac 100644 --- a/libs/core/langchain_core/utils/mustache.py +++ b/libs/core/langchain_core/utils/mustache.py @@ -1,6 +1,5 @@ -""" -Adapted from https://github.com/noahmorrison/chevron -MIT License +"""Adapted from https://github.com/noahmorrison/chevron +MIT License. """ from __future__ import annotations @@ -48,7 +47,6 @@ def grab_literal(template: str, l_del: str) -> tuple[str, str]: Returns: Tuple[str, str]: The literal and the template. """ - global _CURRENT_LINE try: @@ -74,7 +72,6 @@ def l_sa_check(template: str, literal: str, is_standalone: bool) -> bool: Returns: bool: Whether the tag could be a standalone. """ - # If there is a newline, or the previous tag was a standalone if literal.find("\n") != -1 or is_standalone: padding = literal.split("\n")[-1] @@ -98,7 +95,6 @@ def r_sa_check(template: str, tag_type: str, is_standalone: bool) -> bool: Returns: bool: Whether the tag could be a standalone. """ - # Check right side if we might be a standalone if is_standalone and tag_type not in ["variable", "no escape"]: on_newline = template.split("\n", 1) @@ -199,36 +195,25 @@ def tokenize( using file-like objects. It also accepts a string containing the template. 
- - Arguments: - - template -- a file-like object, or a string of a mustache template - - def_ldel -- The default left delimiter - ("{{" by default, as in spec compliant mustache) - - def_rdel -- The default right delimiter - ("}}" by default, as in spec compliant mustache) - + Args: + template: a file-like object, or a string of a mustache template + def_ldel: The default left delimiter + ("{{" by default, as in spec compliant mustache) + def_rdel: The default right delimiter + ("}}" by default, as in spec compliant mustache) Returns: - - A generator of mustache tags in the form of a tuple - - -- (tag_type, tag_key) - - Where tag_type is one of: - * literal - * section - * inverted section - * end - * partial - * no escape - - And tag_key is either the key or in the case of a literal tag, - the literal itself. + A generator of mustache tags in the form of a tuple (tag_type, tag_key) + Where tag_type is one of: + * literal + * section + * inverted section + * end + * partial + * no escape + And tag_key is either the key or in the case of a literal tag, + the literal itself. """ - global _CURRENT_LINE, _LAST_TAG_LINE _CURRENT_LINE = 1 _LAST_TAG_LINE = None @@ -329,8 +314,7 @@ def tokenize( def _html_escape(string: str) -> str: - """HTML escape all of these " & < >""" - + """HTML escape all of these " & < >.""" html_codes = { '"': """, "<": "<", @@ -352,8 +336,7 @@ def _get_key( def_ldel: str, def_rdel: str, ) -> Any: - """Get a key from the current scope""" - + """Get a key from the current scope.""" # If the key is a dot if key == ".": # Then just return the current scope @@ -410,7 +393,7 @@ def _get_key( def _get_partial(name: str, partials_dict: Mapping[str, str]) -> str: - """Load a partial""" + """Load a partial.""" try: # Maybe the partial is in the dictionary return partials_dict[name] @@ -441,45 +424,31 @@ def render( Renders a mustache template with a data scope and inline partial capability. 
- Arguments: - - template -- A file-like object or a string containing the template. - - data -- A python dictionary with your data scope. - - partials_path -- The path to where your partials are stored. - If set to None, then partials won't be loaded from the file system - (defaults to '.'). - - partials_ext -- The extension that you want the parser to look for - (defaults to 'mustache'). - - partials_dict -- A python dictionary which will be search for partials - before the filesystem is. {'include': 'foo'} is the same - as a file called include.mustache - (defaults to {}). - - padding -- This is for padding partials, and shouldn't be used - (but can be if you really want to). - - def_ldel -- The default left delimiter - ("{{" by default, as in spec compliant mustache). - - def_rdel -- The default right delimiter - ("}}" by default, as in spec compliant mustache). - - scopes -- The list of scopes that get_key will look through. - - warn -- Log a warning when a template substitution isn't found in the data - - keep -- Keep unreplaced tags when a substitution isn't found in the data. - + Args: + template: A file-like object or a string containing the template. + data: A python dictionary with your data scope. + partials_path: The path to where your partials are stored. + If set to None, then partials won't be loaded from the file system + (defaults to '.'). + partials_ext: The extension that you want the parser to look for + (defaults to 'mustache'). + partials_dict: A python dictionary which will be searched for partials + before the filesystem is. {'include': 'foo'} is the same + as a file called include.mustache + (defaults to {}). + padding: This is for padding partials, and shouldn't be used + (but can be if you really want to). + def_ldel: The default left delimiter + ("{{" by default, as in spec compliant mustache). + def_rdel: The default right delimiter + ("}}" by default, as in spec compliant mustache). 
+ scopes: The list of scopes that get_key will look through. + warn: Log a warning when a template substitution isn't found in the data + keep: Keep unreplaced tags when a substitution isn't found in the data. Returns: - - A string containing the rendered template. + A string containing the rendered template. """ - # If the template is a sequence but not derived from a string if isinstance(template, Sequence) and not isinstance(template, str): # Then we don't need to tokenize it diff --git a/libs/core/langchain_core/utils/pydantic.py b/libs/core/langchain_core/utils/pydantic.py index 65f12232f9fc6b..65fa400468b301 100644 --- a/libs/core/langchain_core/utils/pydantic.py +++ b/libs/core/langchain_core/utils/pydantic.py @@ -172,7 +172,6 @@ def pre_init(func: Callable) -> Any: Returns: Any: The decorated function. """ - with warnings.catch_warnings(): warnings.filterwarnings(action="ignore", category=PydanticDeprecationWarning) diff --git a/libs/core/langchain_core/utils/utils.py b/libs/core/langchain_core/utils/utils.py index c66ed52228c427..6cfbc9ceaf8bb6 100644 --- a/libs/core/langchain_core/utils/utils.py +++ b/libs/core/langchain_core/utils/utils.py @@ -20,7 +20,7 @@ def xor_args(*arg_groups: tuple[str, ...]) -> Callable: - """Validate specified keyword args are mutually exclusive." + """Validate specified keyword args are mutually exclusive. Args: *arg_groups (Tuple[str, ...]): Groups of mutually exclusive keyword args. diff --git a/libs/core/langchain_core/vectorstores/base.py b/libs/core/langchain_core/vectorstores/base.py index e4e861e76b913d..b154a14b98191e 100644 --- a/libs/core/langchain_core/vectorstores/base.py +++ b/libs/core/langchain_core/vectorstores/base.py @@ -132,7 +132,6 @@ def delete(self, ids: Optional[list[str]] = None, **kwargs: Any) -> Optional[boo Optional[bool]: True if deletion is successful, False otherwise, None if not implemented. """ - msg = "delete method must be implemented by subclass." 
raise NotImplementedError(msg) @@ -423,7 +422,6 @@ def _euclidean_relevance_score_fn(distance: float) -> float: @staticmethod def _cosine_relevance_score_fn(distance: float) -> float: """Normalize the distance to a score on a scale [0, 1].""" - return 1.0 - distance @staticmethod @@ -435,8 +433,7 @@ def _max_inner_product_relevance_score_fn(distance: float) -> float: return -1.0 * distance def _select_relevance_score_fn(self) -> Callable[[float], float]: - """ - The 'correct' relevance function + """The 'correct' relevance function may differ depending on a few things, including: - the distance / similarity metric used by the VectorStore - the scale of your embeddings (OpenAI's are unit normed. Many others are not!) @@ -473,7 +470,6 @@ async def asimilarity_search_with_score( Returns: List of Tuples of (doc, similarity_score). """ - # This is a temporary workaround to make the similarity search # asynchronous. The proper solution is to make the similarity search # asynchronous in the vector store implementations. @@ -487,8 +483,7 @@ def _similarity_search_with_relevance_scores( k: int = 4, **kwargs: Any, ) -> list[tuple[Document, float]]: - """ - Default similarity search with relevance scores. Modify if necessary + """Default similarity search with relevance scores. Modify if necessary in subclass. Return docs and relevance scores in the range [0, 1]. @@ -514,8 +509,7 @@ async def _asimilarity_search_with_relevance_scores( k: int = 4, **kwargs: Any, ) -> list[tuple[Document, float]]: - """ - Default similarity search with relevance scores. Modify if necessary + """Default similarity search with relevance scores. Modify if necessary in subclass. Return docs and relevance scores in the range [0, 1]. @@ -644,7 +638,6 @@ async def asimilarity_search( Returns: List of Documents most similar to the query. """ - # This is a temporary workaround to make the similarity search # asynchronous. 
The proper solution is to make the similarity search # asynchronous in the vector store implementations. @@ -678,7 +671,6 @@ async def asimilarity_search_by_vector( Returns: List of Documents most similar to the query vector. """ - # This is a temporary workaround to make the similarity search # asynchronous. The proper solution is to make the similarity search # asynchronous in the vector store implementations. @@ -741,7 +733,6 @@ async def amax_marginal_relevance_search( Returns: List of Documents selected by maximal marginal relevance. """ - # This is a temporary workaround to make the similarity search # asynchronous. The proper solution is to make the similarity search # asynchronous in the vector store implementations. @@ -1056,7 +1047,6 @@ def validate_search_type(cls, values: dict) -> Any: def _get_ls_params(self, **kwargs: Any) -> LangSmithRetrieverParams: """Get standard params for tracing.""" - _kwargs = self.search_kwargs | kwargs ls_params = super()._get_ls_params(**_kwargs) diff --git a/libs/core/tests/unit_tests/_api/test_beta_decorator.py b/libs/core/tests/unit_tests/_api/test_beta_decorator.py index 55f5cc8025c6cc..46a03fef60a37b 100644 --- a/libs/core/tests/unit_tests/_api/test_beta_decorator.py +++ b/libs/core/tests/unit_tests/_api/test_beta_decorator.py @@ -55,46 +55,46 @@ def test_warn_beta(kwargs: dict[str, Any], expected_message: str) -> None: @beta() def beta_function() -> str: - """original doc""" + """Original doc.""" return "This is a beta function." @beta() async def beta_async_function() -> str: - """original doc""" + """Original doc.""" return "This is a beta async function." class ClassWithBetaMethods: def __init__(self) -> None: - """original doc""" + """Original doc.""" @beta() def beta_method(self) -> str: - """original doc""" + """Original doc.""" return "This is a beta method." @beta() async def beta_async_method(self) -> str: - """original doc""" + """Original doc.""" return "This is a beta async method." 
@classmethod @beta() def beta_classmethod(cls) -> str: - """original doc""" + """Original doc.""" return "This is a beta classmethod." @staticmethod @beta() def beta_staticmethod() -> str: - """original doc""" + """Original doc.""" return "This is a beta staticmethod." @property @beta() def beta_property(self) -> str: - """original doc""" + """Original doc.""" return "This is a beta property." @@ -240,11 +240,11 @@ def test_whole_class_beta() -> None: @beta() class BetaClass: def __init__(self) -> None: - """original doc""" + """Original doc.""" @beta() def beta_method(self) -> str: - """original doc""" + """Original doc.""" return "This is a beta method." with warnings.catch_warnings(record=True) as warning_list: @@ -281,14 +281,14 @@ def test_whole_class_inherited_beta() -> None: class BetaClass: @beta() def beta_method(self) -> str: - """original doc""" + """Original doc.""" return "This is a beta method." @beta() class InheritedBetaClass(BetaClass): @beta() def beta_method(self) -> str: - """original doc""" + """Original doc.""" return "This is a beta method 2." with warnings.catch_warnings(record=True) as warning_list: @@ -344,7 +344,7 @@ def beta_method(self) -> str: class MyModel(BaseModel): @beta() def beta_method(self) -> str: - """original doc""" + """Original doc.""" return "This is a beta method." diff --git a/libs/core/tests/unit_tests/_api/test_deprecation.py b/libs/core/tests/unit_tests/_api/test_deprecation.py index a6a35430de54bd..fef70672d7cbb0 100644 --- a/libs/core/tests/unit_tests/_api/test_deprecation.py +++ b/libs/core/tests/unit_tests/_api/test_deprecation.py @@ -75,46 +75,46 @@ def test_undefined_deprecation_schedule() -> None: @deprecated(since="2.0.0", removal="3.0.0", pending=False) def deprecated_function() -> str: - """original doc""" + """Original doc.""" return "This is a deprecated function." 
@deprecated(since="2.0.0", removal="3.0.0", pending=False) async def deprecated_async_function() -> str: - """original doc""" + """Original doc.""" return "This is a deprecated async function." class ClassWithDeprecatedMethods: def __init__(self) -> None: - """original doc""" + """Original doc.""" @deprecated(since="2.0.0", removal="3.0.0") def deprecated_method(self) -> str: - """original doc""" + """Original doc.""" return "This is a deprecated method." @deprecated(since="2.0.0", removal="3.0.0") async def deprecated_async_method(self) -> str: - """original doc""" + """Original doc.""" return "This is a deprecated async method." @classmethod @deprecated(since="2.0.0", removal="3.0.0") def deprecated_classmethod(cls) -> str: - """original doc""" + """Original doc.""" return "This is a deprecated classmethod." @staticmethod @deprecated(since="2.0.0", removal="3.0.0") def deprecated_staticmethod() -> str: - """original doc""" + """Original doc.""" return "This is a deprecated staticmethod." @property @deprecated(since="2.0.0", removal="3.0.0") def deprecated_property(self) -> str: - """original doc""" + """Original doc.""" return "This is a deprecated property." @@ -264,11 +264,11 @@ def test_whole_class_deprecation() -> None: @deprecated(since="2.0.0", removal="3.0.0") class DeprecatedClass: def __init__(self) -> None: - """original doc""" + """Original doc.""" @deprecated(since="2.0.0", removal="3.0.0") def deprecated_method(self) -> str: - """original doc""" + """Original doc.""" return "This is a deprecated method." with warnings.catch_warnings(record=True) as warning_list: @@ -306,11 +306,11 @@ def test_whole_class_inherited_deprecation() -> None: @deprecated(since="2.0.0", removal="3.0.0") class DeprecatedClass: def __init__(self) -> None: - """original doc""" + """Original doc.""" @deprecated(since="2.0.0", removal="3.0.0") def deprecated_method(self) -> str: - """original doc""" + """Original doc.""" return "This is a deprecated method." 
@deprecated(since="2.2.0", removal="3.2.0") @@ -318,11 +318,11 @@ class InheritedDeprecatedClass(DeprecatedClass): """Inherited deprecated class.""" def __init__(self) -> None: - """original doc""" + """Original doc.""" @deprecated(since="2.2.0", removal="3.2.0") def deprecated_method(self) -> str: - """original doc""" + """Original doc.""" return "This is a deprecated method." with warnings.catch_warnings(record=True) as warning_list: @@ -379,7 +379,7 @@ def deprecated_method(self) -> str: class MyModel(BaseModel): @deprecated(since="2.0.0", removal="3.0.0") def deprecated_method(self) -> str: - """original doc""" + """Original doc.""" return "This is a deprecated method." @@ -408,7 +408,7 @@ def test_raise_error_for_bad_decorator() -> None: @deprecated(since="2.0.0", alternative="NewClass", alternative_import="hello") def deprecated_function() -> str: - """original doc""" + """Original doc.""" return "This is a deprecated function." @@ -417,7 +417,7 @@ def test_rename_parameter() -> None: @rename_parameter(since="2.0.0", removal="3.0.0", old="old_name", new="new_name") def foo(new_name: str) -> str: - """original doc""" + """Original doc.""" return new_name with warnings.catch_warnings(record=True) as warning_list: @@ -427,7 +427,7 @@ def foo(new_name: str) -> str: assert foo(new_name="hello") == "hello" assert foo("hello") == "hello" - assert foo.__doc__ == "original doc" + assert foo.__doc__ == "Original doc." 
with pytest.raises(TypeError): foo(meow="hello") # type: ignore[call-arg] with pytest.raises(TypeError): @@ -442,7 +442,7 @@ async def test_rename_parameter_for_async_func() -> None: @rename_parameter(since="2.0.0", removal="3.0.0", old="old_name", new="new_name") async def foo(new_name: str) -> str: - """original doc""" + """Original doc.""" return new_name with warnings.catch_warnings(record=True) as warning_list: @@ -451,7 +451,7 @@ async def foo(new_name: str) -> str: assert len(warning_list) == 1 assert await foo(new_name="hello") == "hello" assert await foo("hello") == "hello" - assert foo.__doc__ == "original doc" + assert foo.__doc__ == "Original doc." with pytest.raises(TypeError): await foo(meow="hello") # type: ignore[call-arg] with pytest.raises(TypeError): diff --git a/libs/core/tests/unit_tests/callbacks/test_dispatch_custom_event.py b/libs/core/tests/unit_tests/callbacks/test_dispatch_custom_event.py index cd13d1d46b26b4..d1d4f27f4deaae 100644 --- a/libs/core/tests/unit_tests/callbacks/test_dispatch_custom_event.py +++ b/libs/core/tests/unit_tests/callbacks/test_dispatch_custom_event.py @@ -91,7 +91,6 @@ async def foo(x: int, config: RunnableConfig) -> int: async def test_async_callback_manager() -> None: """Test async callback manager.""" - callback = AsyncCustomCallbackHandler() run_id = uuid.UUID(int=7) diff --git a/libs/core/tests/unit_tests/embeddings/test_deterministic_embedding.py b/libs/core/tests/unit_tests/embeddings/test_deterministic_embedding.py index 5ad33b7e307cd0..bb68065bb49236 100644 --- a/libs/core/tests/unit_tests/embeddings/test_deterministic_embedding.py +++ b/libs/core/tests/unit_tests/embeddings/test_deterministic_embedding.py @@ -2,8 +2,7 @@ def test_deterministic_fake_embeddings() -> None: - """ - Test that the deterministic fake embeddings return the same + """Test that the deterministic fake embeddings return the same embedding vector for the same text. 
""" fake = DeterministicFakeEmbedding(size=10) diff --git a/libs/core/tests/unit_tests/indexing/test_in_memory_indexer.py b/libs/core/tests/unit_tests/indexing/test_in_memory_indexer.py index 58ee2514644ddb..65c504c73516ed 100644 --- a/libs/core/tests/unit_tests/indexing/test_in_memory_indexer.py +++ b/libs/core/tests/unit_tests/indexing/test_in_memory_indexer.py @@ -1,4 +1,4 @@ -"""Test in memory indexer""" +"""Test in memory indexer.""" from collections.abc import AsyncGenerator, Generator diff --git a/libs/core/tests/unit_tests/indexing/test_indexing.py b/libs/core/tests/unit_tests/indexing/test_indexing.py index fbe59013e8355d..52cf3265e29915 100644 --- a/libs/core/tests/unit_tests/indexing/test_indexing.py +++ b/libs/core/tests/unit_tests/indexing/test_indexing.py @@ -1167,7 +1167,7 @@ def test_incremental_delete_with_same_source( def test_incremental_indexing_with_batch_size( record_manager: InMemoryRecordManager, vector_store: InMemoryVectorStore ) -> None: - """Test indexing with incremental indexing""" + """Test indexing with incremental indexing.""" loader = ToyLoader( documents=[ Document( @@ -2031,7 +2031,6 @@ def test_index_with_upsert_kwargs_for_document_indexer( mocker: MockerFixture, ) -> None: """Test that kwargs are passed to the upsert method of the document indexer.""" - document_index = InMemoryDocumentIndex() upsert_spy = mocker.spy(document_index.__class__, "upsert") docs = [ @@ -2070,7 +2069,6 @@ async def test_aindex_with_upsert_kwargs_for_document_indexer( mocker: MockerFixture, ) -> None: """Test that kwargs are passed to the upsert method of the document indexer.""" - document_index = InMemoryDocumentIndex() upsert_spy = mocker.spy(document_index.__class__, "aupsert") docs = [ diff --git a/libs/core/tests/unit_tests/language_models/chat_models/test_base.py b/libs/core/tests/unit_tests/language_models/chat_models/test_base.py index 2cd08a27d0383f..bee3a0783af8ae 100644 --- 
a/libs/core/tests/unit_tests/language_models/chat_models/test_base.py +++ b/libs/core/tests/unit_tests/language_models/chat_models/test_base.py @@ -136,7 +136,7 @@ def _generate( run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: - """Top Level call""" + """Top Level call.""" message = AIMessage(content="hello") generation = ChatGeneration(message=message) return ChatResult(generations=[generation]) @@ -164,7 +164,7 @@ def _generate( run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: - """Top Level call""" + """Top Level call.""" raise NotImplementedError def _stream( @@ -209,7 +209,7 @@ def _generate( run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: - """Top Level call""" + """Top Level call.""" raise NotImplementedError async def _astream( # type: ignore @@ -243,7 +243,6 @@ def __init__(self) -> None: def _persist_run(self, run: Run) -> None: """Persist a run.""" - self.traced_run_ids.append(run.id) diff --git a/libs/core/tests/unit_tests/language_models/chat_models/test_rate_limiting.py b/libs/core/tests/unit_tests/language_models/chat_models/test_rate_limiting.py index 3547cc8af6b592..bd2d960e10e404 100644 --- a/libs/core/tests/unit_tests/language_models/chat_models/test_rate_limiting.py +++ b/libs/core/tests/unit_tests/language_models/chat_models/test_rate_limiting.py @@ -8,7 +8,6 @@ def test_rate_limit_invoke() -> None: """Add rate limiter.""" - model = GenericFakeChatModel( messages=iter(["hello", "world"]), rate_limiter=InMemoryRateLimiter( @@ -35,7 +34,6 @@ def test_rate_limit_invoke() -> None: async def test_rate_limit_ainvoke() -> None: """Add rate limiter.""" - model = GenericFakeChatModel( messages=iter(["hello", "world", "!"]), rate_limiter=InMemoryRateLimiter( diff --git a/libs/core/tests/unit_tests/language_models/llms/test_base.py b/libs/core/tests/unit_tests/language_models/llms/test_base.py index f8ff2fdc4f2a66..0f28762bb21cdf 100644 
--- a/libs/core/tests/unit_tests/language_models/llms/test_base.py +++ b/libs/core/tests/unit_tests/language_models/llms/test_base.py @@ -160,7 +160,7 @@ def _generate( run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: - """Top Level call""" + """Top Level call.""" raise NotImplementedError def _stream( @@ -197,7 +197,7 @@ def _generate( run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: - """Top Level call""" + """Top Level call.""" raise NotImplementedError async def _astream( diff --git a/libs/core/tests/unit_tests/output_parsers/test_list_parser.py b/libs/core/tests/unit_tests/output_parsers/test_list_parser.py index 11bd11b6a0b92e..e7538c5bc6557f 100644 --- a/libs/core/tests/unit_tests/output_parsers/test_list_parser.py +++ b/libs/core/tests/unit_tests/output_parsers/test_list_parser.py @@ -29,7 +29,8 @@ def test_single_item() -> None: def test_multiple_items_with_spaces() -> None: """Test that a string with multiple comma-separated items - with spaces is parsed to a list.""" + with spaces is parsed to a list. + """ parser = CommaSeparatedListOutputParser() text = "foo, bar, baz" expected = ["foo", "bar", "baz"] @@ -66,7 +67,8 @@ def test_multiple_items() -> None: def test_multiple_items_with_comma() -> None: """Test that a string with multiple comma-separated items with 1 item containing a - comma is parsed to a list.""" + comma is parsed to a list. 
+ """ parser = CommaSeparatedListOutputParser() text = '"foo, foo2",bar,baz' expected = ["foo, foo2", "bar", "baz"] diff --git a/libs/core/tests/unit_tests/output_parsers/test_openai_functions.py b/libs/core/tests/unit_tests/output_parsers/test_openai_functions.py index c0959620e420cc..ed78ea2be318d4 100644 --- a/libs/core/tests/unit_tests/output_parsers/test_openai_functions.py +++ b/libs/core/tests/unit_tests/output_parsers/test_openai_functions.py @@ -166,7 +166,6 @@ class Model(BaseModel): def test_pydantic_output_functions_parser_multiple_schemas() -> None: """Test that the parser works if providing multiple pydantic schemas.""" - message = AIMessage( content="This is a test message", additional_kwargs={ diff --git a/libs/core/tests/unit_tests/output_parsers/test_openai_tools.py b/libs/core/tests/unit_tests/output_parsers/test_openai_tools.py index 63c2d8ea1eee80..d4940bab2bbea2 100644 --- a/libs/core/tests/unit_tests/output_parsers/test_openai_tools.py +++ b/libs/core/tests/unit_tests/output_parsers/test_openai_tools.py @@ -482,7 +482,7 @@ class Person(BaseModel): class NameCollector(BaseModel): - """record names of all people mentioned""" + """record names of all people mentioned.""" names: list[str] = Field(..., description="all names mentioned") person: Person = Field(..., description="info about the main subject") diff --git a/libs/core/tests/unit_tests/output_parsers/test_pydantic_parser.py b/libs/core/tests/unit_tests/output_parsers/test_pydantic_parser.py index b8bdd0ce252d39..4d152e34fd9ca4 100644 --- a/libs/core/tests/unit_tests/output_parsers/test_pydantic_parser.py +++ b/libs/core/tests/unit_tests/output_parsers/test_pydantic_parser.py @@ -1,4 +1,4 @@ -"""Test PydanticOutputParser""" +"""Test PydanticOutputParser.""" from enum import Enum from typing import Literal, Optional @@ -141,7 +141,6 @@ class TestModel(BaseModel): def test_pydantic_output_parser() -> None: """Test PydanticOutputParser.""" - pydantic_parser: PydanticOutputParser = 
PydanticOutputParser( pydantic_object=TestModel ) @@ -154,7 +153,6 @@ def test_pydantic_output_parser() -> None: def test_pydantic_output_parser_fail() -> None: """Test PydanticOutputParser where completion result fails schema validation.""" - pydantic_parser: PydanticOutputParser = PydanticOutputParser( pydantic_object=TestModel ) diff --git a/libs/core/tests/unit_tests/output_parsers/test_xml_parser.py b/libs/core/tests/unit_tests/output_parsers/test_xml_parser.py index 60826c439e500a..7fdcf194f97477 100644 --- a/libs/core/tests/unit_tests/output_parsers/test_xml_parser.py +++ b/libs/core/tests/unit_tests/output_parsers/test_xml_parser.py @@ -1,4 +1,4 @@ -"""Test XMLOutputParser""" +"""Test XMLOutputParser.""" import importlib from collections.abc import AsyncIterator, Iterable @@ -77,7 +77,7 @@ async def _as_iter(iterable: Iterable[str]) -> AsyncIterator[str]: async def test_root_only_xml_output_parser() -> None: - """Test XMLOutputParser when xml only contains the root level tag""" + """Test XMLOutputParser when xml only contains the root level tag.""" xml_parser = XMLOutputParser(parser="xml") assert xml_parser.parse(ROOT_LEVEL_ONLY) == {"body": "Text of the body."} assert await xml_parser.aparse(ROOT_LEVEL_ONLY) == {"body": "Text of the body."} @@ -125,7 +125,6 @@ async def test_xml_output_parser_defused(content: str) -> None: @pytest.mark.parametrize("result", ["foo>", " None: """Test XMLOutputParser where complete output is not in XML format.""" - xml_parser = XMLOutputParser(parser="xml") with pytest.raises(OutputParserException) as e: diff --git a/libs/core/tests/unit_tests/prompts/test_chat.py b/libs/core/tests/unit_tests/prompts/test_chat.py index 6249aa6f47893d..10675629ccc4be 100644 --- a/libs/core/tests/unit_tests/prompts/test_chat.py +++ b/libs/core/tests/unit_tests/prompts/test_chat.py @@ -110,7 +110,6 @@ def test_create_chat_prompt_template_from_template_partial() -> None: def test_create_system_message_prompt_template_from_template_partial() -> 
None: """Create a system message prompt template with partials.""" - graph_creator_content = """ Your instructions are: {instructions} diff --git a/libs/core/tests/unit_tests/prompts/test_prompt.py b/libs/core/tests/unit_tests/prompts/test_prompt.py index d56654d874d5bb..cef1e5595d2542 100644 --- a/libs/core/tests/unit_tests/prompts/test_prompt.py +++ b/libs/core/tests/unit_tests/prompts/test_prompt.py @@ -348,7 +348,8 @@ def test_prompt_from_file() -> None: def test_prompt_from_file_with_partial_variables() -> None: """Test prompt can be successfully constructed from a file - with partial variables.""" + with partial variables. + """ # given template = "This is a {foo} test {bar}." partial_variables = {"bar": "baz"} diff --git a/libs/core/tests/unit_tests/pydantic_utils.py b/libs/core/tests/unit_tests/pydantic_utils.py index e7e03750fd2df8..d04ce615d2f9d2 100644 --- a/libs/core/tests/unit_tests/pydantic_utils.py +++ b/libs/core/tests/unit_tests/pydantic_utils.py @@ -75,7 +75,6 @@ def _remove_enum(obj: Any) -> None: def _schema(obj: Any) -> dict: """Return the schema of the object.""" - if not is_basemodel_subclass(obj): msg = f"Object must be a Pydantic BaseModel subclass. 
Got {type(obj)}" raise TypeError(msg) diff --git a/libs/core/tests/unit_tests/runnables/test_configurable.py b/libs/core/tests/unit_tests/runnables/test_configurable.py index 9467d6dd0397f2..8b2557d1a97c63 100644 --- a/libs/core/tests/unit_tests/runnables/test_configurable.py +++ b/libs/core/tests/unit_tests/runnables/test_configurable.py @@ -67,7 +67,7 @@ def my_other_custom_function_w_config(self, config: RunnableConfig) -> str: def test_doubly_set_configurable() -> None: - """Test that setting a configurable field with a default value works""" + """Test that setting a configurable field with a default value works.""" runnable = MyRunnable(my_property="a") # type: ignore configurable_runnable = runnable.configurable_fields( my_property=ConfigurableField( diff --git a/libs/core/tests/unit_tests/runnables/test_fallbacks.py b/libs/core/tests/unit_tests/runnables/test_fallbacks.py index 731b3ddaa62aac..06a88a96ef5446 100644 --- a/libs/core/tests/unit_tests/runnables/test_fallbacks.py +++ b/libs/core/tests/unit_tests/runnables/test_fallbacks.py @@ -314,7 +314,7 @@ def _generate( run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: - """Top Level call""" + """Top Level call.""" return ChatResult(generations=[]) def bind_tools( @@ -344,7 +344,7 @@ def _generate( run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: - """Top Level call""" + """Top Level call.""" return ChatResult(generations=[]) def bind_tools( diff --git a/libs/core/tests/unit_tests/runnables/test_graph.py b/libs/core/tests/unit_tests/runnables/test_graph.py index e98a0a18a9c5cd..58e5749a0d5570 100644 --- a/libs/core/tests/unit_tests/runnables/test_graph.py +++ b/libs/core/tests/unit_tests/runnables/test_graph.py @@ -426,7 +426,7 @@ def invoke( def test_graph_mermaid_escape_node_label() -> None: - """Test that node labels are correctly preprocessed for draw_mermaid""" + """Test that node labels are correctly preprocessed for 
draw_mermaid.""" assert _escape_node_label("foo") == "foo" assert _escape_node_label("foo-bar") == "foo-bar" assert _escape_node_label("foo_1") == "foo_1" diff --git a/libs/core/tests/unit_tests/runnables/test_history.py b/libs/core/tests/unit_tests/runnables/test_history.py index 710f66a18856e5..06c63203f89748 100644 --- a/libs/core/tests/unit_tests/runnables/test_history.py +++ b/libs/core/tests/unit_tests/runnables/test_history.py @@ -257,7 +257,7 @@ def _generate( run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: - """Top Level call""" + """Top Level call.""" return ChatResult( generations=[ChatGeneration(message=AIMessage(content=str(len(messages))))] ) diff --git a/libs/core/tests/unit_tests/runnables/test_runnable.py b/libs/core/tests/unit_tests/runnables/test_runnable.py index b06cb381e80e68..e2752401c5d8ce 100644 --- a/libs/core/tests/unit_tests/runnables/test_runnable.py +++ b/libs/core/tests/unit_tests/runnables/test_runnable.py @@ -96,7 +96,8 @@ class FakeTracer(BaseTracer): """Fake tracer that records LangChain execution. - It replaces run ids with deterministic UUIDs for snapshotting.""" + It replaces run ids with deterministic UUIDs for snapshotting. 
+ """ def __init__(self) -> None: """Initialize the tracer.""" @@ -158,7 +159,6 @@ def _copy_run(self, run: Run) -> Run: def _persist_run(self, run: Run) -> None: """Persist a run.""" - self.runs.append(self._copy_run(run)) def flattened_runs(self) -> list[Run]: @@ -657,7 +657,7 @@ async def aget_values_typed(input: InputType) -> OutputType: def test_with_types_with_type_generics() -> None: - """Verify that with_types works if we use things like List[int]""" + """Verify that with_types works if we use things like List[int].""" def foo(x: int) -> None: """Add one to the input.""" @@ -3334,7 +3334,6 @@ def test_with_config_with_config() -> None: def test_metadata_is_merged() -> None: """Test metadata and tags defined in with_config and at are merged/concatend.""" - foo = RunnableLambda(lambda x: x).with_config({"metadata": {"my_key": "my_value"}}) expected_metadata = { "my_key": "my_value", @@ -3349,7 +3348,6 @@ def test_metadata_is_merged() -> None: def test_tags_are_appended() -> None: """Test tags from with_config are concatenated with those in invocation.""" - foo = RunnableLambda(lambda x: x).with_config({"tags": ["my_key"]}) with collect_runs() as cb: foo.invoke("hi", {"tags": ["invoked_key"]}) @@ -4445,7 +4443,6 @@ async def test_runnable_branch_abatch() -> None: def test_runnable_branch_stream() -> None: """Verify that stream works for RunnableBranch.""" - llm_res = "i'm a textbot" # sleep to better simulate a real stream llm = FakeStreamingListLLM(responses=[llm_res], sleep=0.01) @@ -4503,7 +4500,6 @@ def raise_value_error(x: str) -> Any: async def test_runnable_branch_astream() -> None: """Verify that astream works for RunnableBranch.""" - llm_res = "i'm a textbot" # sleep to better simulate a real stream llm = FakeStreamingListLLM(responses=[llm_res], sleep=0.01) @@ -4694,8 +4690,8 @@ async def __call__(self, input: AsyncIterator[Any]) -> AsyncIterator[int]: async def test_runnable_gen_context_config() -> None: """Test that a generator can call other 
runnables with config - propagated from the context.""" - + propagated from the context. + """ fake = RunnableLambda(len) def gen(input: Iterator[Any]) -> Iterator[int]: @@ -4829,8 +4825,8 @@ async def agen(input: AsyncIterator[Any]) -> AsyncIterator[int]: async def test_runnable_iter_context_config() -> None: """Test that a generator can call other runnables with config - propagated from the context.""" - + propagated from the context. + """ fake = RunnableLambda(len) @chain @@ -4946,8 +4942,8 @@ async def agen(input: str) -> AsyncIterator[int]: async def test_runnable_lambda_context_config() -> None: """Test that a function can call other runnables with config - propagated from the context.""" - + propagated from the context. + """ fake = RunnableLambda(len) @chain @@ -5098,7 +5094,8 @@ def test_with_config_callbacks() -> None: async def test_ainvoke_on_returned_runnable() -> None: """Verify that a runnable returned by a sync runnable in the async path will - be runthroughaasync path (issue #13407)""" + be runthroughaasync path (issue #13407). + """ def idchain_sync(__input: dict) -> bool: return False @@ -5171,7 +5168,7 @@ async def test_astream_log_deep_copies() -> None: """ def _get_run_log(run_log_patches: Sequence[RunLogPatch]) -> RunLog: - """Get run log""" + """Get run log.""" run_log = RunLog(state=None) # type: ignore for log_patch in run_log_patches: run_log = run_log + log_patch @@ -5435,7 +5432,6 @@ class CustomChatModel(RunnableSerializable): def test_schema_for_prompt_and_chat_model() -> None: """Testing that schema is generated properly when using variable names - that collide with pydantic attributes. 
""" prompt = ChatPromptTemplate([("system", "{model_json_schema}, {_private}, {json}")]) diff --git a/libs/core/tests/unit_tests/runnables/test_runnable_events_v1.py b/libs/core/tests/unit_tests/runnables/test_runnable_events_v1.py index 7389d887769d81..9efd8b01695d6e 100644 --- a/libs/core/tests/unit_tests/runnables/test_runnable_events_v1.py +++ b/libs/core/tests/unit_tests/runnables/test_runnable_events_v1.py @@ -80,15 +80,15 @@ def _assert_events_equal_allow_superset_metadata(events: list, expected: list) - async def test_event_stream_with_simple_function_tool() -> None: - """Test the event stream with a function and tool""" + """Test the event stream with a function and tool.""" def foo(x: int) -> dict: - """Foo""" + """Foo.""" return {"x": 5} @tool def get_docs(x: int) -> list[Document]: - """Hello Doc""" + """Hello Doc.""" return [Document(page_content="hello")] chain = RunnableLambda(foo) | get_docs @@ -345,7 +345,7 @@ def reverse(s: str) -> str: async def test_event_stream_with_triple_lambda_test_filtering() -> None: - """Test filtering based on tags / names""" + """Test filtering based on tags / names.""" def reverse(s: str) -> str: """Reverse a string.""" @@ -1822,7 +1822,7 @@ async def add_one(x: int) -> int: async def test_events_astream_config() -> None: - """Test that astream events support accepting config""" + """Test that astream events support accepting config.""" infinite_cycle = cycle([AIMessage(content="hello world!", id="ai1")]) good_world_on_repeat = cycle([AIMessage(content="Goodbye world", id="ai2")]) model = GenericFakeChatModel(messages=infinite_cycle).configurable_fields( @@ -1912,7 +1912,7 @@ def clear(self) -> None: store: dict = {} def get_by_session_id(session_id: str) -> BaseChatMessageHistory: - """Get a chat message history""" + """Get a chat message history.""" if session_id not in store: store[session_id] = [] return InMemoryHistory(messages=store[session_id]) diff --git 
a/libs/core/tests/unit_tests/runnables/test_runnable_events_v2.py b/libs/core/tests/unit_tests/runnables/test_runnable_events_v2.py index 8ceb4bf38b5f17..44ddad7721f507 100644 --- a/libs/core/tests/unit_tests/runnables/test_runnable_events_v2.py +++ b/libs/core/tests/unit_tests/runnables/test_runnable_events_v2.py @@ -90,15 +90,15 @@ async def _collect_events( async def test_event_stream_with_simple_function_tool() -> None: - """Test the event stream with a function and tool""" + """Test the event stream with a function and tool.""" def foo(x: int) -> dict: - """Foo""" + """Foo.""" return {"x": 5} @tool def get_docs(x: int) -> list[Document]: - """Hello Doc""" + """Hello Doc.""" return [Document(page_content="hello")] chain = RunnableLambda(foo) | get_docs @@ -371,7 +371,7 @@ def step(name: str, err: Optional[str], val: str) -> str: async def test_event_stream_with_triple_lambda_test_filtering() -> None: - """Test filtering based on tags / names""" + """Test filtering based on tags / names.""" def reverse(s: str) -> str: """Reverse a string.""" @@ -1767,7 +1767,7 @@ async def add_one(x: int) -> int: async def test_events_astream_config() -> None: - """Test that astream events support accepting config""" + """Test that astream events support accepting config.""" infinite_cycle = cycle([AIMessage(content="hello world!", id="ai1")]) good_world_on_repeat = cycle([AIMessage(content="Goodbye world", id="ai2")]) model = GenericFakeChatModel(messages=infinite_cycle).configurable_fields( @@ -1859,7 +1859,7 @@ def clear(self) -> None: store: dict = {} def get_by_session_id(session_id: str) -> BaseChatMessageHistory: - """Get a chat message history""" + """Get a chat message history.""" if session_id not in store: store[session_id] = [] return InMemoryHistory(messages=store[session_id]) @@ -2046,7 +2046,7 @@ def add_one_proxy(x: int, config: RunnableConfig) -> int: class StreamingRunnable(Runnable[Input, Output]): - """A custom runnable used for testing purposes""" + """A 
custom runnable used for testing purposes.""" iterable: Iterable[Any] @@ -2734,7 +2734,7 @@ async def test_custom_event_root_dispatch_with_in_tool() -> None: @tool async def foo(x: int) -> int: - """Foo""" + """Foo.""" await adispatch_custom_event("event1", {"x": x}) return x + 1 diff --git a/libs/core/tests/unit_tests/runnables/test_utils.py b/libs/core/tests/unit_tests/runnables/test_utils.py index 27d1272ee27eff..06b84495b8ec21 100644 --- a/libs/core/tests/unit_tests/runnables/test_utils.py +++ b/libs/core/tests/unit_tests/runnables/test_utils.py @@ -23,7 +23,7 @@ ], ) def test_get_lambda_source(func: Callable, expected_source: str) -> None: - """Test get_lambda_source function""" + """Test get_lambda_source function.""" source = get_lambda_source(func) assert source == expected_source @@ -36,7 +36,7 @@ def test_get_lambda_source(func: Callable, expected_source: str) -> None: ], ) def test_indent_lines_after_first(text: str, prefix: str, expected_output: str) -> None: - """Test indent_lines_after_first function""" + """Test indent_lines_after_first function.""" indented_text = indent_lines_after_first(text, prefix) assert indented_text == expected_output diff --git a/libs/core/tests/unit_tests/test_pydantic_serde.py b/libs/core/tests/unit_tests/test_pydantic_serde.py index 87af2fa5a611ba..32ac6447315375 100644 --- a/libs/core/tests/unit_tests/test_pydantic_serde.py +++ b/libs/core/tests/unit_tests/test_pydantic_serde.py @@ -25,7 +25,6 @@ def test_serde_any_message() -> None: """Test AnyMessage() serder.""" - lc_objects = [ HumanMessage(content="human"), HumanMessageChunk(content="human"), diff --git a/libs/core/tests/unit_tests/test_tools.py b/libs/core/tests/unit_tests/test_tools.py index 03e7fcc01e5e4d..5045114397e4c1 100644 --- a/libs/core/tests/unit_tests/test_tools.py +++ b/libs/core/tests/unit_tests/test_tools.py @@ -388,7 +388,6 @@ async def _arun(self, tool_input: str) -> str: def test_tool_lambda_args_schema() -> None: """Test args schema inference when 
the tool argument is a lambda function.""" - tool = Tool( name="tool", description="A tool", @@ -403,7 +402,7 @@ def test_structured_tool_from_function_docstring() -> None: """Test that structured tools can be created from functions.""" def foo(bar: int, baz: str) -> str: - """Docstring + """Docstring. Args: bar: the bar value @@ -437,7 +436,7 @@ def test_structured_tool_from_function_docstring_complex_args() -> None: """Test that structured tools can be created from functions.""" def foo(bar: int, baz: list[str]) -> str: - """Docstring + """Docstring. Args: bar: int @@ -526,7 +525,7 @@ def test_tool_from_function_with_run_manager() -> None: def foo(bar: str, callbacks: Optional[CallbackManagerForToolRun] = None) -> str: """Docstring Args: - bar: str + bar: str. """ assert callbacks is not None return "foo" + bar @@ -544,7 +543,7 @@ def test_structured_tool_from_function_with_run_manager() -> None: def foo( bar: int, baz: str, callbacks: Optional[CallbackManagerForToolRun] = None ) -> str: - """Docstring + """Docstring. Args: bar: int @@ -1297,6 +1296,7 @@ def foo3(bar: str, baz: int) -> str: def foo4(bar: str, baz: int) -> str: """The foo. + Args: bar: The bar. baz: The baz. 
@@ -1381,7 +1381,7 @@ def _run( def _mock_structured_tool_with_artifact( arg1: int, arg2: bool, arg3: Optional[dict] = None ) -> tuple[str, dict]: - """A Structured Tool""" + """A Structured Tool.""" return f"{arg1} {arg2}", {"arg1": arg1, "arg2": arg2, "arg3": arg3} @@ -1891,7 +1891,7 @@ def test_structured_tool_with_different_pydantic_versions(pydantic_model: Any) - from langchain_core.tools import StructuredTool def foo(a: int, b: str) -> str: - """Hahaha""" + """Hahaha.""" return "foo" foo_tool = StructuredTool.from_function( @@ -2187,7 +2187,7 @@ class Foo(BaseModelV2): @tool(args_schema=Foo) def foo(x): # type: ignore[no-untyped-def] - """foo""" + """Foo.""" return x assert foo.tool_call_schema.model_json_schema() == { @@ -2269,7 +2269,7 @@ def injected_tool(x: int, foo: Annotated[Foo, InjectedToolArg]) -> str: def test_tool_injected_tool_call_id() -> None: @tool def foo(x: int, tool_call_id: Annotated[str, InjectedToolCallId]) -> ToolMessage: - """foo""" + """Foo.""" return ToolMessage(x, tool_call_id=tool_call_id) # type: ignore assert foo.invoke( @@ -2281,7 +2281,7 @@ def foo(x: int, tool_call_id: Annotated[str, InjectedToolCallId]) -> ToolMessage @tool def foo2(x: int, tool_call_id: Annotated[str, InjectedToolCallId()]) -> ToolMessage: - """foo""" + """Foo.""" return ToolMessage(x, tool_call_id=tool_call_id) # type: ignore assert foo2.invoke( @@ -2292,7 +2292,7 @@ def foo2(x: int, tool_call_id: Annotated[str, InjectedToolCallId()]) -> ToolMess def test_tool_uninjected_tool_call_id() -> None: @tool def foo(x: int, tool_call_id: str) -> ToolMessage: - """foo""" + """Foo.""" return ToolMessage(x, tool_call_id=tool_call_id) # type: ignore with pytest.raises(ValueError): diff --git a/libs/core/tests/unit_tests/tracers/test_langchain.py b/libs/core/tests/unit_tests/tracers/test_langchain.py index d71783f6bc8999..71fabecc948b5d 100644 --- a/libs/core/tests/unit_tests/tracers/test_langchain.py +++ b/libs/core/tests/unit_tests/tracers/test_langchain.py @@ -83,7 
+83,6 @@ def test_tracer_with_run_tree_parent() -> None: def test_log_lock() -> None: """Test that example assigned at callback start/end is honored.""" - client = unittest.mock.MagicMock(spec=Client) tracer = LangChainTracer(client=client) @@ -96,9 +95,7 @@ def test_log_lock() -> None: class LangChainProjectNameTest(unittest.TestCase): - """ - Test that the project name is set correctly for runs. - """ + """Test that the project name is set correctly for runs.""" class SetProperTracerProjectTestCase: def __init__( diff --git a/libs/core/tests/unit_tests/utils/test_function_calling.py b/libs/core/tests/unit_tests/utils/test_function_calling.py index bf1a4f56337fe6..088e12cdc65379 100644 --- a/libs/core/tests/unit_tests/utils/test_function_calling.py +++ b/libs/core/tests/unit_tests/utils/test_function_calling.py @@ -39,7 +39,7 @@ @pytest.fixture() def pydantic() -> type[BaseModel]: class dummy_function(BaseModel): # noqa: N801 - """dummy function""" + """dummy function.""" arg1: int = Field(..., description="foo") arg2: Literal["bar", "baz"] = Field(..., description="one of 'bar', 'baz'") @@ -53,7 +53,7 @@ def dummy_function( arg1: ExtensionsAnnotated[int, "foo"], arg2: ExtensionsAnnotated[Literal["bar", "baz"], "one of 'bar', 'baz'"], ) -> None: - """dummy function""" + """Dummy function.""" return dummy_function @@ -61,7 +61,7 @@ def dummy_function( @pytest.fixture() def function() -> Callable: def dummy_function(arg1: int, arg2: Literal["bar", "baz"]) -> None: - """dummy function + """Dummy function. Args: arg1: foo @@ -74,7 +74,7 @@ def dummy_function(arg1: int, arg2: Literal["bar", "baz"]) -> None: @pytest.fixture() def function_docstring_annotations() -> Callable: def dummy_function(arg1: int, arg2: Literal["bar", "baz"]) -> None: - """dummy function + """Dummy function. 
Args: arg1 (int): foo @@ -130,7 +130,7 @@ class Schema(BaseModel): @pytest.fixture() def dummy_pydantic() -> type[BaseModel]: class dummy_function(BaseModel): # noqa: N801 - """dummy function""" + """dummy function.""" arg1: int = Field(..., description="foo") arg2: Literal["bar", "baz"] = Field(..., description="one of 'bar', 'baz'") @@ -141,7 +141,7 @@ class dummy_function(BaseModel): # noqa: N801 @pytest.fixture() def dummy_pydantic_v2() -> type[BaseModelV2Maybe]: class dummy_function(BaseModelV2Maybe): # noqa: N801 - """dummy function""" + """dummy function.""" arg1: int = FieldV2Maybe(..., description="foo") arg2: Literal["bar", "baz"] = FieldV2Maybe( @@ -154,7 +154,7 @@ class dummy_function(BaseModelV2Maybe): # noqa: N801 @pytest.fixture() def dummy_typing_typed_dict() -> type: class dummy_function(TypingTypedDict): # noqa: N801 - """dummy function""" + """dummy function.""" arg1: TypingAnnotated[int, ..., "foo"] # noqa: F821 arg2: TypingAnnotated[Literal["bar", "baz"], ..., "one of 'bar', 'baz'"] # noqa: F722 @@ -165,7 +165,7 @@ class dummy_function(TypingTypedDict): # noqa: N801 @pytest.fixture() def dummy_typing_typed_dict_docstring() -> type: class dummy_function(TypingTypedDict): # noqa: N801 - """dummy function + """dummy function. Args: arg1: foo @@ -181,7 +181,7 @@ class dummy_function(TypingTypedDict): # noqa: N801 @pytest.fixture() def dummy_extensions_typed_dict() -> type: class dummy_function(ExtensionsTypedDict): # noqa: N801 - """dummy function""" + """dummy function.""" arg1: ExtensionsAnnotated[int, ..., "foo"] arg2: ExtensionsAnnotated[Literal["bar", "baz"], ..., "one of 'bar', 'baz'"] @@ -192,7 +192,7 @@ class dummy_function(ExtensionsTypedDict): # noqa: N801 @pytest.fixture() def dummy_extensions_typed_dict_docstring() -> type: class dummy_function(ExtensionsTypedDict): # noqa: N801 - """dummy function + """dummy function. 
Args: arg1: foo @@ -269,7 +269,7 @@ def bedrock_converse_tool() -> dict: class Dummy: def dummy_function(self, arg1: int, arg2: Literal["bar", "baz"]) -> None: - """dummy function + """Dummy function. Args: arg1: foo @@ -280,7 +280,7 @@ def dummy_function(self, arg1: int, arg2: Literal["bar", "baz"]) -> None: class DummyWithClassMethod: @classmethod def dummy_function(cls, arg1: int, arg2: Literal["bar", "baz"]) -> None: - """dummy function + """Dummy function. Args: arg1: foo @@ -392,7 +392,7 @@ class NestedV2(BaseModelV2Maybe): ) def my_function(arg1: NestedV2) -> None: - """dummy function""" + """Dummy function.""" convert_to_openai_function(my_function) @@ -405,7 +405,7 @@ class Nested(BaseModel): ) def my_function(arg1: Nested) -> None: - """dummy function""" + """Dummy function.""" expected = { "name": "my_function", @@ -442,7 +442,7 @@ class Nested(BaseModel): ) def my_function(arg1: Nested) -> None: - """dummy function""" + """Dummy function.""" expected = { "name": "my_function", @@ -608,7 +608,7 @@ def func5( b: str, c: Optional[list[Optional[str]]], ) -> None: - """A test function""" + """A test function.""" func = convert_to_openai_function(func5) req = func["parameters"]["required"] @@ -617,7 +617,7 @@ def func5( def test_function_no_params() -> None: def nullary_function() -> None: - """nullary function""" + """Nullary function.""" func = convert_to_openai_function(nullary_function) req = func["parameters"].get("required") @@ -722,12 +722,12 @@ def test__convert_typed_dict_to_openai_function( annotated = TypingAnnotated if use_extension_annotated else TypingAnnotated class SubTool(typed_dict): - """Subtool docstring""" + """Subtool docstring.""" args: annotated[dict[str, Any], {}, "this does bar"] # noqa: F722 # type: ignore class Tool(typed_dict): - """Docstring + """Docstring. 
Args: arg1: foo @@ -981,7 +981,7 @@ def magic_function(input: int | float) -> str: def test_convert_to_openai_function_no_args() -> None: @tool def empty_tool() -> str: - """No args""" + """No args.""" return "foo" actual = convert_to_openai_function(empty_tool, strict=True) diff --git a/libs/core/tests/unit_tests/utils/test_pydantic.py b/libs/core/tests/unit_tests/utils/test_pydantic.py index fbe748a227dfc8..6bbcdcc3a94383 100644 --- a/libs/core/tests/unit_tests/utils/test_pydantic.py +++ b/libs/core/tests/unit_tests/utils/test_pydantic.py @@ -140,7 +140,7 @@ class Bar(BaseModelV1): @pytest.mark.skipif(PYDANTIC_MAJOR_VERSION != 2, reason="Only tests Pydantic v2") def test_with_field_metadata() -> None: - """Test pydantic with field metadata""" + """Test pydantic with field metadata.""" from pydantic import BaseModel as BaseModelV2 from pydantic import Field as FieldV2 @@ -202,7 +202,6 @@ class Foo(BaseModel): def test_create_model_v2() -> None: """Test that create model v2 works as expected.""" - with warnings.catch_warnings(record=True) as record: warnings.simplefilter("always") # Cause all warnings to always be triggered foo = create_model_v2("Foo", field_definitions={"a": (int, None)}) diff --git a/libs/core/tests/unit_tests/vectorstores/test_in_memory.py b/libs/core/tests/unit_tests/vectorstores/test_in_memory.py index e76f843616be81..67ede1c1508bd7 100644 --- a/libs/core/tests/unit_tests/vectorstores/test_in_memory.py +++ b/libs/core/tests/unit_tests/vectorstores/test_in_memory.py @@ -35,7 +35,7 @@ async def test_inmemory_similarity_search() -> None: async def test_inmemory_similarity_search_with_score() -> None: - """Test end to end similarity search with score""" + """Test end to end similarity search with score.""" store = await InMemoryVectorStore.afrom_texts( ["foo", "bar", "baz"], DeterministicFakeEmbedding(size=3) ) @@ -63,7 +63,7 @@ async def test_add_by_ids() -> None: async def test_inmemory_mmr() -> None: - """Test MMR search""" + """Test MMR 
search.""" texts = ["foo", "foo", "fou", "foy"] docsearch = await InMemoryVectorStore.afrom_texts( texts, DeterministicFakeEmbedding(size=6) @@ -147,7 +147,6 @@ async def test_inmemory_upsert() -> None: async def test_inmemory_get_by_ids() -> None: """Test get by ids.""" - store = InMemoryVectorStore(embedding=DeterministicFakeEmbedding(size=3)) store.upsert( diff --git a/libs/core/tests/unit_tests/vectorstores/test_vectorstore.py b/libs/core/tests/unit_tests/vectorstores/test_vectorstore.py index a0472b70cdaaef..c9c6592545b4cf 100644 --- a/libs/core/tests/unit_tests/vectorstores/test_vectorstore.py +++ b/libs/core/tests/unit_tests/vectorstores/test_vectorstore.py @@ -117,7 +117,6 @@ def test_default_add_documents(vs_class: type[VectorStore]) -> None: """Test that we can implement the upsert method of the CustomVectorStore class without violating the Liskov Substitution Principle. """ - store = vs_class() # Check upsert with id