diff --git a/libs/community/langchain_community/chat_models/baidu_qianfan_endpoint.py b/libs/community/langchain_community/chat_models/baidu_qianfan_endpoint.py
index e91c8b7c56257..801364c4dff2e 100644
--- a/libs/community/langchain_community/chat_models/baidu_qianfan_endpoint.py
+++ b/libs/community/langchain_community/chat_models/baidu_qianfan_endpoint.py
@@ -271,6 +271,7 @@ def _generate(
                 },
             )
         params = self._convert_prompt_msg_params(messages, **kwargs)
+        params["stop"] = stop
         response_payload = self.client.do(**params)
         lc_msg = _convert_dict_to_message(response_payload)
         gen = ChatGeneration(
@@ -316,6 +317,7 @@ async def _agenerate(
                 },
             )
         params = self._convert_prompt_msg_params(messages, **kwargs)
+        params["stop"] = stop
         response_payload = await self.client.ado(**params)
         lc_msg = _convert_dict_to_message(response_payload)
         generations = []
@@ -339,6 +341,7 @@ def _stream(
         **kwargs: Any,
     ) -> Iterator[ChatGenerationChunk]:
         params = self._convert_prompt_msg_params(messages, **kwargs)
+        params["stop"] = stop
         params["stream"] = True
         for res in self.client.do(**params):
             if res:
@@ -365,6 +368,7 @@ async def _astream(
         **kwargs: Any,
     ) -> AsyncIterator[ChatGenerationChunk]:
         params = self._convert_prompt_msg_params(messages, **kwargs)
+        params["stop"] = stop
         params["stream"] = True
         async for res in await self.client.ado(**params):
             if res:
diff --git a/libs/community/langchain_community/llms/baidu_qianfan_endpoint.py b/libs/community/langchain_community/llms/baidu_qianfan_endpoint.py
index 2ef7baba68b15..1a611d2656f7b 100644
--- a/libs/community/langchain_community/llms/baidu_qianfan_endpoint.py
+++ b/libs/community/langchain_community/llms/baidu_qianfan_endpoint.py
@@ -180,6 +180,7 @@ def _call(
                 completion += chunk.text
             return completion
         params = self._convert_prompt_msg_params(prompt, **kwargs)
+        params["stop"] = stop
         response_payload = self.client.do(**params)
         return response_payload["result"]
 
@@ -198,6 +199,7 @@ async def _acall(
             return completion
 
         params = self._convert_prompt_msg_params(prompt, **kwargs)
+        params["stop"] = stop
         response_payload = await self.client.ado(**params)
         return response_payload["result"]
 
@@ -210,6 +212,7 @@ def _stream(
         **kwargs: Any,
     ) -> Iterator[GenerationChunk]:
         params = self._convert_prompt_msg_params(prompt, **{**kwargs, "stream": True})
+        params["stop"] = stop
         for res in self.client.do(**params):
             if res:
                 chunk = GenerationChunk(text=res["result"])
@@ -225,6 +228,7 @@ async def _astream(
         **kwargs: Any,
     ) -> AsyncIterator[GenerationChunk]:
         params = self._convert_prompt_msg_params(prompt, **{**kwargs, "stream": True})
+        params["stop"] = stop
         async for res in await self.client.ado(**params):
             if res:
                 chunk = GenerationChunk(text=res["result"])
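
All eight hunks make the same fix: _convert_prompt_msg_params only packs the messages/prompt and **kwargs, so the stop argument that LangChain hands to _generate, _call, and the streaming variants was silently dropped before the request reached the Qianfan client; copying it into params forwards it. Below is a minimal usage sketch of how stop reaches the patched paths. The model name and the QIANFAN_AK / QIANFAN_SK environment credentials are illustrative assumptions, not part of this diff.

# Hypothetical usage sketch (not part of the patch). Assumes valid
# credentials in the QIANFAN_AK / QIANFAN_SK environment variables;
# the model name is illustrative.
from langchain_community.chat_models import QianfanChatEndpoint
from langchain_community.llms import QianfanLLMEndpoint
from langchain_core.messages import HumanMessage

chat = QianfanChatEndpoint(model="ERNIE-Bot-turbo")
# BaseChatModel.invoke() forwards stop= into _generate(..., stop=...),
# which the patch now copies into the request params instead of dropping.
msg = chat.invoke([HumanMessage(content="Count from 1 to 10")], stop=["5"])

llm = QianfanLLMEndpoint(model="ERNIE-Bot-turbo")
# Same plumbing for the completion endpoint via _call / _stream.
text = llm.invoke("Count from 1 to 10", stop=["5"])

With the patch applied, both calls should truncate generation at the stop sequence instead of ignoring it, in the streaming paths as well as the blocking ones.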