Commit
[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
pre-commit-ci[bot] committed Jan 9, 2025
1 parent 43c9228 commit 12b2415
Showing 5 changed files with 17 additions and 51 deletions.
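Every hunk in this commit collapses a statement that had been wrapped across several lines back onto a single line. That is the kind of rewrite a formatter such as Black applies when a statement fits within the configured line length. The repository's actual pre-commit hook configuration is not shown on this page, so the formatter and the 120-character limit below are assumptions; this is only a minimal sketch of the effect.

# Rough illustration only: reproduce the single-line rewrite with Black's API,
# assuming a 120-character line length (the repo's real hook config is not shown here).
import black

wrapped = (
    "def _get_ls_params(\n"
    "    self, stop: Optional[List[str]] = None, **kwargs: Any\n"
    ") -> LangSmithParams:\n"
    "    ...\n"
)

print(black.format_str(wrapped, mode=black.Mode(line_length=120)))
# def _get_ls_params(self, stop: Optional[List[str]] = None, **kwargs: Any) -> LangSmithParams:
#     ...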
16 changes: 4 additions & 12 deletions comps/integrations/langchain/langchain_opea/chat_models.py
@@ -86,9 +86,7 @@ def _llm_type(self) -> str:
         """Return type of chat model."""
         return "opea-chat"
 
-    def _get_ls_params(
-        self, stop: Optional[List[str]] = None, **kwargs: Any
-    ) -> LangSmithParams:
+    def _get_ls_params(self, stop: Optional[List[str]] = None, **kwargs: Any) -> LangSmithParams:
         """Get the parameters used to invoke the model."""
         params = super()._get_ls_params(stop=stop, **kwargs)
         params["ls_provider"] = "opea"
@@ -103,9 +101,7 @@ def validate_environment(self) -> Self:
             raise ValueError("n must be 1 when streaming.")
 
         client_params: dict = {
-            "api_key": (
-                self.opea_api_key.get_secret_value() if self.opea_api_key else None
-            ),
+            "api_key": (self.opea_api_key.get_secret_value() if self.opea_api_key else None),
             "base_url": self.opea_api_base,
         }
 
@@ -117,14 +113,10 @@ def validate_environment(self) -> Self:
 
         if not (self.client or None):
             sync_specific: dict = {"http_client": self.http_client}
-            self.client = openai.OpenAI(
-                **client_params, **sync_specific
-            ).chat.completions
+            self.client = openai.OpenAI(**client_params, **sync_specific).chat.completions
         if not (self.async_client or None):
             async_specific: dict = {"http_client": self.http_async_client}
-            self.async_client = openai.AsyncOpenAI(
-                **client_params, **async_specific
-            ).chat.completions
+            self.async_client = openai.AsyncOpenAI(**client_params, **async_specific).chat.completions
         return self
 
     @property
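For context on the code being reformatted: validate_environment assembles keyword arguments from the configured key and base URL, then builds synchronous and asynchronous chat-completions clients with the openai SDK. A standalone sketch of that pattern, with placeholder endpoint and key values rather than anything taken from this repository:

# Standalone sketch of the client construction reformatted above; the URL and
# key are placeholders, and the validation logic of the real class is omitted.
import httpx
import openai

opea_api_key = "sk-placeholder"                 # placeholder secret
opea_api_base = "http://localhost:9009/v1"      # placeholder OPEA endpoint

client_params: dict = {
    "api_key": opea_api_key if opea_api_key else None,
    "base_url": opea_api_base,
}

sync_specific: dict = {"http_client": httpx.Client()}
async_specific: dict = {"http_client": httpx.AsyncClient()}

client = openai.OpenAI(**client_params, **sync_specific).chat.completions
async_client = openai.AsyncOpenAI(**client_params, **async_specific).chat.completions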
4 changes: 1 addition & 3 deletions comps/integrations/langchain/langchain_opea/llms.py
@@ -76,9 +76,7 @@ def validate_environment(self) -> Self:
         if self.streaming and self.best_of > 1:
             raise ValueError("Cannot stream results when best_of > 1.")
         client_params: dict = {
-            "api_key": self.opea_api_key.get_secret_value()
-            if self.opea_api_key
-            else None,
+            "api_key": self.opea_api_key.get_secret_value() if self.opea_api_key else None,
             "base_url": self.opea_api_base,
         }
 
@@ -50,14 +50,10 @@ def supports_anthropic_inputs(self) -> bool:
 
         supports_anthropic_inputs
 
-    @pytest.mark.xfail(
-        reason=("Fails with 'AssertionError'. OPEA does not support 'tool_choice' yet.")
-    )
+    @pytest.mark.xfail(reason=("Fails with 'AssertionError'. OPEA does not support 'tool_choice' yet."))
     def test_structured_output(self, model: BaseChatModel) -> None:
         super().test_structured_output(model)
 
-    @pytest.mark.xfail(
-        reason=("Fails with 'AssertionError'. OPEA does not support 'tool_choice' yet.")
-    )
+    @pytest.mark.xfail(reason=("Fails with 'AssertionError'. OPEA does not support 'tool_choice' yet."))
     def test_structured_output_pydantic_2_v1(self, model: BaseChatModel) -> None:
         super().test_structured_output_pydantic_2_v1(model)
32 changes: 8 additions & 24 deletions comps/integrations/langchain/tests/integration_tests/test_llms.py
@@ -11,29 +11,23 @@
 
 def test_stream() -> None:
     """Test streaming tokens from OpenAI."""
-    llm = OPEALLM(
-        opea_api_base=OPEA_API_BASE, opea_api_key=OPEA_API_KEY, model_name=MODEL_NAME
-    )
+    llm = OPEALLM(opea_api_base=OPEA_API_BASE, opea_api_key=OPEA_API_KEY, model_name=MODEL_NAME)
 
     for token in llm.stream("I'm Pickle Rick"):
         assert isinstance(token, str)
 
 
 async def test_astream() -> None:
     """Test streaming tokens from OpenAI."""
-    llm = OPEALLM(
-        opea_api_base=OPEA_API_BASE, opea_api_key=OPEA_API_KEY, model_name=MODEL_NAME
-    )
+    llm = OPEALLM(opea_api_base=OPEA_API_BASE, opea_api_key=OPEA_API_KEY, model_name=MODEL_NAME)
 
     async for token in llm.astream("I'm Pickle Rick"):
         assert isinstance(token, str)
 
 
 async def test_abatch() -> None:
     """Test streaming tokens from OPEALLM."""
-    llm = OPEALLM(
-        opea_api_base=OPEA_API_BASE, opea_api_key=OPEA_API_KEY, model_name=MODEL_NAME
-    )
+    llm = OPEALLM(opea_api_base=OPEA_API_BASE, opea_api_key=OPEA_API_KEY, model_name=MODEL_NAME)
 
     result = await llm.abatch(["I'm Pickle Rick", "I'm not Pickle Rick"])
     for token in result:
@@ -42,22 +36,16 @@ async def test_abatch() -> None:
 
 async def test_abatch_tags() -> None:
     """Test batch tokens from OPEALLM."""
-    llm = OPEALLM(
-        opea_api_base=OPEA_API_BASE, opea_api_key=OPEA_API_KEY, model_name=MODEL_NAME
-    )
+    llm = OPEALLM(opea_api_base=OPEA_API_BASE, opea_api_key=OPEA_API_KEY, model_name=MODEL_NAME)
 
-    result = await llm.abatch(
-        ["I'm Pickle Rick", "I'm not Pickle Rick"], config={"tags": ["foo"]}
-    )
+    result = await llm.abatch(["I'm Pickle Rick", "I'm not Pickle Rick"], config={"tags": ["foo"]})
     for token in result:
         assert isinstance(token, str)
 
 
 def test_batch() -> None:
     """Test batch tokens from OPEALLM."""
-    llm = OPEALLM(
-        opea_api_base=OPEA_API_BASE, opea_api_key=OPEA_API_KEY, model_name=MODEL_NAME
-    )
+    llm = OPEALLM(opea_api_base=OPEA_API_BASE, opea_api_key=OPEA_API_KEY, model_name=MODEL_NAME)
 
     result = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"])
     for token in result:
@@ -66,19 +54,15 @@ def test_batch() -> None:
 
 async def test_ainvoke() -> None:
     """Test invoke tokens from OPEALLM."""
-    llm = OPEALLM(
-        opea_api_base=OPEA_API_BASE, opea_api_key=OPEA_API_KEY, model_name=MODEL_NAME
-    )
+    llm = OPEALLM(opea_api_base=OPEA_API_BASE, opea_api_key=OPEA_API_KEY, model_name=MODEL_NAME)
 
     result = await llm.ainvoke("I'm Pickle Rick", config={"tags": ["foo"]})
     assert isinstance(result, str)
 
 
 def test_invoke() -> None:
     """Test invoke tokens from OPEALLM."""
-    llm = OPEALLM(
-        opea_api_base=OPEA_API_BASE, opea_api_key=OPEA_API_KEY, model_name=MODEL_NAME
-    )
+    llm = OPEALLM(opea_api_base=OPEA_API_BASE, opea_api_key=OPEA_API_KEY, model_name=MODEL_NAME)
 
     result = llm.invoke("I'm Pickle Rick", config=dict(tags=["foo"]))
     assert isinstance(result, str)
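The integration tests above talk to a live OPEA endpoint through OPEALLM; the constants they reference (OPEA_API_BASE, OPEA_API_KEY, MODEL_NAME) are defined in a part of the file this diff does not show. One plausible way to supply them and reproduce a single call by hand, with environment-variable names and defaults that are assumptions rather than values from the repository:

# Hypothetical setup for running the tests' pattern by hand; the environment
# variable names and fallback values below are assumptions, not repo values.
import os

from langchain_opea.llms import OPEALLM

OPEA_API_BASE = os.environ.get("OPEA_API_BASE", "http://localhost:9009/v1")  # assumed default
OPEA_API_KEY = os.environ.get("OPEA_API_KEY", "unused")                      # assumed default
MODEL_NAME = os.environ.get("MODEL_NAME", "placeholder-model")               # assumed default

llm = OPEALLM(opea_api_base=OPEA_API_BASE, opea_api_key=OPEA_API_KEY, model_name=MODEL_NAME)
print(llm.invoke("I'm Pickle Rick", config={"tags": ["foo"]}))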
8 changes: 2 additions & 6 deletions comps/integrations/langchain/tests/unit_tests/test_llms.py
@@ -11,16 +11,12 @@
 
 def test_initialization() -> None:
     """Test integration initialization."""
-    OPEALLM(
-        opea_api_base=OPEA_API_BASE, opea_api_key=OPEA_API_KEY, model_name=MODEL_NAME
-    )
+    OPEALLM(opea_api_base=OPEA_API_BASE, opea_api_key=OPEA_API_KEY, model_name=MODEL_NAME)
 
 
 def test_model_params() -> None:
     # Test standard tracing params
-    llm = OPEALLM(
-        opea_api_base=OPEA_API_BASE, opea_api_key=OPEA_API_KEY, model_name=MODEL_NAME
-    )
+    llm = OPEALLM(opea_api_base=OPEA_API_BASE, opea_api_key=OPEA_API_KEY, model_name=MODEL_NAME)
     ls_params = llm._get_ls_params()
     assert ls_params == {
         "ls_provider": "opea",
