Skip to content

Commit

Permalink
small cleanup
Browse files Browse the repository at this point in the history
  • Loading branch information
ibolmo committed Dec 12, 2024
1 parent a00a0ba commit 5be31e9
Show file tree
Hide file tree
Showing 2 changed files with 10 additions and 12 deletions.
2 changes: 1 addition & 1 deletion py/autoevals/moderation.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,6 @@ def __init__(
super().__init__(api_key=api_key, base_url=base_url, client=client)
self.threshold = threshold

# need to check who calls _run_eval_a?sync
def _run_eval_sync(self, output, __expected=None):
moderation_response = run_cached_request(
client=self.client, request_type=REQUEST_TYPE, input=output, **self.extra_args
Expand Down Expand Up @@ -73,3 +72,4 @@ def compute_score(moderation_result, threshold):

__all__ = ["Moderation"]
20 changes: 9 additions & 11 deletions py/autoevals/oai.py
Original file line number Diff line number Diff line change
Expand Up @@ -71,6 +71,15 @@ def complete(self, **kwargs):


def init(*, client: Optional[LLMClient] = None):
    """Initialize Autoevals with an optional custom LLM client.

    This function sets up the global client context for Autoevals to use. If no
    client is provided, the default OpenAI client will be used.

    Args:
        client (Optional[LLMClient]): A custom LLM client instance that implements
            the LLMClient interface. If None, the default OpenAI client will be used.
    """
    # Store the client in the context variable so downstream calls pick it up.
    _client_var.set(client)


Expand Down Expand Up @@ -256,14 +265,3 @@ async def arun_cached_request(
retries += 1

    return resp

0 comments on commit 5be31e9

Please sign in to comment.