# (FEAT) allow setting base url for openai v1.0.0+ #32

Open · wants to merge 2 commits into base: main
## py/autoevals/llm.py (9 changes: 8 additions & 1 deletion)
```diff
@@ -79,6 +79,7 @@ def __init__(
         temperature=None,
         engine=None,
         api_key=None,
+        base_url=None,
     ):
         self.name = name
         self.model = model
@@ -92,6 +93,8 @@ def __init__(
             self.extra_args["max_tokens"] = max(max_tokens, 5)
         if api_key:
             self.extra_args["api_key"] = api_key
+        if base_url:
+            self.extra_args["base_url"] = base_url
 
         self.render_args = {}
         if render_args:
@@ -199,6 +202,7 @@ def __init__(
         temperature=0,
         engine=None,
         api_key=None,
+        base_url=None,
     ):
         choice_strings = list(choice_scores.keys())
 
@@ -220,6 +224,7 @@ def __init__(
             temperature=temperature,
             engine=engine,
             api_key=api_key,
+            base_url=base_url,
             render_args={"__choices": choice_strings},
         )
 
@@ -235,7 +240,7 @@ def from_spec_file(cls, name: str, path: str, **kwargs):
 
 
 class SpecFileClassifier(LLMClassifier):
-    def __new__(cls, model=None, engine=None, use_cot=None, max_tokens=None, temperature=None, api_key=None):
+    def __new__(cls, model=None, engine=None, use_cot=None, max_tokens=None, temperature=None, api_key=None, base_url=None):
         kwargs = {}
         if model is not None:
             kwargs["model"] = model
@@ -249,6 +254,8 @@ def __new__(cls, model=None, engine=None, use_cot=None, max_tokens=None, tempera
             kwargs["temperature"] = temperature
         if api_key is not None:
             kwargs["api_key"] = api_key
+        if base_url is not None:
+            kwargs["base_url"] = base_url
 
         # convert FooBar to foo_bar
         template_name = re.sub(r"(?<!^)(?=[A-Z])", "_", cls.__name__).lower()
```
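The `re.sub` call in the last hunk converts a CamelCase class name into the snake_case name of the template to load. A standalone sketch of that pattern (not part of the PR):

```python
import re

def to_snake_case(name: str) -> str:
    # Insert "_" before every interior uppercase letter (the lookbehind
    # skips the first character), then lowercase the whole string.
    return re.sub(r"(?<!^)(?=[A-Z])", "_", name).lower()

print(to_snake_case("FooBar"))              # foo_bar
print(to_snake_case("SpecFileClassifier"))  # spec_file_classifier
```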
## py/autoevals/oai.py (15 changes: 8 additions & 7 deletions)
```diff
@@ -35,7 +35,8 @@ def open_cache():
 CACHE_LOCK = threading.Lock()
 
 
-def prepare_openai_complete(is_async=False, api_key=None):
+def prepare_openai_complete(is_async=False, api_key=None, base_url="https://api.openai.com/v1"):
+    # base_url always defaults to https://api.openai.com/v1 unless specified by users
     try:
         import openai
     except Exception as e:
```

> **Review comment (Contributor)**, on the new `prepare_openai_complete` signature: would rather not hardcode the API url here -- e.g. someone may monkey patch the library to override the default, and this would override that. Is there any issue with passing `None` into the OpenAI object by default?
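For context on the reviewer's question: the v1 `openai` client already treats a missing base URL as "resolve it yourself," falling back to the `OPENAI_BASE_URL` environment variable and then to the library default. A minimal sketch, assuming `openai>=1.0` is installed:

```python
import openai

# With base_url=None, the client falls back to the OPENAI_BASE_URL
# environment variable if set, else the library's built-in default,
# so env overrides and monkey patches are respected.
client = openai.OpenAI(api_key="sk-...", base_url=None)
print(client.base_url)  # https://api.openai.com/v1/ unless overridden
```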
```diff
@@ -59,9 +60,9 @@ def prepare_openai_complete(is_async=False, api_key=None):
         # This is the new v1 API
         is_v1 = True
     if is_async:
-        openai_obj = openai.AsyncOpenAI(api_key=api_key)
+        openai_obj = openai.AsyncOpenAI(api_key=api_key, base_url=base_url)
     else:
-        openai_obj = openai.OpenAI(api_key=api_key)
+        openai_obj = openai.OpenAI(api_key=api_key, base_url=base_url)
 
     try:
         from braintrust.oai import wrap_openai
```
```diff
@@ -109,9 +110,9 @@ def log_cached_response(params, resp):
     )
 
 
-def run_cached_request(api_key=None, **kwargs):
+def run_cached_request(api_key=None, base_url=None, **kwargs):
     # OpenAI is very slow to import, so we only do it if we need it
-    complete, RateLimitError = prepare_openai_complete(is_async=False, api_key=api_key)
+    complete, RateLimitError = prepare_openai_complete(is_async=False, api_key=api_key, base_url=base_url)
 
     param_key = json.dumps(kwargs)
     conn = open_cache()
```
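One side effect worth flagging: like `api_key`, `base_url` is consumed as a named parameter and never reaches `**kwargs`, so it is excluded from the `json.dumps(kwargs)` cache key, and two calls that differ only in endpoint share a cache entry. A small sketch of the consequence (hypothetical request values):

```python
import json

# The kwargs that form the cache key contain no base_url, so a request
# to api.openai.com and the same request to a local proxy collide.
kwargs = {"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "hi"}]}
key_default = json.dumps(kwargs)  # run_cached_request(**kwargs)
key_proxy = json.dumps(kwargs)    # run_cached_request(base_url="http://localhost:8000/v1", **kwargs)
assert key_default == key_proxy   # identical keys, different endpoints
```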
```diff
@@ -141,8 +142,8 @@ def run_cached_request(api_key=None, base_url=None, **kwargs):
     return resp
 
 
-async def arun_cached_request(api_key=None, **kwargs):
-    complete, RateLimitError = prepare_openai_complete(is_async=True, api_key=api_key)
+async def arun_cached_request(api_key=None, base_url=None, **kwargs):
+    complete, RateLimitError = prepare_openai_complete(is_async=True, api_key=api_key, base_url=base_url)
 
     param_key = json.dumps(kwargs)
     conn = open_cache()
```
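Taken together, the change lets callers point any classifier at an OpenAI-compatible endpoint. A hedged usage sketch (the URL, key, and prompt below are illustrative assumptions; the call convention follows autoevals' standard scorer interface):

```python
from autoevals.llm import LLMClassifier

# Hypothetical classifier aimed at a local OpenAI-compatible server;
# base_url flows through extra_args into openai.OpenAI(...).
classifier = LLMClassifier(
    name="relevance",
    prompt_template="Does the answer {{output}} address the question {{input}}? Answer Y or N.",
    choice_scores={"Y": 1, "N": 0},
    model="gpt-3.5-turbo",
    api_key="sk-local",                   # placeholder key
    base_url="http://localhost:8000/v1",  # e.g. a vLLM or proxy endpoint
)

result = classifier("4", input="What is 2+2?")
print(result.score)
```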