Skip to content

Commit

Permalink
fix: add missing parameters order, limit, before, and after in get_assistants method for openai (BerriAI#7537)
Browse files Browse the repository at this point in the history

- Ensured that `before` and `after` parameters are only passed when provided to avoid AttributeError.
- Implemented safe access using default values for `before` and `after` to prevent missing attribute issues.
- Added consistent handling of `order` and `limit` to improve flexibility and robustness in API calls.
  • Loading branch information
jeansouzak authored Jan 3, 2025
1 parent 33f301e commit 4b0505d
Show file tree
Hide file tree
Showing 3 changed files with 74 additions and 5 deletions.
4 changes: 4 additions & 0 deletions litellm/assistants/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -131,6 +131,10 @@ def get_assistants(
timeout=timeout,
max_retries=optional_params.max_retries,
organization=organization,
order=getattr(optional_params, "order", "desc"),
limit=getattr(optional_params, "limit", 20),
before=getattr(optional_params, "before", None),
after=getattr(optional_params, "after", None),
client=client,
aget_assistants=aget_assistants, # type: ignore
) # type: ignore
Expand Down
49 changes: 44 additions & 5 deletions litellm/llms/openai/openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -1928,6 +1928,10 @@ async def async_get_assistants(
max_retries: Optional[int],
organization: Optional[str],
client: Optional[AsyncOpenAI],
order: Optional[str] = 'desc',
limit: Optional[int] = 20,
before: Optional[str] = None,
after: Optional[str] = None,
) -> AsyncCursorPage[Assistant]:
openai_client = self.async_get_openai_client(
api_key=api_key,
Expand All @@ -1937,8 +1941,16 @@ async def async_get_assistants(
organization=organization,
client=client,
)

response = await openai_client.beta.assistants.list()
request_params = {
"order": order,
"limit": limit,
}
if before:
request_params["before"] = before
if after:
request_params["after"] = after

response = await openai_client.beta.assistants.list(**request_params)

return response

Expand All @@ -1953,7 +1965,11 @@ def get_assistants(
max_retries: Optional[int],
organization: Optional[str],
client: Optional[AsyncOpenAI],
aget_assistants: Literal[True],
aget_assistants: Literal[True],
order: Optional[str] = 'desc',
limit: Optional[int] = 20,
before: Optional[str] = None,
after: Optional[str] = None,
) -> Coroutine[None, None, AsyncCursorPage[Assistant]]:
...

Expand All @@ -1966,7 +1982,11 @@ def get_assistants(
max_retries: Optional[int],
organization: Optional[str],
client: Optional[OpenAI],
aget_assistants: Optional[Literal[False]],
aget_assistants: Optional[Literal[False]],
order: Optional[str] = 'desc',
limit: Optional[int] = 20,
before: Optional[str] = None,
after: Optional[str] = None,
) -> SyncCursorPage[Assistant]:
...

Expand All @@ -1981,6 +2001,10 @@ def get_assistants(
organization: Optional[str],
client=None,
aget_assistants=None,
order: Optional[str] = 'desc',
limit: Optional[int] = 20,
before: Optional[str] = None,
after: Optional[str] = None,
):
if aget_assistants is not None and aget_assistants is True:
return self.async_get_assistants(
Expand All @@ -1990,6 +2014,10 @@ def get_assistants(
max_retries=max_retries,
organization=organization,
client=client,
order=order,
limit=limit,
before=before,
after=after,
)
openai_client = self.get_openai_client(
api_key=api_key,
Expand All @@ -2000,7 +2028,18 @@ def get_assistants(
client=client,
)

response = openai_client.beta.assistants.list()
request_params = {
"order": order,
"limit": limit,
}

if before:
request_params["before"] = before
if after:
request_params["after"] = after


response = openai_client.beta.assistants.list(**request_params)

return response

Expand Down
26 changes: 26 additions & 0 deletions litellm/proxy/proxy_server.py
Original file line number Diff line number Diff line change
Expand Up @@ -4434,6 +4434,10 @@ async def get_assistants(
request: Request,
fastapi_response: Response,
user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth),
order: Optional[str] = Query(None, description="Sort order by the created_at timestamp of the objects. asc for ascending order and desc for descending order."),
limit: Optional[int] = Query(None, description="A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20."),
after: Optional[str] = Query(None, description="A cursor for use in pagination. after is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list."),
before: Optional[str] = Query(None, description="A cursor for use in pagination. before is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, starting with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list."),
):
"""
Returns a list of assistants.
Expand All @@ -4456,6 +4460,28 @@ async def get_assistants(
proxy_config=proxy_config,
)

# Validate `order` parameter
if order and order not in ["asc", "desc"]:
raise HTTPException(
status_code=400, detail={"error": "order must be 'asc' or 'desc'"}
)
if order:
data["order"] = order

# Validate `limit` parameter
if limit is not None:
if not (1 <= limit <= 100):
raise HTTPException(
status_code=400, detail={"error": "limit must be between 1 and 100"}
)
data["limit"] = limit

# Add pagination cursors if provided
if after:
data["after"] = after
if before:
data["before"] = before

# for now use custom_llm_provider=="openai" -> this will change as LiteLLM adds more providers for acreate_batch
if llm_router is None:
raise HTTPException(
Expand Down

0 comments on commit 4b0505d

Please sign in to comment.