Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

release: 1.3.4 #853

Merged
merged 3 commits into from
Nov 21, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .release-please-manifest.json
Original file line number Diff line number Diff line change
@@ -1,3 +1,3 @@
{
".": "1.3.3"
".": "1.3.4"
}
13 changes: 13 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,5 +1,18 @@
# Changelog

## 1.3.4 (2023-11-21)

Full Changelog: [v1.3.3...v1.3.4](https://github.com/openai/openai-python/compare/v1.3.3...v1.3.4)

### Bug Fixes

* **client:** attempt to parse unknown json content types ([#854](https://github.com/openai/openai-python/issues/854)) ([ba50466](https://github.com/openai/openai-python/commit/ba5046611029a67714d5120b9cc6a3c7fecce10c))


### Chores

* **examples:** fix static types in assistants example ([#852](https://github.com/openai/openai-python/issues/852)) ([5b47b2c](https://github.com/openai/openai-python/commit/5b47b2c542b9b4fb143af121022e2d5ad0890ef4))

## 1.3.3 (2023-11-17)

Full Changelog: [v1.3.2...v1.3.3](https://github.com/openai/openai-python/compare/v1.3.2...v1.3.3)
Expand Down
30 changes: 12 additions & 18 deletions examples/assistant.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
import openai
import time

import openai

# gets API Key from environment variable OPENAI_API_KEY
client = openai.OpenAI()

Expand All @@ -16,38 +17,31 @@
message = client.beta.threads.messages.create(
thread_id=thread.id,
role="user",
content="I need to solve the equation `3x + 11 = 14`. Can you help me?"
content="I need to solve the equation `3x + 11 = 14`. Can you help me?",
)

run = client.beta.threads.runs.create(
thread_id=thread.id,
assistant_id=assistant.id,
instructions="Please address the user as Jane Doe. The user has a premium account."
thread_id=thread.id,
assistant_id=assistant.id,
instructions="Please address the user as Jane Doe. The user has a premium account.",
)

print("checking assistant status. ")
while True:
run = client.beta.threads.runs.retrieve(
thread_id=thread.id,
run_id=run.id
)
run = client.beta.threads.runs.retrieve(thread_id=thread.id, run_id=run.id)

if run.status == "completed":
print("done!")
messages = client.beta.threads.messages.list(
thread_id=thread.id
)
messages = client.beta.threads.messages.list(thread_id=thread.id)

print("messages: ")
for message in messages:
print({
"role": message.role,
"message": message.content[0].text.value
})
assert message.content[0].type == "text"
print({"role": message.role, "message": message.content[0].text.value})

client.beta.assistants.delete(assistant.id)

break
else:
print("in progress...")
time.sleep(5)
time.sleep(5)
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[project]
name = "openai"
version = "1.3.3"
version = "1.3.4"
description = "The official Python library for the openai API"
readme = "README.md"
license = "Apache-2.0"
Expand Down
20 changes: 14 additions & 6 deletions src/openai/_base_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -74,7 +74,12 @@
RAW_RESPONSE_HEADER,
)
from ._streaming import Stream, AsyncStream
from ._exceptions import APIStatusError, APITimeoutError, APIConnectionError
from ._exceptions import (
APIStatusError,
APITimeoutError,
APIConnectionError,
APIResponseValidationError,
)

log: logging.Logger = logging.getLogger(__name__)

Expand Down Expand Up @@ -518,13 +523,16 @@ def _process_response_data(
if cast_to is UnknownResponse:
return cast(ResponseT, data)

if inspect.isclass(cast_to) and issubclass(cast_to, ModelBuilderProtocol):
return cast(ResponseT, cast_to.build(response=response, data=data))
try:
if inspect.isclass(cast_to) and issubclass(cast_to, ModelBuilderProtocol):
return cast(ResponseT, cast_to.build(response=response, data=data))

if self._strict_response_validation:
return cast(ResponseT, validate_type(type_=cast_to, value=data))
if self._strict_response_validation:
return cast(ResponseT, validate_type(type_=cast_to, value=data))

return cast(ResponseT, construct_type(type_=cast_to, value=data))
return cast(ResponseT, construct_type(type_=cast_to, value=data))
except pydantic.ValidationError as err:
raise APIResponseValidationError(response=response, body=data) from err

@property
def qs(self) -> Querystring:
Expand Down
13 changes: 13 additions & 0 deletions src/openai/_models.py
Original file line number Diff line number Diff line change
Expand Up @@ -263,6 +263,19 @@ def _construct_field(value: object, field: FieldInfo, key: str) -> object:
return construct_type(value=value, type_=type_)


def is_basemodel(type_: type) -> bool:
    """Returns whether or not the given type is either a `BaseModel` or a union of `BaseModel`"""
    # Unions are handled recursively: the type qualifies if any variant does.
    if is_union(type_):
        return any(is_basemodel(variant) for variant in get_args(type_))

    # For generic aliases (e.g. `Page[Model]`), inspect the unsubscripted origin.
    origin = get_origin(type_) or type_
    return issubclass(origin, BaseModel) or issubclass(origin, GenericModel)


def construct_type(*, value: object, type_: type) -> object:
"""Loose coercion to the expected type with construction of nested values.

Expand Down
31 changes: 21 additions & 10 deletions src/openai/_response.py
Original file line number Diff line number Diff line change
@@ -1,17 +1,17 @@
from __future__ import annotations

import inspect
import logging
import datetime
import functools
from typing import TYPE_CHECKING, Any, Union, Generic, TypeVar, Callable, cast
from typing_extensions import Awaitable, ParamSpec, get_args, override, get_origin

import httpx
import pydantic

from ._types import NoneType, UnknownResponse, BinaryResponseContent
from ._utils import is_given
from ._models import BaseModel
from ._models import BaseModel, is_basemodel
from ._constants import RAW_RESPONSE_HEADER
from ._exceptions import APIResponseValidationError

Expand All @@ -23,6 +23,8 @@
P = ParamSpec("P")
R = TypeVar("R")

log: logging.Logger = logging.getLogger(__name__)


class APIResponse(Generic[R]):
_cast_to: type[R]
Expand Down Expand Up @@ -174,6 +176,18 @@ def _parse(self) -> R:
# in the response, e.g. application/json; charset=utf-8
content_type, *_ = response.headers.get("content-type").split(";")
if content_type != "application/json":
if is_basemodel(cast_to):
try:
data = response.json()
except Exception as exc:
log.debug("Could not read JSON from response data due to %s - %s", type(exc), exc)
else:
return self._client._process_response_data(
data=data,
cast_to=cast_to, # type: ignore
response=response,
)

if self._client._strict_response_validation:
raise APIResponseValidationError(
response=response,
Expand All @@ -188,14 +202,11 @@ def _parse(self) -> R:

data = response.json()

try:
return self._client._process_response_data(
data=data,
cast_to=cast_to, # type: ignore
response=response,
)
except pydantic.ValidationError as err:
raise APIResponseValidationError(response=response, body=data) from err
return self._client._process_response_data(
data=data,
cast_to=cast_to, # type: ignore
response=response,
)

@override
def __repr__(self) -> str:
Expand Down
2 changes: 1 addition & 1 deletion src/openai/_version.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless.

__title__ = "openai"
__version__ = "1.3.3" # x-release-please-version
__version__ = "1.3.4" # x-release-please-version
42 changes: 42 additions & 0 deletions tests/test_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -401,6 +401,27 @@ class Model2(BaseModel):
assert isinstance(response, Model1)
assert response.foo == 1

@pytest.mark.respx(base_url=base_url)
def test_non_application_json_content_type_for_json_data(self, respx_mock: MockRouter) -> None:
    """
    Response that sets Content-Type to something other than application/json but returns json data
    """

    class Model(BaseModel):
        foo: int

    # Serve valid JSON under a non-JSON content type to exercise the
    # lenient-parsing path in the response handling.
    mocked_response = httpx.Response(
        200,
        content=json.dumps({"foo": 2}),
        headers={"Content-Type": "application/text"},
    )
    respx_mock.get("/foo").mock(return_value=mocked_response)

    result = self.client.get("/foo", cast_to=Model)
    assert isinstance(result, Model)
    assert result.foo == 2

def test_base_url_env(self) -> None:
with update_env(OPENAI_BASE_URL="http://localhost:5000/from/env"):
client = OpenAI(api_key=api_key, _strict_response_validation=True)
Expand Down Expand Up @@ -939,6 +960,27 @@ class Model2(BaseModel):
assert isinstance(response, Model1)
assert response.foo == 1

@pytest.mark.respx(base_url=base_url)
async def test_non_application_json_content_type_for_json_data(self, respx_mock: MockRouter) -> None:
    """
    Response that sets Content-Type to something other than application/json but returns json data
    """

    class Model(BaseModel):
        foo: int

    # Serve valid JSON under a non-JSON content type to exercise the
    # lenient-parsing path in the response handling (async client).
    mocked_response = httpx.Response(
        200,
        content=json.dumps({"foo": 2}),
        headers={"Content-Type": "application/text"},
    )
    respx_mock.get("/foo").mock(return_value=mocked_response)

    result = await self.client.get("/foo", cast_to=Model)
    assert isinstance(result, Model)
    assert result.foo == 2

def test_base_url_env(self) -> None:
with update_env(OPENAI_BASE_URL="http://localhost:5000/from/env"):
client = AsyncOpenAI(api_key=api_key, _strict_response_validation=True)
Expand Down