chore(internal): minor change to tests #1466

Merged 1 commit on Jun 5, 2024
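
This PR makes one mechanical substitution across two test files: placeholder model names (model="string") are replaced with real model IDs — tts-1 for the audio speech tests and gpt-3.5-turbo-instruct for the completions tests. A representative before/after, taken from the speech tests:

    # Before: a placeholder that names no real model
    speech = client.audio.speech.create(
        input="string",
        model="string",
        voice="alloy",
    )

    # After: a real text-to-speech model ID
    speech = client.audio.speech.create(
        input="string",
        model="tts-1",
        voice="alloy",
    )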
16 changes: 8 additions & 8 deletions tests/api_resources/audio/test_speech.py
@@ -27,7 +27,7 @@ def test_method_create(self, client: OpenAI, respx_mock: MockRouter) -> None:
         respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
         speech = client.audio.speech.create(
             input="string",
-            model="string",
+            model="tts-1",
             voice="alloy",
         )
         assert isinstance(speech, _legacy_response.HttpxBinaryResponseContent)
@@ -39,7 +39,7 @@ def test_method_create_with_all_params(self, client: OpenAI, respx_mock: MockRouter) -> None:
         respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
         speech = client.audio.speech.create(
             input="string",
-            model="string",
+            model="tts-1",
             voice="alloy",
             response_format="mp3",
             speed=0.25,
@@ -54,7 +54,7 @@ def test_raw_response_create(self, client: OpenAI, respx_mock: MockRouter) -> None:

         response = client.audio.speech.with_raw_response.create(
             input="string",
-            model="string",
+            model="tts-1",
             voice="alloy",
         )

@@ -69,7 +69,7 @@ def test_streaming_response_create(self, client: OpenAI, respx_mock: MockRouter) -> None:
         respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
         with client.audio.speech.with_streaming_response.create(
             input="string",
-            model="string",
+            model="tts-1",
             voice="alloy",
         ) as response:
             assert not response.is_closed
@@ -90,7 +90,7 @@ async def test_method_create(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None:
         respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
         speech = await async_client.audio.speech.create(
             input="string",
-            model="string",
+            model="tts-1",
             voice="alloy",
         )
         assert isinstance(speech, _legacy_response.HttpxBinaryResponseContent)
@@ -102,7 +102,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None:
         respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
         speech = await async_client.audio.speech.create(
             input="string",
-            model="string",
+            model="tts-1",
             voice="alloy",
             response_format="mp3",
             speed=0.25,
@@ -117,7 +117,7 @@ async def test_raw_response_create(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None:

         response = await async_client.audio.speech.with_raw_response.create(
             input="string",
-            model="string",
+            model="tts-1",
             voice="alloy",
         )

@@ -132,7 +132,7 @@ async def test_streaming_response_create(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None:
         respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
         async with async_client.audio.speech.with_streaming_response.create(
             input="string",
-            model="string",
+            model="tts-1",
             voice="alloy",
         ) as response:
             assert not response.is_closed
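
These speech tests never hit the network: the respx_mock fixture intercepts the POST to /audio/speech and returns a canned body, so any well-formed model ID passes. A minimal standalone sketch of the same pattern — the suite itself wires this up through a MockRouter fixture with a configured base URL, so the @respx.mock decorator, the absolute URL, and the dummy api_key below are assumptions for illustration:

    import httpx
    import respx
    from openai import OpenAI

    @respx.mock
    def test_speech_create_offline() -> None:
        # Intercept the outgoing request; no real API call is made.
        respx.post("https://api.openai.com/v1/audio/speech").mock(
            return_value=httpx.Response(200, json={"foo": "bar"})
        )
        client = OpenAI(api_key="dummy-key")  # dummy credential, never sent anywhere real
        speech = client.audio.speech.create(
            input="string",
            model="tts-1",
            voice="alloy",
        )
        # The mocked body comes back verbatim as binary content.
        assert speech.content == b'{"foo": "bar"}'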
32 changes: 16 additions & 16 deletions tests/api_resources/test_completions.py
@@ -20,15 +20,15 @@ class TestCompletions:
     @parametrize
     def test_method_create_overload_1(self, client: OpenAI) -> None:
         completion = client.completions.create(
-            model="string",
+            model="gpt-3.5-turbo-instruct",
             prompt="This is a test.",
         )
         assert_matches_type(Completion, completion, path=["response"])

     @parametrize
     def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
         completion = client.completions.create(
-            model="string",
+            model="gpt-3.5-turbo-instruct",
             prompt="This is a test.",
             best_of=0,
             echo=True,
@@ -52,7 +52,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
     @parametrize
     def test_raw_response_create_overload_1(self, client: OpenAI) -> None:
         response = client.completions.with_raw_response.create(
-            model="string",
+            model="gpt-3.5-turbo-instruct",
             prompt="This is a test.",
         )

@@ -64,7 +64,7 @@ def test_raw_response_create_overload_1(self, client: OpenAI) -> None:
     @parametrize
     def test_streaming_response_create_overload_1(self, client: OpenAI) -> None:
         with client.completions.with_streaming_response.create(
-            model="string",
+            model="gpt-3.5-turbo-instruct",
             prompt="This is a test.",
         ) as response:
             assert not response.is_closed
@@ -78,7 +78,7 @@ def test_streaming_response_create_overload_1(self, client: OpenAI) -> None:
     @parametrize
     def test_method_create_overload_2(self, client: OpenAI) -> None:
         completion_stream = client.completions.create(
-            model="string",
+            model="gpt-3.5-turbo-instruct",
             prompt="This is a test.",
             stream=True,
         )
@@ -87,7 +87,7 @@ def test_method_create_overload_2(self, client: OpenAI) -> None:
     @parametrize
     def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
         completion_stream = client.completions.create(
-            model="string",
+            model="gpt-3.5-turbo-instruct",
             prompt="This is a test.",
             stream=True,
             best_of=0,
@@ -111,7 +111,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
     @parametrize
     def test_raw_response_create_overload_2(self, client: OpenAI) -> None:
         response = client.completions.with_raw_response.create(
-            model="string",
+            model="gpt-3.5-turbo-instruct",
             prompt="This is a test.",
             stream=True,
         )
@@ -123,7 +123,7 @@ def test_raw_response_create_overload_2(self, client: OpenAI) -> None:
     @parametrize
     def test_streaming_response_create_overload_2(self, client: OpenAI) -> None:
         with client.completions.with_streaming_response.create(
-            model="string",
+            model="gpt-3.5-turbo-instruct",
             prompt="This is a test.",
             stream=True,
         ) as response:
@@ -142,15 +142,15 @@ class TestAsyncCompletions:
     @parametrize
     async def test_method_create_overload_1(self, async_client: AsyncOpenAI) -> None:
         completion = await async_client.completions.create(
-            model="string",
+            model="gpt-3.5-turbo-instruct",
             prompt="This is a test.",
         )
         assert_matches_type(Completion, completion, path=["response"])

     @parametrize
     async def test_method_create_with_all_params_overload_1(self, async_client: AsyncOpenAI) -> None:
         completion = await async_client.completions.create(
-            model="string",
+            model="gpt-3.5-turbo-instruct",
             prompt="This is a test.",
             best_of=0,
             echo=True,
@@ -174,7 +174,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: AsyncOpenAI) -> None:
     @parametrize
     async def test_raw_response_create_overload_1(self, async_client: AsyncOpenAI) -> None:
         response = await async_client.completions.with_raw_response.create(
-            model="string",
+            model="gpt-3.5-turbo-instruct",
             prompt="This is a test.",
         )

@@ -186,7 +186,7 @@ async def test_raw_response_create_overload_1(self, async_client: AsyncOpenAI) -> None:
     @parametrize
     async def test_streaming_response_create_overload_1(self, async_client: AsyncOpenAI) -> None:
         async with async_client.completions.with_streaming_response.create(
-            model="string",
+            model="gpt-3.5-turbo-instruct",
             prompt="This is a test.",
         ) as response:
             assert not response.is_closed
@@ -200,7 +200,7 @@ async def test_streaming_response_create_overload_1(self, async_client: AsyncOpenAI) -> None:
     @parametrize
     async def test_method_create_overload_2(self, async_client: AsyncOpenAI) -> None:
         completion_stream = await async_client.completions.create(
-            model="string",
+            model="gpt-3.5-turbo-instruct",
             prompt="This is a test.",
             stream=True,
         )
@@ -209,7 +209,7 @@ async def test_method_create_overload_2(self, async_client: AsyncOpenAI) -> None:
     @parametrize
     async def test_method_create_with_all_params_overload_2(self, async_client: AsyncOpenAI) -> None:
         completion_stream = await async_client.completions.create(
-            model="string",
+            model="gpt-3.5-turbo-instruct",
             prompt="This is a test.",
             stream=True,
             best_of=0,
@@ -233,7 +233,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: AsyncOpenAI) -> None:
     @parametrize
     async def test_raw_response_create_overload_2(self, async_client: AsyncOpenAI) -> None:
         response = await async_client.completions.with_raw_response.create(
-            model="string",
+            model="gpt-3.5-turbo-instruct",
             prompt="This is a test.",
             stream=True,
         )
@@ -245,7 +245,7 @@ async def test_raw_response_create_overload_2(self, async_client: AsyncOpenAI) -> None:
     @parametrize
     async def test_streaming_response_create_overload_2(self, async_client: AsyncOpenAI) -> None:
         async with async_client.completions.with_streaming_response.create(
-            model="string",
+            model="gpt-3.5-turbo-instruct",
             prompt="This is a test.",
             stream=True,
         ) as response:
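
The completions tests repeat the substitution across two call shapes per client, which is why the same one-line change appears so many times: overload 1 returns a single Completion, while overload 2 (stream=True) returns an iterator of chunks. A short sketch of both shapes, assuming OPENAI_API_KEY is set in the environment; gpt-3.5-turbo-instruct is a realistic ID for the legacy /v1/completions endpoint these tests exercise:

    from openai import OpenAI

    client = OpenAI()  # reads OPENAI_API_KEY from the environment

    # Overload 1: blocking call, one Completion object back.
    completion = client.completions.create(
        model="gpt-3.5-turbo-instruct",
        prompt="This is a test.",
    )
    print(completion.choices[0].text)

    # Overload 2: stream=True yields Completion chunks as they arrive.
    stream = client.completions.create(
        model="gpt-3.5-turbo-instruct",
        prompt="This is a test.",
        stream=True,
    )
    for chunk in stream:
        print(chunk.choices[0].text, end="")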