diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 271a68cfd8..de0960aba8 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.11.1" + ".": "1.12.0" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 4ac4be39fb..5ef0b80e87 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,31 @@ # Changelog +## 1.12.0 (2024-02-08) + +Full Changelog: [v1.11.1...v1.12.0](https://github.com/openai/openai-python/compare/v1.11.1...v1.12.0) + +### Features + +* **api:** add `timestamp_granularities`, add `gpt-3.5-turbo-0125` model ([#1125](https://github.com/openai/openai-python/issues/1125)) ([1ecf8f6](https://github.com/openai/openai-python/commit/1ecf8f6b12323ed09fb6a2815c85b9533ee52a50)) +* **cli/images:** add support for `--model` arg ([#1132](https://github.com/openai/openai-python/issues/1132)) ([0d53866](https://github.com/openai/openai-python/commit/0d5386615cda7cd50d5db90de2119b84dba29519)) + + +### Bug Fixes + +* remove double brackets from timestamp_granularities param ([#1140](https://github.com/openai/openai-python/issues/1140)) ([3db0222](https://github.com/openai/openai-python/commit/3db022216a81fa86470b53ec1246669bc7b17897)) +* **types:** loosen most List params types to Iterable ([#1129](https://github.com/openai/openai-python/issues/1129)) ([bdb31a3](https://github.com/openai/openai-python/commit/bdb31a3b1db6ede4e02b3c951c4fd23f70260038)) + + +### Chores + +* **internal:** add lint command ([#1128](https://github.com/openai/openai-python/issues/1128)) ([4c021c0](https://github.com/openai/openai-python/commit/4c021c0ab0151c2ec092d860c9b60e22e658cd03)) +* **internal:** support serialising iterable types ([#1127](https://github.com/openai/openai-python/issues/1127)) ([98d4e59](https://github.com/openai/openai-python/commit/98d4e59afcf2d65d4e660d91eb9462240ef5cd63)) + + +### Documentation + +* add CONTRIBUTING.md ([#1138](https://github.com/openai/openai-python/issues/1138)) ([79c8f0e](https://github.com/openai/openai-python/commit/79c8f0e8bf5470e2e31e781e8d279331e89ddfbe)) + ## 1.11.1 (2024-02-04) Full Changelog: [v1.11.0...v1.11.1](https://github.com/openai/openai-python/compare/v1.11.0...v1.11.1) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000000..914ab67053 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,125 @@ +## Setting up the environment + +### With Rye + +We use [Rye](https://rye-up.com/) to manage dependencies so we highly recommend [installing it](https://rye-up.com/guide/installation/) as it will automatically provision a Python environment with the expected Python version.
+ +After installing Rye, you'll just have to run this command: + +```sh +$ rye sync --all-features +``` + +You can then run scripts using `rye run python script.py` or by activating the virtual environment: + +```sh +$ rye shell +# or manually activate - https://docs.python.org/3/library/venv.html#how-venvs-work +$ source .venv/bin/activate + +# now you can omit the `rye run` prefix +$ python script.py +``` + +### Without Rye + +If you don't want to install `Rye`, you can stick with the standard `pip` setup: ensure you have the Python version specified in `.python-version`, create a virtual environment however you like, and then install dependencies using this command: + +```sh +$ pip install -r requirements-dev.lock +``` + +## Modifying/Adding code + +Most of the SDK is generated code, and any modified code will be overridden on the next generation. The +`src/openai/lib/` and `examples/` directories are exceptions and will never be overridden. + +## Adding and running examples + +Files in the `examples/` directory are not modified by the Stainless generator and can be freely edited or +added to. + +```bash +# add an example to examples/<your-example>.py + +#!/usr/bin/env -S rye run python +… +``` + +``` +chmod +x examples/<your-example>.py +# run the example against your API +./examples/<your-example>.py +``` + +## Using the repository from source + +If you’d like to use the repository from source, you can either install from git or link to a cloned repository: + +To install via git: + +```bash +pip install git+ssh://git@github.com:openai/openai-python.git +``` + +Alternatively, you can build from source and install the wheel file: + +Building this package will create two files in the `dist/` directory, a `.tar.gz` containing the source files and a `.whl` that can be used to install the package efficiently. + +To create a distributable version of the library, all you have to do is run this command: + +```bash +rye build +# or +python -m build +``` + +Then to install: + +```sh +pip install ./path-to-wheel-file.whl +``` + +## Running tests + +Most tests require you to [set up a mock server](https://github.com/stoplightio/prism) against the OpenAPI spec before running them. + +```bash +# you will need npm installed +npx prism path/to/your/openapi.yml +``` + +```bash +rye run pytest +``` + +## Linting and formatting + +This repository uses [ruff](https://github.com/astral-sh/ruff) and +[black](https://github.com/psf/black) to lint and format the code. + +To lint: + +```bash +rye run lint +``` + +To format and fix all ruff issues automatically: + +```bash +rye run format +``` + +## Publishing and releases + +Changes made to this repository via the automated release PR pipeline should publish to PyPI automatically. If +the changes aren't made through the automated pipeline, you may want to make releases manually. + +### Publish with a GitHub workflow + +You can release to package managers by using [the `Publish PyPI` GitHub action](https://www.github.com/openai/openai-python/actions/workflows/publish-pypi.yml). This requires an organization or repository secret to be set up. + +### Publish manually + +If you need to manually release a package, you can run the `bin/publish-pypi` script with a `PYPI_TOKEN` set in +the environment.
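As a concrete companion to the new CONTRIBUTING.md section on examples above, here is a minimal sketch of what a script under `examples/` might look like. The filename `examples/demo.py` is hypothetical, and the sketch assumes `OPENAI_API_KEY` is exported in your environment:

```python
#!/usr/bin/env -S rye run python
# Hypothetical examples/demo.py: a minimal, self-contained example script.
# Assumes OPENAI_API_KEY is set; make it executable with `chmod +x examples/demo.py`.
from openai import OpenAI

client = OpenAI()  # picks up OPENAI_API_KEY from the environment

completion = client.chat.completions.create(
    model="gpt-3.5-turbo-0125",  # the model literal added in this release
    messages=[{"role": "user", "content": "Say hello in one short sentence."}],
)
print(completion.choices[0].message.content)
```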
diff --git a/pyproject.toml b/pyproject.toml index 20371e0ef9..163297ee2b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "openai" -version = "1.11.1" +version = "1.12.0" description = "The official Python library for the openai API" readme = "README.md" license = "Apache-2.0" @@ -75,6 +75,10 @@ format = { chain = [ "format:ruff" = "ruff format" "format:isort" = "isort ." +"lint" = { chain = [ + "check:ruff", + "typecheck", +]} "check:ruff" = "ruff ." "fix:ruff" = "ruff --fix ." diff --git a/src/openai/_utils/__init__.py b/src/openai/_utils/__init__.py index 0fb811a945..b5790a879f 100644 --- a/src/openai/_utils/__init__.py +++ b/src/openai/_utils/__init__.py @@ -9,6 +9,7 @@ is_mapping as is_mapping, is_tuple_t as is_tuple_t, parse_date as parse_date, + is_iterable as is_iterable, is_sequence as is_sequence, coerce_float as coerce_float, is_mapping_t as is_mapping_t, @@ -33,6 +34,7 @@ is_list_type as is_list_type, is_union_type as is_union_type, extract_type_arg as extract_type_arg, + is_iterable_type as is_iterable_type, is_required_type as is_required_type, is_annotated_type as is_annotated_type, strip_annotated_type as strip_annotated_type, diff --git a/src/openai/_utils/_transform.py b/src/openai/_utils/_transform.py index 3a1c14969b..2cb7726c73 100644 --- a/src/openai/_utils/_transform.py +++ b/src/openai/_utils/_transform.py @@ -9,11 +9,13 @@ from ._utils import ( is_list, is_mapping, + is_iterable, ) from ._typing import ( is_list_type, is_union_type, extract_type_arg, + is_iterable_type, is_required_type, is_annotated_type, strip_annotated_type, @@ -157,7 +159,12 @@ def _transform_recursive( if is_typeddict(stripped_type) and is_mapping(data): return _transform_typeddict(data, stripped_type) - if is_list_type(stripped_type) and is_list(data): + if ( + # List[T] + (is_list_type(stripped_type) and is_list(data)) + # Iterable[T] + or (is_iterable_type(stripped_type) and is_iterable(data) and not isinstance(data, str)) + ): inner_type = extract_type_arg(stripped_type, 0) return [_transform_recursive(d, annotation=annotation, inner_type=inner_type) for d in data] diff --git a/src/openai/_utils/_typing.py b/src/openai/_utils/_typing.py index c1d1ebb9a4..c036991f04 100644 --- a/src/openai/_utils/_typing.py +++ b/src/openai/_utils/_typing.py @@ -1,6 +1,7 @@ from __future__ import annotations -from typing import Any, TypeVar, cast +from typing import Any, TypeVar, Iterable, cast +from collections import abc as _c_abc from typing_extensions import Required, Annotated, get_args, get_origin from .._types import InheritsGeneric @@ -15,6 +16,12 @@ def is_list_type(typ: type) -> bool: return (get_origin(typ) or typ) == list +def is_iterable_type(typ: type) -> bool: + """If the given type is `typing.Iterable[T]`""" + origin = get_origin(typ) or typ + return origin == Iterable or origin == _c_abc.Iterable + + def is_union_type(typ: type) -> bool: return _is_union(get_origin(typ)) diff --git a/src/openai/_utils/_utils.py b/src/openai/_utils/_utils.py index 1c5c21a8ea..93c95517a9 100644 --- a/src/openai/_utils/_utils.py +++ b/src/openai/_utils/_utils.py @@ -164,6 +164,10 @@ def is_list(obj: object) -> TypeGuard[list[object]]: return isinstance(obj, list) +def is_iterable(obj: object) -> TypeGuard[Iterable[object]]: + return isinstance(obj, Iterable) + + def deepcopy_minimal(item: _T) -> _T: """Minimal reimplementation of copy.deepcopy() that will only copy certain object types: diff --git a/src/openai/_version.py b/src/openai/_version.py index 8af0cd2490..6db2292c7b 
100644 --- a/src/openai/_version.py +++ b/src/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. __title__ = "openai" -__version__ = "1.11.1" # x-release-please-version +__version__ = "1.12.0" # x-release-please-version diff --git a/src/openai/cli/_api/image.py b/src/openai/cli/_api/image.py index e6149eeac4..3e2a0a90f1 100644 --- a/src/openai/cli/_api/image.py +++ b/src/openai/cli/_api/image.py @@ -14,6 +14,7 @@ def register(subparser: _SubParsersAction[ArgumentParser]) -> None: sub = subparser.add_parser("images.generate") + sub.add_argument("-m", "--model", type=str) sub.add_argument("-p", "--prompt", type=str, required=True) sub.add_argument("-n", "--num-images", type=int, default=1) sub.add_argument("-s", "--size", type=str, default="1024x1024", help="Size of the output image") @@ -21,6 +22,7 @@ def register(subparser: _SubParsersAction[ArgumentParser]) -> None: sub.set_defaults(func=CLIImage.create, args_model=CLIImageCreateArgs) sub = subparser.add_parser("images.edit") + sub.add_argument("-m", "--model", type=str) sub.add_argument("-p", "--prompt", type=str, required=True) sub.add_argument("-n", "--num-images", type=int, default=1) sub.add_argument( @@ -42,6 +44,7 @@ def register(subparser: _SubParsersAction[ArgumentParser]) -> None: sub.set_defaults(func=CLIImage.edit, args_model=CLIImageEditArgs) sub = subparser.add_parser("images.create_variation") + sub.add_argument("-m", "--model", type=str) sub.add_argument("-n", "--num-images", type=int, default=1) sub.add_argument( "-I", @@ -60,6 +63,7 @@ class CLIImageCreateArgs(BaseModel): num_images: int size: str response_format: str + model: NotGivenOr[str] = NOT_GIVEN class CLIImageCreateVariationArgs(BaseModel): @@ -67,6 +71,7 @@ class CLIImageCreateVariationArgs(BaseModel): num_images: int size: str response_format: str + model: NotGivenOr[str] = NOT_GIVEN class CLIImageEditArgs(BaseModel): @@ -76,12 +81,14 @@ class CLIImageEditArgs(BaseModel): response_format: str prompt: str mask: NotGivenOr[str] = NOT_GIVEN + model: NotGivenOr[str] = NOT_GIVEN class CLIImage: @staticmethod def create(args: CLIImageCreateArgs) -> None: image = get_client().images.generate( + model=args.model, prompt=args.prompt, n=args.num_images, # casts required because the API is typed for enums @@ -97,6 +104,7 @@ def create_variation(args: CLIImageCreateVariationArgs) -> None: buffer_reader = BufferReader(file_reader.read(), desc="Upload progress") image = get_client().images.create_variation( + model=args.model, image=("image", buffer_reader), n=args.num_images, # casts required because the API is typed for enums @@ -118,6 +126,7 @@ def edit(args: CLIImageEditArgs) -> None: mask = BufferReader(file_reader.read(), desc="Mask progress") image = get_client().images.edit( + model=args.model, prompt=args.prompt, image=("image", buffer_reader), n=args.num_images, diff --git a/src/openai/resources/audio/transcriptions.py b/src/openai/resources/audio/transcriptions.py index 2c167be395..275098ce88 100644 --- a/src/openai/resources/audio/transcriptions.py +++ b/src/openai/resources/audio/transcriptions.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import Union, Mapping, cast +from typing import List, Union, Mapping, cast from typing_extensions import Literal import httpx @@ -39,6 +39,7 @@ def create( prompt: str | NotGiven = NOT_GIVEN, response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | NotGiven = NOT_GIVEN, temperature: float | NotGiven = NOT_GIVEN, + timestamp_granularities: 
List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -74,6 +75,10 @@ def create( [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. + timestamp_granularities: The timestamp granularities to populate for this transcription. Any of these + options: `word`, or `segment`. Note: There is no additional latency for segment + timestamps, but generating word timestamps incurs additional latency. + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -90,6 +95,7 @@ def create( "prompt": prompt, "response_format": response_format, "temperature": temperature, + "timestamp_granularities": timestamp_granularities, } ) files = extract_files(cast(Mapping[str, object], body), paths=[["file"]]) @@ -127,6 +133,7 @@ async def create( prompt: str | NotGiven = NOT_GIVEN, response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | NotGiven = NOT_GIVEN, temperature: float | NotGiven = NOT_GIVEN, + timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -162,6 +169,10 @@ async def create( [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. + timestamp_granularities: The timestamp granularities to populate for this transcription. Any of these + options: `word`, or `segment`. Note: There is no additional latency for segment + timestamps, but generating word timestamps incurs additional latency. + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -178,6 +189,7 @@ async def create( "prompt": prompt, "response_format": response_format, "temperature": temperature, + "timestamp_granularities": timestamp_granularities, } ) files = extract_files(cast(Mapping[str, object], body), paths=[["file"]]) diff --git a/src/openai/resources/beta/assistants/assistants.py b/src/openai/resources/beta/assistants/assistants.py index 3a2418ad90..e926c31642 100644 --- a/src/openai/resources/beta/assistants/assistants.py +++ b/src/openai/resources/beta/assistants/assistants.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import List, Optional +from typing import List, Iterable, Optional from typing_extensions import Literal import httpx @@ -59,7 +59,7 @@ def create( instructions: Optional[str] | NotGiven = NOT_GIVEN, metadata: Optional[object] | NotGiven = NOT_GIVEN, name: Optional[str] | NotGiven = NOT_GIVEN, - tools: List[assistant_create_params.Tool] | NotGiven = NOT_GIVEN, + tools: Iterable[assistant_create_params.Tool] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, @@ -169,7 +169,7 @@ def update( metadata: Optional[object] | NotGiven = NOT_GIVEN, model: str | NotGiven = NOT_GIVEN, name: Optional[str] | NotGiven = NOT_GIVEN, - tools: List[assistant_update_params.Tool] | NotGiven = NOT_GIVEN, + tools: Iterable[assistant_update_params.Tool] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -362,7 +362,7 @@ async def create( instructions: Optional[str] | NotGiven = NOT_GIVEN, metadata: Optional[object] | NotGiven = NOT_GIVEN, name: Optional[str] | NotGiven = NOT_GIVEN, - tools: List[assistant_create_params.Tool] | NotGiven = NOT_GIVEN, + tools: Iterable[assistant_create_params.Tool] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -472,7 +472,7 @@ async def update( metadata: Optional[object] | NotGiven = NOT_GIVEN, model: str | NotGiven = NOT_GIVEN, name: Optional[str] | NotGiven = NOT_GIVEN, - tools: List[assistant_update_params.Tool] | NotGiven = NOT_GIVEN, + tools: Iterable[assistant_update_params.Tool] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, diff --git a/src/openai/resources/beta/threads/runs/runs.py b/src/openai/resources/beta/threads/runs/runs.py index 0ed48b4792..9b18336010 100644 --- a/src/openai/resources/beta/threads/runs/runs.py +++ b/src/openai/resources/beta/threads/runs/runs.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import List, Optional +from typing import Iterable, Optional from typing_extensions import Literal import httpx @@ -59,7 +59,7 @@ def create( instructions: Optional[str] | NotGiven = NOT_GIVEN, metadata: Optional[object] | NotGiven = NOT_GIVEN, model: Optional[str] | NotGiven = NOT_GIVEN, - tools: Optional[List[run_create_params.Tool]] | NotGiven = NOT_GIVEN, + tools: Optional[Iterable[run_create_params.Tool]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -316,7 +316,7 @@ def submit_tool_outputs( run_id: str, *, thread_id: str, - tool_outputs: List[run_submit_tool_outputs_params.ToolOutput], + tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput], # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, @@ -380,7 +380,7 @@ async def create( instructions: Optional[str] | NotGiven = NOT_GIVEN, metadata: Optional[object] | NotGiven = NOT_GIVEN, model: Optional[str] | NotGiven = NOT_GIVEN, - tools: Optional[List[run_create_params.Tool]] | NotGiven = NOT_GIVEN, + tools: Optional[Iterable[run_create_params.Tool]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -637,7 +637,7 @@ async def submit_tool_outputs( run_id: str, *, thread_id: str, - tool_outputs: List[run_submit_tool_outputs_params.ToolOutput], + tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput], # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, diff --git a/src/openai/resources/beta/threads/threads.py b/src/openai/resources/beta/threads/threads.py index 0372ae2f66..dd079ac533 100644 --- a/src/openai/resources/beta/threads/threads.py +++ b/src/openai/resources/beta/threads/threads.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import List, Optional +from typing import Iterable, Optional import httpx @@ -65,7 +65,7 @@ def with_streaming_response(self) -> ThreadsWithStreamingResponse: def create( self, *, - messages: List[thread_create_params.Message] | NotGiven = NOT_GIVEN, + messages: Iterable[thread_create_params.Message] | NotGiven = NOT_GIVEN, metadata: Optional[object] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -227,7 +227,7 @@ def create_and_run( metadata: Optional[object] | NotGiven = NOT_GIVEN, model: Optional[str] | NotGiven = NOT_GIVEN, thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN, - tools: Optional[List[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN, + tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -310,7 +310,7 @@ def with_streaming_response(self) -> AsyncThreadsWithStreamingResponse: async def create( self, *, - messages: List[thread_create_params.Message] | NotGiven = NOT_GIVEN, + messages: Iterable[thread_create_params.Message] | NotGiven = NOT_GIVEN, metadata: Optional[object] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
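The hunk above loosens `messages` on `threads.create` from `List[...]` to `Iterable[...]`, so any iterable (including a generator) now satisfies the annotation. A rough sketch under the same assumption of a configured `OPENAI_API_KEY`:

```python
# Sketch: a generator of messages now type-checks against the loosened
# Iterable annotation on threads.create. Assumes OPENAI_API_KEY is set.
from typing import Iterable

from openai import OpenAI
from openai.types.beta import thread_create_params

client = OpenAI()

def seed_messages() -> Iterable[thread_create_params.Message]:
    for question in ("What is 2 + 2?", "Now square the result."):
        yield {"role": "user", "content": question}

thread = client.beta.threads.create(messages=seed_messages())
print(thread.id)
```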
@@ -472,7 +472,7 @@ async def create_and_run( metadata: Optional[object] | NotGiven = NOT_GIVEN, model: Optional[str] | NotGiven = NOT_GIVEN, thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN, - tools: Optional[List[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN, + tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, diff --git a/src/openai/resources/chat/completions.py b/src/openai/resources/chat/completions.py index 45521833ad..0011d75e6e 100644 --- a/src/openai/resources/chat/completions.py +++ b/src/openai/resources/chat/completions.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import Dict, List, Union, Optional, overload +from typing import Dict, List, Union, Iterable, Optional, overload from typing_extensions import Literal import httpx @@ -42,7 +42,7 @@ def with_streaming_response(self) -> CompletionsWithStreamingResponse: def create( self, *, - messages: List[ChatCompletionMessageParam], + messages: Iterable[ChatCompletionMessageParam], model: Union[ str, Literal[ @@ -61,12 +61,13 @@ def create( "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", "gpt-3.5-turbo-16k-0613", ], ], frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, - functions: List[completion_create_params.Function] | NotGiven = NOT_GIVEN, + functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, @@ -78,7 +79,7 @@ def create( stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, - tools: List[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, + tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -155,7 +156,7 @@ def create( response_format: An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and - `gpt-3.5-turbo-1106`. + all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. 
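The docstring change above notes that `response_format` now works with all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`, which covers the `gpt-3.5-turbo-0125` literal this release adds. A sketch of JSON mode with that model (again assuming `OPENAI_API_KEY` is set; the reminder to ask for JSON in the prompt comes from the API docs rather than the hunks shown here):

```python
from openai import OpenAI

client = OpenAI()

# JSON mode with the newly added model literal; per the API docs, the prompt
# itself should also instruct the model to produce JSON.
completion = client.chat.completions.create(
    model="gpt-3.5-turbo-0125",
    response_format={"type": "json_object"},
    messages=[
        {"role": "system", "content": "You are a helpful assistant. Reply in JSON."},
        {"role": "user", "content": "Name three primary colors."},
    ],
)
print(completion.choices[0].message.content)
```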
@@ -231,7 +232,7 @@ def create( def create( self, *, - messages: List[ChatCompletionMessageParam], + messages: Iterable[ChatCompletionMessageParam], model: Union[ str, Literal[ @@ -250,13 +251,14 @@ def create( "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", "gpt-3.5-turbo-16k-0613", ], ], stream: Literal[True], frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, - functions: List[completion_create_params.Function] | NotGiven = NOT_GIVEN, + functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, @@ -267,7 +269,7 @@ def create( stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, - tools: List[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, + tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -351,7 +353,7 @@ def create( response_format: An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and - `gpt-3.5-turbo-1106`. + all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -420,7 +422,7 @@ def create( def create( self, *, - messages: List[ChatCompletionMessageParam], + messages: Iterable[ChatCompletionMessageParam], model: Union[ str, Literal[ @@ -439,13 +441,14 @@ def create( "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", "gpt-3.5-turbo-16k-0613", ], ], stream: bool, frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, - functions: List[completion_create_params.Function] | NotGiven = NOT_GIVEN, + functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, @@ -456,7 +459,7 @@ def create( stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, - tools: List[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, + tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -540,7 +543,7 @@ def create( response_format: An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and - `gpt-3.5-turbo-1106`. + all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. 
@@ -609,7 +612,7 @@ def create( def create( self, *, - messages: List[ChatCompletionMessageParam], + messages: Iterable[ChatCompletionMessageParam], model: Union[ str, Literal[ @@ -628,12 +631,13 @@ def create( "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", "gpt-3.5-turbo-16k-0613", ], ], frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, - functions: List[completion_create_params.Function] | NotGiven = NOT_GIVEN, + functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, @@ -645,7 +649,7 @@ def create( stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, - tools: List[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, + tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -705,7 +709,7 @@ def with_streaming_response(self) -> AsyncCompletionsWithStreamingResponse: async def create( self, *, - messages: List[ChatCompletionMessageParam], + messages: Iterable[ChatCompletionMessageParam], model: Union[ str, Literal[ @@ -724,12 +728,13 @@ async def create( "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", "gpt-3.5-turbo-16k-0613", ], ], frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, - functions: List[completion_create_params.Function] | NotGiven = NOT_GIVEN, + functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, @@ -741,7 +746,7 @@ async def create( stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, - tools: List[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, + tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -818,7 +823,7 @@ async def create( response_format: An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and - `gpt-3.5-turbo-1106`. + all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. 
@@ -894,7 +899,7 @@ async def create( async def create( self, *, - messages: List[ChatCompletionMessageParam], + messages: Iterable[ChatCompletionMessageParam], model: Union[ str, Literal[ @@ -913,13 +918,14 @@ async def create( "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", "gpt-3.5-turbo-16k-0613", ], ], stream: Literal[True], frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, - functions: List[completion_create_params.Function] | NotGiven = NOT_GIVEN, + functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, @@ -930,7 +936,7 @@ async def create( stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, - tools: List[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, + tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -1014,7 +1020,7 @@ async def create( response_format: An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and - `gpt-3.5-turbo-1106`. + all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -1083,7 +1089,7 @@ async def create( async def create( self, *, - messages: List[ChatCompletionMessageParam], + messages: Iterable[ChatCompletionMessageParam], model: Union[ str, Literal[ @@ -1102,13 +1108,14 @@ async def create( "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", "gpt-3.5-turbo-16k-0613", ], ], stream: bool, frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, - functions: List[completion_create_params.Function] | NotGiven = NOT_GIVEN, + functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, @@ -1119,7 +1126,7 @@ async def create( stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, - tools: List[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, + tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -1203,7 +1210,7 @@ async def create( response_format: An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and - `gpt-3.5-turbo-1106`. + all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. 
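The same `Iterable` loosening applies to the async streaming overloads above: a tuple of messages, for example, is accepted where previously only a `list` type-checked. A sketch, assuming `OPENAI_API_KEY` is set:

```python
import asyncio

from openai import AsyncOpenAI

client = AsyncOpenAI()

async def main() -> None:
    # Any iterable of messages is accepted now, e.g. a tuple instead of a list.
    stream = await client.chat.completions.create(
        model="gpt-3.5-turbo-0125",
        stream=True,
        messages=({"role": "user", "content": "Count to three."},),
    )
    async for chunk in stream:
        # delta.content is None on some chunks, so fall back to an empty string
        print(chunk.choices[0].delta.content or "", end="")
    print()

asyncio.run(main())
```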
@@ -1272,7 +1279,7 @@ async def create( async def create( self, *, - messages: List[ChatCompletionMessageParam], + messages: Iterable[ChatCompletionMessageParam], model: Union[ str, Literal[ @@ -1291,12 +1298,13 @@ async def create( "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", "gpt-3.5-turbo-16k-0613", ], ], frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, - functions: List[completion_create_params.Function] | NotGiven = NOT_GIVEN, + functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, @@ -1308,7 +1316,7 @@ async def create( stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, - tools: List[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, + tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, diff --git a/src/openai/resources/completions.py b/src/openai/resources/completions.py index 3d2e10230a..af2d6e2e51 100644 --- a/src/openai/resources/completions.py +++ b/src/openai/resources/completions.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import Dict, List, Union, Optional, overload +from typing import Dict, List, Union, Iterable, Optional, overload from typing_extensions import Literal import httpx @@ -36,7 +36,7 @@ def create( self, *, model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]], - prompt: Union[str, List[str], List[int], List[List[int]], None], + prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None], best_of: Optional[int] | NotGiven = NOT_GIVEN, echo: Optional[bool] | NotGiven = NOT_GIVEN, frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, @@ -184,7 +184,7 @@ def create( self, *, model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]], - prompt: Union[str, List[str], List[int], List[List[int]], None], + prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None], stream: Literal[True], best_of: Optional[int] | NotGiven = NOT_GIVEN, echo: Optional[bool] | NotGiven = NOT_GIVEN, @@ -332,7 +332,7 @@ def create( self, *, model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]], - prompt: Union[str, List[str], List[int], List[List[int]], None], + prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None], stream: bool, best_of: Optional[int] | NotGiven = NOT_GIVEN, echo: Optional[bool] | NotGiven = NOT_GIVEN, @@ -480,7 +480,7 @@ def create( self, *, model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]], - prompt: Union[str, List[str], List[int], List[List[int]], None], + prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None], best_of: Optional[int] | NotGiven = NOT_GIVEN, echo: Optional[bool] | NotGiven = NOT_GIVEN, frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, @@ -550,7 +550,7 @@ async def create( self, *, model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]], - prompt: Union[str, List[str], List[int], List[List[int]], None], + 
prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None], best_of: Optional[int] | NotGiven = NOT_GIVEN, echo: Optional[bool] | NotGiven = NOT_GIVEN, frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, @@ -698,7 +698,7 @@ async def create( self, *, model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]], - prompt: Union[str, List[str], List[int], List[List[int]], None], + prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None], stream: Literal[True], best_of: Optional[int] | NotGiven = NOT_GIVEN, echo: Optional[bool] | NotGiven = NOT_GIVEN, @@ -846,7 +846,7 @@ async def create( self, *, model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]], - prompt: Union[str, List[str], List[int], List[List[int]], None], + prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None], stream: bool, best_of: Optional[int] | NotGiven = NOT_GIVEN, echo: Optional[bool] | NotGiven = NOT_GIVEN, @@ -994,7 +994,7 @@ async def create( self, *, model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]], - prompt: Union[str, List[str], List[int], List[List[int]], None], + prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None], best_of: Optional[int] | NotGiven = NOT_GIVEN, echo: Optional[bool] | NotGiven = NOT_GIVEN, frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, diff --git a/src/openai/resources/embeddings.py b/src/openai/resources/embeddings.py index 857bfc7702..cfef025bc2 100644 --- a/src/openai/resources/embeddings.py +++ b/src/openai/resources/embeddings.py @@ -3,7 +3,7 @@ from __future__ import annotations import base64 -from typing import List, Union, cast +from typing import List, Union, Iterable, cast from typing_extensions import Literal import httpx @@ -35,7 +35,7 @@ def with_streaming_response(self) -> EmbeddingsWithStreamingResponse: def create( self, *, - input: Union[str, List[str], List[int], List[List[int]]], + input: Union[str, List[str], Iterable[int], Iterable[Iterable[int]]], model: Union[str, Literal["text-embedding-ada-002", "text-embedding-3-small", "text-embedding-3-large"]], dimensions: int | NotGiven = NOT_GIVEN, encoding_format: Literal["float", "base64"] | NotGiven = NOT_GIVEN, @@ -136,7 +136,7 @@ def with_streaming_response(self) -> AsyncEmbeddingsWithStreamingResponse: async def create( self, *, - input: Union[str, List[str], List[int], List[List[int]]], + input: Union[str, List[str], Iterable[int], Iterable[Iterable[int]]], model: Union[str, Literal["text-embedding-ada-002", "text-embedding-3-small", "text-embedding-3-large"]], dimensions: int | NotGiven = NOT_GIVEN, encoding_format: Literal["float", "base64"] | NotGiven = NOT_GIVEN, diff --git a/src/openai/types/audio/transcription_create_params.py b/src/openai/types/audio/transcription_create_params.py index 7bd70d7b48..5a90822144 100644 --- a/src/openai/types/audio/transcription_create_params.py +++ b/src/openai/types/audio/transcription_create_params.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import Union +from typing import List, Union from typing_extensions import Literal, Required, TypedDict from ..._types import FileTypes @@ -50,3 +50,11 @@ class TranscriptionCreateParams(TypedDict, total=False): [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. 
""" + + timestamp_granularities: List[Literal["word", "segment"]] + """The timestamp granularities to populate for this transcription. + + Any of these options: `word`, or `segment`. Note: There is no additional latency + for segment timestamps, but generating word timestamps incurs additional + latency. + """ diff --git a/src/openai/types/beta/assistant_create_params.py b/src/openai/types/beta/assistant_create_params.py index 539897a7ba..c49d6f6950 100644 --- a/src/openai/types/beta/assistant_create_params.py +++ b/src/openai/types/beta/assistant_create_params.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import List, Union, Optional +from typing import List, Union, Iterable, Optional from typing_extensions import Literal, Required, TypedDict from ...types import shared_params @@ -54,7 +54,7 @@ class AssistantCreateParams(TypedDict, total=False): name: Optional[str] """The name of the assistant. The maximum length is 256 characters.""" - tools: List[Tool] + tools: Iterable[Tool] """A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types diff --git a/src/openai/types/beta/assistant_update_params.py b/src/openai/types/beta/assistant_update_params.py index dfb5d4c553..c5ccde62c5 100644 --- a/src/openai/types/beta/assistant_update_params.py +++ b/src/openai/types/beta/assistant_update_params.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import List, Union, Optional +from typing import List, Union, Iterable, Optional from typing_extensions import Literal, Required, TypedDict from ...types import shared_params @@ -56,7 +56,7 @@ class AssistantUpdateParams(TypedDict, total=False): name: Optional[str] """The name of the assistant. The maximum length is 256 characters.""" - tools: List[Tool] + tools: Iterable[Tool] """A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types diff --git a/src/openai/types/beta/thread_create_and_run_params.py b/src/openai/types/beta/thread_create_and_run_params.py index 9f58dcd875..cc1051b3d6 100644 --- a/src/openai/types/beta/thread_create_and_run_params.py +++ b/src/openai/types/beta/thread_create_and_run_params.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import List, Union, Optional +from typing import List, Union, Iterable, Optional from typing_extensions import Literal, Required, TypedDict from ...types import shared_params @@ -51,7 +51,7 @@ class ThreadCreateAndRunParams(TypedDict, total=False): thread: Thread """If no thread is provided, an empty thread will be created.""" - tools: Optional[List[Tool]] + tools: Optional[Iterable[Tool]] """Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. @@ -86,7 +86,7 @@ class ThreadMessage(TypedDict, total=False): class Thread(TypedDict, total=False): - messages: List[ThreadMessage] + messages: Iterable[ThreadMessage] """ A list of [messages](https://platform.openai.com/docs/api-reference/messages) to start the thread with. 
diff --git a/src/openai/types/beta/thread_create_params.py b/src/openai/types/beta/thread_create_params.py index d2ec78bbc3..e78276e839 100644 --- a/src/openai/types/beta/thread_create_params.py +++ b/src/openai/types/beta/thread_create_params.py @@ -2,14 +2,14 @@ from __future__ import annotations -from typing import List, Optional +from typing import List, Iterable, Optional from typing_extensions import Literal, Required, TypedDict __all__ = ["ThreadCreateParams", "Message"] class ThreadCreateParams(TypedDict, total=False): - messages: List[Message] + messages: Iterable[Message] """ A list of [messages](https://platform.openai.com/docs/api-reference/messages) to start the thread with. diff --git a/src/openai/types/beta/threads/run_create_params.py b/src/openai/types/beta/threads/run_create_params.py index a4f41a9338..b92649aa06 100644 --- a/src/openai/types/beta/threads/run_create_params.py +++ b/src/openai/types/beta/threads/run_create_params.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import List, Union, Optional +from typing import Union, Iterable, Optional from typing_extensions import Literal, Required, TypedDict from ....types import shared_params @@ -54,7 +54,7 @@ class RunCreateParams(TypedDict, total=False): assistant will be used. """ - tools: Optional[List[Tool]] + tools: Optional[Iterable[Tool]] """Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. diff --git a/src/openai/types/beta/threads/run_submit_tool_outputs_params.py b/src/openai/types/beta/threads/run_submit_tool_outputs_params.py index a960f0f06f..3b303a33fc 100644 --- a/src/openai/types/beta/threads/run_submit_tool_outputs_params.py +++ b/src/openai/types/beta/threads/run_submit_tool_outputs_params.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import List +from typing import Iterable from typing_extensions import Required, TypedDict __all__ = ["RunSubmitToolOutputsParams", "ToolOutput"] @@ -11,7 +11,7 @@ class RunSubmitToolOutputsParams(TypedDict, total=False): thread_id: Required[str] - tool_outputs: Required[List[ToolOutput]] + tool_outputs: Required[Iterable[ToolOutput]] """A list of tools for which the outputs are being submitted.""" diff --git a/src/openai/types/chat/chat_completion_assistant_message_param.py b/src/openai/types/chat/chat_completion_assistant_message_param.py index 72a5bff83b..7377139bf5 100644 --- a/src/openai/types/chat/chat_completion_assistant_message_param.py +++ b/src/openai/types/chat/chat_completion_assistant_message_param.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import List, Optional +from typing import Iterable, Optional from typing_extensions import Literal, Required, TypedDict from .chat_completion_message_tool_call_param import ChatCompletionMessageToolCallParam @@ -47,5 +47,5 @@ class ChatCompletionAssistantMessageParam(TypedDict, total=False): role. 
""" - tool_calls: List[ChatCompletionMessageToolCallParam] + tool_calls: Iterable[ChatCompletionMessageToolCallParam] """The tool calls generated by the model, such as function calls.""" diff --git a/src/openai/types/chat/chat_completion_user_message_param.py b/src/openai/types/chat/chat_completion_user_message_param.py index 07be67c405..cb8ca19bf0 100644 --- a/src/openai/types/chat/chat_completion_user_message_param.py +++ b/src/openai/types/chat/chat_completion_user_message_param.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import List, Union +from typing import Union, Iterable from typing_extensions import Literal, Required, TypedDict from .chat_completion_content_part_param import ChatCompletionContentPartParam @@ -11,7 +11,7 @@ class ChatCompletionUserMessageParam(TypedDict, total=False): - content: Required[Union[str, List[ChatCompletionContentPartParam]]] + content: Required[Union[str, Iterable[ChatCompletionContentPartParam]]] """The contents of the user message.""" role: Required[Literal["user"]] diff --git a/src/openai/types/chat/completion_create_params.py b/src/openai/types/chat/completion_create_params.py index 3ea14d82b3..e02a81bc51 100644 --- a/src/openai/types/chat/completion_create_params.py +++ b/src/openai/types/chat/completion_create_params.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import Dict, List, Union, Optional +from typing import Dict, List, Union, Iterable, Optional from typing_extensions import Literal, Required, TypedDict from ...types import shared_params @@ -22,7 +22,7 @@ class CompletionCreateParamsBase(TypedDict, total=False): - messages: Required[List[ChatCompletionMessageParam]] + messages: Required[Iterable[ChatCompletionMessageParam]] """A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). @@ -47,6 +47,7 @@ class CompletionCreateParamsBase(TypedDict, total=False): "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", "gpt-3.5-turbo-16k-0613", ], ] @@ -80,7 +81,7 @@ class CompletionCreateParamsBase(TypedDict, total=False): functions are present. """ - functions: List[Function] + functions: Iterable[Function] """Deprecated in favor of `tools`. A list of functions the model may generate JSON inputs for. @@ -137,7 +138,7 @@ class CompletionCreateParamsBase(TypedDict, total=False): Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and - `gpt-3.5-turbo-1106`. + all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -185,7 +186,7 @@ class CompletionCreateParamsBase(TypedDict, total=False): functions are present. """ - tools: List[ChatCompletionToolParam] + tools: Iterable[ChatCompletionToolParam] """A list of tools the model may call. Currently, only functions are supported as a tool. 
Use this to provide a list of functions the model may generate JSON inputs for. """ diff --git a/src/openai/types/completion_create_params.py b/src/openai/types/completion_create_params.py index e14c2860df..afbc9c549f 100644 --- a/src/openai/types/completion_create_params.py +++ b/src/openai/types/completion_create_params.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import Dict, List, Union, Optional +from typing import Dict, List, Union, Iterable, Optional from typing_extensions import Literal, Required, TypedDict __all__ = ["CompletionCreateParamsBase", "CompletionCreateParamsNonStreaming", "CompletionCreateParamsStreaming"] @@ -19,7 +19,7 @@ class CompletionCreateParamsBase(TypedDict, total=False): descriptions of them. """ - prompt: Required[Union[str, List[str], List[int], List[List[int]], None]] + prompt: Required[Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None]] """ The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. diff --git a/src/openai/types/embedding_create_params.py b/src/openai/types/embedding_create_params.py index 66ac60511c..a549dc94c4 100644 --- a/src/openai/types/embedding_create_params.py +++ b/src/openai/types/embedding_create_params.py @@ -2,14 +2,14 @@ from __future__ import annotations -from typing import List, Union +from typing import List, Union, Iterable from typing_extensions import Literal, Required, TypedDict __all__ = ["EmbeddingCreateParams"] class EmbeddingCreateParams(TypedDict, total=False): - input: Required[Union[str, List[str], List[int], List[List[int]]]] + input: Required[Union[str, List[str], Iterable[int], Iterable[Iterable[int]]]] """Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array diff --git a/tests/api_resources/audio/test_transcriptions.py b/tests/api_resources/audio/test_transcriptions.py index d957871abc..80e364b484 100644 --- a/tests/api_resources/audio/test_transcriptions.py +++ b/tests/api_resources/audio/test_transcriptions.py @@ -34,6 +34,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: prompt="string", response_format="json", temperature=0, + timestamp_granularities=["word", "segment"], ) assert_matches_type(Transcription, transcription, path=["response"]) @@ -84,6 +85,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None: prompt="string", response_format="json", temperature=0, + timestamp_granularities=["word", "segment"], ) assert_matches_type(Transcription, transcription, path=["response"]) diff --git a/tests/test_transform.py b/tests/test_transform.py index c4dffb3bb0..6ed67d49a7 100644 --- a/tests/test_transform.py +++ b/tests/test_transform.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import Any, List, Union, Optional +from typing import Any, List, Union, Iterable, Optional, cast from datetime import date, datetime from typing_extensions import Required, Annotated, TypedDict @@ -265,3 +265,35 @@ def test_pydantic_default_field() -> None: assert model.with_none_default == "bar" assert model.with_str_default == "baz" assert transform(model, Any) == {"with_none_default": "bar", "with_str_default": "baz"} + + +class TypedDictIterableUnion(TypedDict): + foo: Annotated[Union[Bar8, Iterable[Baz8]], PropertyInfo(alias="FOO")] + + +class Bar8(TypedDict): + foo_bar: Annotated[str, PropertyInfo(alias="fooBar")] + + +class Baz8(TypedDict): + foo_baz: Annotated[str, PropertyInfo(alias="fooBaz")] + + +def
test_iterable_of_dictionaries() -> None: + assert transform({"foo": [{"foo_baz": "bar"}]}, TypedDictIterableUnion) == {"FOO": [{"fooBaz": "bar"}]} + assert cast(Any, transform({"foo": ({"foo_baz": "bar"},)}, TypedDictIterableUnion)) == {"FOO": [{"fooBaz": "bar"}]} + + def my_iter() -> Iterable[Baz8]: + yield {"foo_baz": "hello"} + yield {"foo_baz": "world"} + + assert transform({"foo": my_iter()}, TypedDictIterableUnion) == {"FOO": [{"fooBaz": "hello"}, {"fooBaz": "world"}]} + + +class TypedDictIterableUnionStr(TypedDict): + foo: Annotated[Union[str, Iterable[Baz8]], PropertyInfo(alias="FOO")] + + +def test_iterable_union_str() -> None: + assert transform({"foo": "bar"}, TypedDictIterableUnionStr) == {"FOO": "bar"} + assert cast(Any, transform(iter([{"foo_baz": "bar"}]), Union[str, Iterable[Baz8]])) == [{"fooBaz": "bar"}]
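As a final illustration of what the new transform tests verify, this sketch drives the private `openai._utils.transform` helper directly (shown purely for demonstration; it is not public API): generators annotated as `Iterable[T]` are materialised into lists, while plain strings are deliberately left alone.

```python
from typing import Iterable

from openai._utils import transform  # private helper, used here for demonstration only

def token_ids() -> Iterable[int]:
    yield 1
    yield 2

# Iterable[T] values such as generators are materialised into a list...
assert transform(token_ids(), Iterable[int]) == [1, 2]
# ...but a plain string never enters the iterable branch (see the isinstance
# check added to _transform_recursive earlier in this diff).
assert transform("abc", Iterable[str]) == "abc"
print("transform follows Iterable annotations as the tests above expect")
```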