diff --git a/.stats.yml b/.stats.yml
index 47c2bce1cc..2814bb7778 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1 +1 @@
-configured_endpoints: 55
+configured_endpoints: 62
diff --git a/README.md b/README.md
index 3bdd6c4a43..84d9017e45 100644
--- a/README.md
+++ b/README.md
@@ -53,7 +53,7 @@ so that your API Key is not stored in source control.
### Polling Helpers
-When interacting with the API some actions such as starting a Run may take time to complete. The SDK includes
+When interacting with the API, some actions such as starting a Run or adding files to vector stores are asynchronous and take time to complete. The SDK includes
helper functions which will poll the status until it reaches a terminal state and then return the resulting object.
If an API method results in an action which could benefit from polling there will be a corresponding version of the
method ending in '\_and_poll'.
@@ -69,6 +69,20 @@ run = client.beta.threads.runs.create_and_poll(
More information on the lifecycle of a Run can be found in the [Run Lifecycle Documentation](https://platform.openai.com/docs/assistants/how-it-works/run-lifecycle)
+### Bulk Upload Helpers
+
+When creating and interacting with vector stores, you can use the polling helpers to monitor the status of operations.
+For convenience, we also provide a bulk upload helper that lets you upload several files at once.
+
+```python
+from pathlib import Path
+
+sample_files = [Path("sample-paper.pdf"), ...]
+
+batch = await client.beta.vector_stores.file_batches.upload_and_poll(
+ store.id,
+ files=sample_files,
+)
+```
+
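+The example above assumes an `AsyncOpenAI` client; with the synchronous client the same
+helper can be called without `await` (a minimal sketch, assuming an existing vector store
+bound to `store`):
+
+```python
+batch = client.beta.vector_stores.file_batches.upload_and_poll(
+    store.id,
+    files=sample_files,
+)
+```
+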
### Streaming Helpers
The SDK also includes helpers to process streams and handle the incoming events.
diff --git a/api.md b/api.md
index c772fb7c7b..962ed7b7c5 100644
--- a/api.md
+++ b/api.md
@@ -196,6 +196,59 @@ Methods:
# Beta
+## VectorStores
+
+Types:
+
+```python
+from openai.types.beta import VectorStore, VectorStoreDeleted
+```
+
+Methods:
+
+- client.beta.vector_stores.create(\*\*params) -> VectorStore
+- client.beta.vector_stores.retrieve(vector_store_id) -> VectorStore
+- client.beta.vector_stores.update(vector_store_id, \*\*params) -> VectorStore
+- client.beta.vector_stores.list(\*\*params) -> SyncCursorPage[VectorStore]
+- client.beta.vector_stores.delete(vector_store_id) -> VectorStoreDeleted
+
+### Files
+
+Types:
+
+```python
+from openai.types.beta.vector_stores import VectorStoreFile, VectorStoreFileDeleted
+```
+
+Methods:
+
+- client.beta.vector_stores.files.create(vector_store_id, \*\*params) -> VectorStoreFile
+- client.beta.vector_stores.files.retrieve(file_id, \*, vector_store_id) -> VectorStoreFile
+- client.beta.vector_stores.files.list(vector_store_id, \*\*params) -> SyncCursorPage[VectorStoreFile]
+- client.beta.vector_stores.files.delete(file_id, \*, vector_store_id) -> VectorStoreFileDeleted
+- client.beta.vector_stores.files.create_and_poll(\*args) -> VectorStoreFile
+- client.beta.vector_stores.files.poll(\*args) -> VectorStoreFile
+- client.beta.vector_stores.files.upload(\*args) -> VectorStoreFile
+- client.beta.vector_stores.files.upload_and_poll(\*args) -> VectorStoreFile
+
+### FileBatches
+
+Types:
+
+```python
+from openai.types.beta.vector_stores import VectorStoreFileBatch
+```
+
+Methods:
+
+- client.beta.vector_stores.file_batches.create(vector_store_id, \*\*params) -> VectorStoreFileBatch
+- client.beta.vector_stores.file_batches.retrieve(batch_id, \*, vector_store_id) -> VectorStoreFileBatch
+- client.beta.vector_stores.file_batches.cancel(batch_id, \*, vector_store_id) -> VectorStoreFileBatch
+- client.beta.vector_stores.file_batches.list_files(batch_id, \*, vector_store_id, \*\*params) -> SyncCursorPage[VectorStoreFile]
+- client.beta.vector_stores.file_batches.create_and_poll(\*args) -> VectorStoreFileBatch
+- client.beta.vector_stores.file_batches.poll(\*args) -> VectorStoreFileBatch
+- client.beta.vector_stores.file_batches.upload_and_poll(\*args) -> VectorStoreFileBatch
+
## Assistants
Types:
@@ -207,9 +260,9 @@ from openai.types.beta import (
AssistantStreamEvent,
AssistantTool,
CodeInterpreterTool,
+ FileSearchTool,
FunctionTool,
MessageStreamEvent,
- RetrievalTool,
RunStepStreamEvent,
RunStreamEvent,
ThreadStreamEvent,
@@ -218,26 +271,11 @@ from openai.types.beta import (
Methods:
-- client.beta.assistants.create(\*\*params) -> Assistant
-- client.beta.assistants.retrieve(assistant_id) -> Assistant
-- client.beta.assistants.update(assistant_id, \*\*params) -> Assistant
-- client.beta.assistants.list(\*\*params) -> SyncCursorPage[Assistant]
-- client.beta.assistants.delete(assistant_id) -> AssistantDeleted
-
-### Files
-
-Types:
-
-```python
-from openai.types.beta.assistants import AssistantFile, FileDeleteResponse
-```
-
-Methods:
-
-- client.beta.assistants.files.create(assistant_id, \*\*params) -> AssistantFile
-- client.beta.assistants.files.retrieve(file_id, \*, assistant_id) -> AssistantFile
-- client.beta.assistants.files.list(assistant_id, \*\*params) -> SyncCursorPage[AssistantFile]
-- client.beta.assistants.files.delete(file_id, \*, assistant_id) -> FileDeleteResponse
+- client.beta.assistants.create(\*\*params) -> Assistant
+- client.beta.assistants.retrieve(assistant_id) -> Assistant
+- client.beta.assistants.update(assistant_id, \*\*params) -> Assistant
+- client.beta.assistants.list(\*\*params) -> SyncCursorPage[Assistant]
+- client.beta.assistants.delete(assistant_id) -> AssistantDeleted
## Threads
@@ -298,11 +336,11 @@ from openai.types.beta.threads.runs import (
CodeInterpreterOutputImage,
CodeInterpreterToolCall,
CodeInterpreterToolCallDelta,
+ FileSearchToolCall,
+ FileSearchToolCallDelta,
FunctionToolCall,
FunctionToolCallDelta,
MessageCreationStepDetails,
- RetrievalToolCall,
- RetrievalToolCallDelta,
RunStep,
RunStepDelta,
RunStepDeltaEvent,
@@ -350,23 +388,10 @@ from openai.types.beta.threads import (
Methods:
-- client.beta.threads.messages.create(thread_id, \*\*params) -> Message
-- client.beta.threads.messages.retrieve(message_id, \*, thread_id) -> Message
-- client.beta.threads.messages.update(message_id, \*, thread_id, \*\*params) -> Message
-- client.beta.threads.messages.list(thread_id, \*\*params) -> SyncCursorPage[Message]
-
-#### Files
-
-Types:
-
-```python
-from openai.types.beta.threads.messages import MessageFile
-```
-
-Methods:
-
-- client.beta.threads.messages.files.retrieve(file_id, \*, thread_id, message_id) -> MessageFile
-- client.beta.threads.messages.files.list(message_id, \*, thread_id, \*\*params) -> SyncCursorPage[MessageFile]
+- client.beta.threads.messages.create(thread_id, \*\*params) -> Message
+- client.beta.threads.messages.retrieve(message_id, \*, thread_id) -> Message
+- client.beta.threads.messages.update(message_id, \*, thread_id, \*\*params) -> Message
+- client.beta.threads.messages.list(thread_id, \*\*params) -> SyncCursorPage[Message]
# Batches
diff --git a/helpers.md b/helpers.md
index 4271cd9ede..cf738f3f16 100644
--- a/helpers.md
+++ b/helpers.md
@@ -213,3 +213,24 @@ def get_final_messages(self) -> List[Message]
These methods are provided for convenience to collect information at the end of a stream. Calling these events
will trigger consumption of the stream until completion and then return the relevant accumulated objects.
+
+# Polling Helpers
+
+When interacting with the API, some actions such as starting a Run or adding files to vector stores are asynchronous and take time to complete.
+The SDK includes helper functions which will poll the status until it reaches a terminal state and then return the resulting object.
+If an API method results in an action that could benefit from polling, there will be a corresponding version of the
+method ending in `_and_poll`.
+
+All methods also allow you to set the polling frequency (how often the API is checked for an update) via the `poll_interval_ms` function argument; see the example after the list below.
+
+The polling methods are:
+
+```python
+client.beta.threads.create_and_run_poll(...)
+client.beta.threads.runs.create_and_poll(...)
+client.beta.threads.runs.submit_tool_outputs_and_poll(...)
+client.beta.vector_stores.files.upload_and_poll(...)
+client.beta.vector_stores.files.create_and_poll(...)
+client.beta.vector_stores.file_batches.create_and_poll(...)
+client.beta.vector_stores.file_batches.upload_and_poll(...)
+```
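+
+For example, a run can be created and polled at a custom interval (a minimal sketch;
+`thread.id` and `assistant.id` stand in for IDs of objects you have already created):
+
+```python
+run = client.beta.threads.runs.create_and_poll(
+    thread_id=thread.id,
+    assistant_id=assistant.id,
+    poll_interval_ms=500,  # check the run status every 500ms
+)
+```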
diff --git a/pyproject.toml b/pyproject.toml
index 11ab55cbe9..6c3ae2b592 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -63,7 +63,8 @@ dev-dependencies = [
"inline-snapshot >=0.7.0",
"azure-identity >=1.14.1",
"types-tqdm > 4",
- "types-pyaudio > 0"
+ "types-pyaudio > 0",
+ "trio >=0.22.2"
]
[tool.rye.scripts]
diff --git a/requirements-dev.lock b/requirements-dev.lock
index 4461f65738..657e6cb810 100644
--- a/requirements-dev.lock
+++ b/requirements-dev.lock
@@ -18,7 +18,9 @@ argcomplete==3.1.2
asttokens==2.4.1
# via inline-snapshot
attrs==23.1.0
+ # via outcome
# via pytest
+ # via trio
azure-core==1.30.1
# via azure-identity
azure-identity==1.15.0
@@ -48,6 +50,7 @@ distro==1.8.0
# via openai
exceptiongroup==1.1.3
# via anyio
+ # via trio
executing==2.0.1
# via inline-snapshot
filelock==3.12.4
@@ -63,6 +66,7 @@ idna==3.4
# via anyio
# via httpx
# via requests
+ # via trio
importlib-metadata==7.0.0
iniconfig==2.0.0
# via pytest
@@ -83,6 +87,8 @@ numpy==1.26.3
# via openai
# via pandas
# via pandas-stubs
+outcome==1.3.0.post0
+ # via trio
packaging==23.2
# via black
# via msal-extensions
@@ -136,6 +142,9 @@ sniffio==1.3.0
# via anyio
# via httpx
# via openai
+ # via trio
+sortedcontainers==2.4.0
+ # via trio
time-machine==2.9.0
toml==0.10.2
# via inline-snapshot
@@ -145,6 +154,7 @@ tomli==2.0.1
# via pytest
tqdm==4.66.1
# via openai
+trio==0.22.2
types-pyaudio==0.2.16.20240106
types-pytz==2024.1.0.20240203
# via pandas-stubs
diff --git a/src/openai/resources/beta/__init__.py b/src/openai/resources/beta/__init__.py
index 87fea25267..01f5338757 100644
--- a/src/openai/resources/beta/__init__.py
+++ b/src/openai/resources/beta/__init__.py
@@ -24,8 +24,22 @@
AssistantsWithStreamingResponse,
AsyncAssistantsWithStreamingResponse,
)
+from .vector_stores import (
+ VectorStores,
+ AsyncVectorStores,
+ VectorStoresWithRawResponse,
+ AsyncVectorStoresWithRawResponse,
+ VectorStoresWithStreamingResponse,
+ AsyncVectorStoresWithStreamingResponse,
+)
__all__ = [
+ "VectorStores",
+ "AsyncVectorStores",
+ "VectorStoresWithRawResponse",
+ "AsyncVectorStoresWithRawResponse",
+ "VectorStoresWithStreamingResponse",
+ "AsyncVectorStoresWithStreamingResponse",
"Assistants",
"AsyncAssistants",
"AssistantsWithRawResponse",
diff --git a/src/openai/resources/beta/assistants/assistants.py b/src/openai/resources/beta/assistants.py
similarity index 71%
rename from src/openai/resources/beta/assistants/assistants.py
rename to src/openai/resources/beta/assistants.py
index 9e88794ebc..8695a949ca 100644
--- a/src/openai/resources/beta/assistants/assistants.py
+++ b/src/openai/resources/beta/assistants.py
@@ -2,38 +2,31 @@
from __future__ import annotations
-from typing import List, Union, Iterable, Optional
+from typing import Union, Iterable, Optional
from typing_extensions import Literal
import httpx
-from .... import _legacy_response
-from .files import (
- Files,
- AsyncFiles,
- FilesWithRawResponse,
- AsyncFilesWithRawResponse,
- FilesWithStreamingResponse,
- AsyncFilesWithStreamingResponse,
-)
-from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ...._utils import (
+from ... import _legacy_response
+from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ..._utils import (
maybe_transform,
async_maybe_transform,
)
-from ...._compat import cached_property
-from ...._resource import SyncAPIResource, AsyncAPIResource
-from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
-from ....pagination import SyncCursorPage, AsyncCursorPage
-from ....types.beta import (
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ...pagination import SyncCursorPage, AsyncCursorPage
+from ...types.beta import (
Assistant,
AssistantDeleted,
AssistantToolParam,
+ AssistantResponseFormatOptionParam,
assistant_list_params,
assistant_create_params,
assistant_update_params,
)
-from ...._base_client import (
+from ..._base_client import (
AsyncPaginator,
make_request_options,
)
@@ -42,10 +35,6 @@
class Assistants(SyncAPIResource):
- @cached_property
- def files(self) -> Files:
- return Files(self._client)
-
@cached_property
def with_raw_response(self) -> AssistantsWithRawResponse:
return AssistantsWithRawResponse(self)
@@ -81,11 +70,14 @@ def create(
],
],
description: Optional[str] | NotGiven = NOT_GIVEN,
- file_ids: List[str] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
metadata: Optional[object] | NotGiven = NOT_GIVEN,
name: Optional[str] | NotGiven = NOT_GIVEN,
+ response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
+ temperature: Optional[float] | NotGiven = NOT_GIVEN,
+ tool_resources: Optional[assistant_create_params.ToolResources] | NotGiven = NOT_GIVEN,
tools: Iterable[AssistantToolParam] | NotGiven = NOT_GIVEN,
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -105,10 +97,6 @@ def create(
description: The description of the assistant. The maximum length is 512 characters.
- file_ids: A list of [file](https://platform.openai.com/docs/api-reference/files) IDs
- attached to this assistant. There can be a maximum of 20 files attached to the
- assistant. Files are ordered by their creation date in ascending order.
-
instructions: The system instructions that the assistant uses. The maximum length is 256,000
characters.
@@ -119,8 +107,39 @@ def create(
name: The name of the assistant. The maximum length is 256 characters.
+ response_format: Specifies the format that the model must output. Compatible with
+ [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
+ all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
+
+ Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
+ message the model generates is valid JSON.
+
+ **Important:** when using JSON mode, you **must** also instruct the model to
+ produce JSON yourself via a system or user message. Without this, the model may
+ generate an unending stream of whitespace until the generation reaches the token
+ limit, resulting in a long-running and seemingly "stuck" request. Also note that
+ the message content may be partially cut off if `finish_reason="length"`, which
+ indicates the generation exceeded `max_tokens` or the conversation exceeded the
+ max context length.
+
+ temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+ make the output more random, while lower values like 0.2 will make it more
+ focused and deterministic.
+
+ tool_resources: A set of resources that are used by the assistant's tools. The resources are
+ specific to the type of tool. For example, the `code_interpreter` tool requires
+ a list of file IDs, while the `file_search` tool requires a list of vector store
+ IDs.
+
tools: A list of tool enabled on the assistant. There can be a maximum of 128 tools per
- assistant. Tools can be of types `code_interpreter`, `retrieval`, or `function`.
+ assistant. Tools can be of types `code_interpreter`, `file_search`, or
+ `function`.
+
+ top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or temperature but not both.
extra_headers: Send extra headers
@@ -130,18 +149,21 @@ def create(
timeout: Override the client-level default timeout for this request, in seconds
"""
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._post(
"/assistants",
body=maybe_transform(
{
"model": model,
"description": description,
- "file_ids": file_ids,
"instructions": instructions,
"metadata": metadata,
"name": name,
+ "response_format": response_format,
+ "temperature": temperature,
+ "tool_resources": tool_resources,
"tools": tools,
+ "top_p": top_p,
},
assistant_create_params.AssistantCreateParams,
),
@@ -176,7 +198,7 @@ def retrieve(
"""
if not assistant_id:
raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get(
f"/assistants/{assistant_id}",
options=make_request_options(
@@ -190,12 +212,15 @@ def update(
assistant_id: str,
*,
description: Optional[str] | NotGiven = NOT_GIVEN,
- file_ids: List[str] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
metadata: Optional[object] | NotGiven = NOT_GIVEN,
model: str | NotGiven = NOT_GIVEN,
name: Optional[str] | NotGiven = NOT_GIVEN,
+ response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
+ temperature: Optional[float] | NotGiven = NOT_GIVEN,
+ tool_resources: Optional[assistant_update_params.ToolResources] | NotGiven = NOT_GIVEN,
tools: Iterable[AssistantToolParam] | NotGiven = NOT_GIVEN,
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -210,12 +235,6 @@ def update(
The maximum length is 512 characters.
- file_ids: A list of [File](https://platform.openai.com/docs/api-reference/files) IDs
- attached to this assistant. There can be a maximum of 20 files attached to the
- assistant. Files are ordered by their creation date in ascending order. If a
- file was previously attached to the list but does not show up in the list, it
- will be deleted from the assistant.
-
instructions: The system instructions that the assistant uses. The maximum length is 256,000
characters.
@@ -232,8 +251,39 @@ def update(
name: The name of the assistant. The maximum length is 256 characters.
+ response_format: Specifies the format that the model must output. Compatible with
+ [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
+ all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
+
+ Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
+ message the model generates is valid JSON.
+
+ **Important:** when using JSON mode, you **must** also instruct the model to
+ produce JSON yourself via a system or user message. Without this, the model may
+ generate an unending stream of whitespace until the generation reaches the token
+ limit, resulting in a long-running and seemingly "stuck" request. Also note that
+ the message content may be partially cut off if `finish_reason="length"`, which
+ indicates the generation exceeded `max_tokens` or the conversation exceeded the
+ max context length.
+
+ temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+ make the output more random, while lower values like 0.2 will make it more
+ focused and deterministic.
+
+ tool_resources: A set of resources that are used by the assistant's tools. The resources are
+ specific to the type of tool. For example, the `code_interpreter` tool requires
+ a list of file IDs, while the `file_search` tool requires a list of vector store
+ IDs.
+
tools: A list of tool enabled on the assistant. There can be a maximum of 128 tools per
- assistant. Tools can be of types `code_interpreter`, `retrieval`, or `function`.
+ assistant. Tools can be of types `code_interpreter`, `file_search`, or
+ `function`.
+
+ top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or temperature but not both.
extra_headers: Send extra headers
@@ -245,18 +295,21 @@ def update(
"""
if not assistant_id:
raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._post(
f"/assistants/{assistant_id}",
body=maybe_transform(
{
"description": description,
- "file_ids": file_ids,
"instructions": instructions,
"metadata": metadata,
"model": model,
"name": name,
+ "response_format": response_format,
+ "temperature": temperature,
+ "tool_resources": tool_resources,
"tools": tools,
+ "top_p": top_p,
},
assistant_update_params.AssistantUpdateParams,
),
@@ -309,7 +362,7 @@ def list(
timeout: Override the client-level default timeout for this request, in seconds
"""
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get_api_list(
"/assistants",
page=SyncCursorPage[Assistant],
@@ -356,7 +409,7 @@ def delete(
"""
if not assistant_id:
raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._delete(
f"/assistants/{assistant_id}",
options=make_request_options(
@@ -367,10 +420,6 @@ def delete(
class AsyncAssistants(AsyncAPIResource):
- @cached_property
- def files(self) -> AsyncFiles:
- return AsyncFiles(self._client)
-
@cached_property
def with_raw_response(self) -> AsyncAssistantsWithRawResponse:
return AsyncAssistantsWithRawResponse(self)
@@ -406,11 +455,14 @@ async def create(
],
],
description: Optional[str] | NotGiven = NOT_GIVEN,
- file_ids: List[str] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
metadata: Optional[object] | NotGiven = NOT_GIVEN,
name: Optional[str] | NotGiven = NOT_GIVEN,
+ response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
+ temperature: Optional[float] | NotGiven = NOT_GIVEN,
+ tool_resources: Optional[assistant_create_params.ToolResources] | NotGiven = NOT_GIVEN,
tools: Iterable[AssistantToolParam] | NotGiven = NOT_GIVEN,
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -430,10 +482,6 @@ async def create(
description: The description of the assistant. The maximum length is 512 characters.
- file_ids: A list of [file](https://platform.openai.com/docs/api-reference/files) IDs
- attached to this assistant. There can be a maximum of 20 files attached to the
- assistant. Files are ordered by their creation date in ascending order.
-
instructions: The system instructions that the assistant uses. The maximum length is 256,000
characters.
@@ -444,8 +492,39 @@ async def create(
name: The name of the assistant. The maximum length is 256 characters.
+ response_format: Specifies the format that the model must output. Compatible with
+ [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
+ all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
+
+ Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
+ message the model generates is valid JSON.
+
+ **Important:** when using JSON mode, you **must** also instruct the model to
+ produce JSON yourself via a system or user message. Without this, the model may
+ generate an unending stream of whitespace until the generation reaches the token
+ limit, resulting in a long-running and seemingly "stuck" request. Also note that
+ the message content may be partially cut off if `finish_reason="length"`, which
+ indicates the generation exceeded `max_tokens` or the conversation exceeded the
+ max context length.
+
+ temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+ make the output more random, while lower values like 0.2 will make it more
+ focused and deterministic.
+
+ tool_resources: A set of resources that are used by the assistant's tools. The resources are
+ specific to the type of tool. For example, the `code_interpreter` tool requires
+ a list of file IDs, while the `file_search` tool requires a list of vector store
+ IDs.
+
tools: A list of tool enabled on the assistant. There can be a maximum of 128 tools per
- assistant. Tools can be of types `code_interpreter`, `retrieval`, or `function`.
+ assistant. Tools can be of types `code_interpreter`, `file_search`, or
+ `function`.
+
+ top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or temperature but not both.
extra_headers: Send extra headers
@@ -455,18 +534,21 @@ async def create(
timeout: Override the client-level default timeout for this request, in seconds
"""
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._post(
"/assistants",
body=await async_maybe_transform(
{
"model": model,
"description": description,
- "file_ids": file_ids,
"instructions": instructions,
"metadata": metadata,
"name": name,
+ "response_format": response_format,
+ "temperature": temperature,
+ "tool_resources": tool_resources,
"tools": tools,
+ "top_p": top_p,
},
assistant_create_params.AssistantCreateParams,
),
@@ -501,7 +583,7 @@ async def retrieve(
"""
if not assistant_id:
raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._get(
f"/assistants/{assistant_id}",
options=make_request_options(
@@ -515,12 +597,15 @@ async def update(
assistant_id: str,
*,
description: Optional[str] | NotGiven = NOT_GIVEN,
- file_ids: List[str] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
metadata: Optional[object] | NotGiven = NOT_GIVEN,
model: str | NotGiven = NOT_GIVEN,
name: Optional[str] | NotGiven = NOT_GIVEN,
+ response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
+ temperature: Optional[float] | NotGiven = NOT_GIVEN,
+ tool_resources: Optional[assistant_update_params.ToolResources] | NotGiven = NOT_GIVEN,
tools: Iterable[AssistantToolParam] | NotGiven = NOT_GIVEN,
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -535,12 +620,6 @@ async def update(
The maximum length is 512 characters.
- file_ids: A list of [File](https://platform.openai.com/docs/api-reference/files) IDs
- attached to this assistant. There can be a maximum of 20 files attached to the
- assistant. Files are ordered by their creation date in ascending order. If a
- file was previously attached to the list but does not show up in the list, it
- will be deleted from the assistant.
-
instructions: The system instructions that the assistant uses. The maximum length is 256,000
characters.
@@ -557,8 +636,39 @@ async def update(
name: The name of the assistant. The maximum length is 256 characters.
+ response_format: Specifies the format that the model must output. Compatible with
+ [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
+ all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
+
+ Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
+ message the model generates is valid JSON.
+
+ **Important:** when using JSON mode, you **must** also instruct the model to
+ produce JSON yourself via a system or user message. Without this, the model may
+ generate an unending stream of whitespace until the generation reaches the token
+ limit, resulting in a long-running and seemingly "stuck" request. Also note that
+ the message content may be partially cut off if `finish_reason="length"`, which
+ indicates the generation exceeded `max_tokens` or the conversation exceeded the
+ max context length.
+
+ temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+ make the output more random, while lower values like 0.2 will make it more
+ focused and deterministic.
+
+ tool_resources: A set of resources that are used by the assistant's tools. The resources are
+ specific to the type of tool. For example, the `code_interpreter` tool requires
+ a list of file IDs, while the `file_search` tool requires a list of vector store
+ IDs.
+
tools: A list of tool enabled on the assistant. There can be a maximum of 128 tools per
- assistant. Tools can be of types `code_interpreter`, `retrieval`, or `function`.
+ assistant. Tools can be of types `code_interpreter`, `file_search`, or
+ `function`.
+
+ top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or temperature but not both.
extra_headers: Send extra headers
@@ -570,18 +680,21 @@ async def update(
"""
if not assistant_id:
raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._post(
f"/assistants/{assistant_id}",
body=await async_maybe_transform(
{
"description": description,
- "file_ids": file_ids,
"instructions": instructions,
"metadata": metadata,
"model": model,
"name": name,
+ "response_format": response_format,
+ "temperature": temperature,
+ "tool_resources": tool_resources,
"tools": tools,
+ "top_p": top_p,
},
assistant_update_params.AssistantUpdateParams,
),
@@ -634,7 +747,7 @@ def list(
timeout: Override the client-level default timeout for this request, in seconds
"""
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get_api_list(
"/assistants",
page=AsyncCursorPage[Assistant],
@@ -681,7 +794,7 @@ async def delete(
"""
if not assistant_id:
raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._delete(
f"/assistants/{assistant_id}",
options=make_request_options(
@@ -711,10 +824,6 @@ def __init__(self, assistants: Assistants) -> None:
assistants.delete,
)
- @cached_property
- def files(self) -> FilesWithRawResponse:
- return FilesWithRawResponse(self._assistants.files)
-
class AsyncAssistantsWithRawResponse:
def __init__(self, assistants: AsyncAssistants) -> None:
@@ -736,10 +845,6 @@ def __init__(self, assistants: AsyncAssistants) -> None:
assistants.delete,
)
- @cached_property
- def files(self) -> AsyncFilesWithRawResponse:
- return AsyncFilesWithRawResponse(self._assistants.files)
-
class AssistantsWithStreamingResponse:
def __init__(self, assistants: Assistants) -> None:
@@ -761,10 +866,6 @@ def __init__(self, assistants: Assistants) -> None:
assistants.delete,
)
- @cached_property
- def files(self) -> FilesWithStreamingResponse:
- return FilesWithStreamingResponse(self._assistants.files)
-
class AsyncAssistantsWithStreamingResponse:
def __init__(self, assistants: AsyncAssistants) -> None:
@@ -785,7 +886,3 @@ def __init__(self, assistants: AsyncAssistants) -> None:
self.delete = async_to_streamed_response_wrapper(
assistants.delete,
)
-
- @cached_property
- def files(self) -> AsyncFilesWithStreamingResponse:
- return AsyncFilesWithStreamingResponse(self._assistants.files)
diff --git a/src/openai/resources/beta/assistants/__init__.py b/src/openai/resources/beta/assistants/__init__.py
deleted file mode 100644
index 736def9388..0000000000
--- a/src/openai/resources/beta/assistants/__init__.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .files import (
- Files,
- AsyncFiles,
- FilesWithRawResponse,
- AsyncFilesWithRawResponse,
- FilesWithStreamingResponse,
- AsyncFilesWithStreamingResponse,
-)
-from .assistants import (
- Assistants,
- AsyncAssistants,
- AssistantsWithRawResponse,
- AsyncAssistantsWithRawResponse,
- AssistantsWithStreamingResponse,
- AsyncAssistantsWithStreamingResponse,
-)
-
-__all__ = [
- "Files",
- "AsyncFiles",
- "FilesWithRawResponse",
- "AsyncFilesWithRawResponse",
- "FilesWithStreamingResponse",
- "AsyncFilesWithStreamingResponse",
- "Assistants",
- "AsyncAssistants",
- "AssistantsWithRawResponse",
- "AsyncAssistantsWithRawResponse",
- "AssistantsWithStreamingResponse",
- "AsyncAssistantsWithStreamingResponse",
-]
diff --git a/src/openai/resources/beta/beta.py b/src/openai/resources/beta/beta.py
index 67baad2716..0d9806678f 100644
--- a/src/openai/resources/beta/beta.py
+++ b/src/openai/resources/beta/beta.py
@@ -20,13 +20,25 @@
AsyncAssistantsWithStreamingResponse,
)
from ..._resource import SyncAPIResource, AsyncAPIResource
+from .vector_stores import (
+ VectorStores,
+ AsyncVectorStores,
+ VectorStoresWithRawResponse,
+ AsyncVectorStoresWithRawResponse,
+ VectorStoresWithStreamingResponse,
+ AsyncVectorStoresWithStreamingResponse,
+)
from .threads.threads import Threads, AsyncThreads
-from .assistants.assistants import Assistants, AsyncAssistants
+from .vector_stores.vector_stores import VectorStores, AsyncVectorStores
__all__ = ["Beta", "AsyncBeta"]
class Beta(SyncAPIResource):
+ @cached_property
+ def vector_stores(self) -> VectorStores:
+ return VectorStores(self._client)
+
@cached_property
def assistants(self) -> Assistants:
return Assistants(self._client)
@@ -45,6 +57,10 @@ def with_streaming_response(self) -> BetaWithStreamingResponse:
class AsyncBeta(AsyncAPIResource):
+ @cached_property
+ def vector_stores(self) -> AsyncVectorStores:
+ return AsyncVectorStores(self._client)
+
@cached_property
def assistants(self) -> AsyncAssistants:
return AsyncAssistants(self._client)
@@ -66,6 +82,10 @@ class BetaWithRawResponse:
def __init__(self, beta: Beta) -> None:
self._beta = beta
+ @cached_property
+ def vector_stores(self) -> VectorStoresWithRawResponse:
+ return VectorStoresWithRawResponse(self._beta.vector_stores)
+
@cached_property
def assistants(self) -> AssistantsWithRawResponse:
return AssistantsWithRawResponse(self._beta.assistants)
@@ -79,6 +99,10 @@ class AsyncBetaWithRawResponse:
def __init__(self, beta: AsyncBeta) -> None:
self._beta = beta
+ @cached_property
+ def vector_stores(self) -> AsyncVectorStoresWithRawResponse:
+ return AsyncVectorStoresWithRawResponse(self._beta.vector_stores)
+
@cached_property
def assistants(self) -> AsyncAssistantsWithRawResponse:
return AsyncAssistantsWithRawResponse(self._beta.assistants)
@@ -92,6 +116,10 @@ class BetaWithStreamingResponse:
def __init__(self, beta: Beta) -> None:
self._beta = beta
+ @cached_property
+ def vector_stores(self) -> VectorStoresWithStreamingResponse:
+ return VectorStoresWithStreamingResponse(self._beta.vector_stores)
+
@cached_property
def assistants(self) -> AssistantsWithStreamingResponse:
return AssistantsWithStreamingResponse(self._beta.assistants)
@@ -105,6 +133,10 @@ class AsyncBetaWithStreamingResponse:
def __init__(self, beta: AsyncBeta) -> None:
self._beta = beta
+ @cached_property
+ def vector_stores(self) -> AsyncVectorStoresWithStreamingResponse:
+ return AsyncVectorStoresWithStreamingResponse(self._beta.vector_stores)
+
@cached_property
def assistants(self) -> AsyncAssistantsWithStreamingResponse:
return AsyncAssistantsWithStreamingResponse(self._beta.assistants)
diff --git a/src/openai/resources/beta/threads/messages/messages.py b/src/openai/resources/beta/threads/messages.py
similarity index 88%
rename from src/openai/resources/beta/threads/messages/messages.py
rename to src/openai/resources/beta/threads/messages.py
index bbce3e99e4..7a24b80dea 100644
--- a/src/openai/resources/beta/threads/messages/messages.py
+++ b/src/openai/resources/beta/threads/messages.py
@@ -2,43 +2,31 @@
from __future__ import annotations
-from typing import List, Optional
+from typing import Iterable, Optional
from typing_extensions import Literal
import httpx
-from ..... import _legacy_response
-from .files import (
- Files,
- AsyncFiles,
- FilesWithRawResponse,
- AsyncFilesWithRawResponse,
- FilesWithStreamingResponse,
- AsyncFilesWithStreamingResponse,
-)
-from ....._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ....._utils import (
+from .... import _legacy_response
+from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ...._utils import (
maybe_transform,
async_maybe_transform,
)
-from ....._compat import cached_property
-from ....._resource import SyncAPIResource, AsyncAPIResource
-from ....._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
-from .....pagination import SyncCursorPage, AsyncCursorPage
-from ....._base_client import (
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ....pagination import SyncCursorPage, AsyncCursorPage
+from ...._base_client import (
AsyncPaginator,
make_request_options,
)
-from .....types.beta.threads import Message, message_list_params, message_create_params, message_update_params
+from ....types.beta.threads import Message, message_list_params, message_create_params, message_update_params
__all__ = ["Messages", "AsyncMessages"]
class Messages(SyncAPIResource):
- @cached_property
- def files(self) -> Files:
- return Files(self._client)
-
@cached_property
def with_raw_response(self) -> MessagesWithRawResponse:
return MessagesWithRawResponse(self)
@@ -53,7 +41,7 @@ def create(
*,
content: str,
role: Literal["user", "assistant"],
- file_ids: List[str] | NotGiven = NOT_GIVEN,
+ attachments: Optional[Iterable[message_create_params.Attachment]] | NotGiven = NOT_GIVEN,
metadata: Optional[object] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -76,10 +64,7 @@ def create(
- `assistant`: Indicates the message is generated by the assistant. Use this
value to insert messages from the assistant into the conversation.
- file_ids: A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
- the message should use. There can be a maximum of 10 files attached to a
- message. Useful for tools like `retrieval` and `code_interpreter` that can
- access and use files.
+ attachments: A list of files attached to the message, and the tools they should be added to.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
for storing additional information about the object in a structured format. Keys
@@ -96,14 +81,14 @@ def create(
"""
if not thread_id:
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._post(
f"/threads/{thread_id}/messages",
body=maybe_transform(
{
"content": content,
"role": role,
- "file_ids": file_ids,
+ "attachments": attachments,
"metadata": metadata,
},
message_create_params.MessageCreateParams,
@@ -142,7 +127,7 @@ def retrieve(
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
if not message_id:
raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get(
f"/threads/{thread_id}/messages/{message_id}",
options=make_request_options(
@@ -185,7 +170,7 @@ def update(
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
if not message_id:
raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._post(
f"/threads/{thread_id}/messages/{message_id}",
body=maybe_transform({"metadata": metadata}, message_update_params.MessageUpdateParams),
@@ -243,7 +228,7 @@ def list(
"""
if not thread_id:
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get_api_list(
f"/threads/{thread_id}/messages",
page=SyncCursorPage[Message],
@@ -268,10 +253,6 @@ def list(
class AsyncMessages(AsyncAPIResource):
- @cached_property
- def files(self) -> AsyncFiles:
- return AsyncFiles(self._client)
-
@cached_property
def with_raw_response(self) -> AsyncMessagesWithRawResponse:
return AsyncMessagesWithRawResponse(self)
@@ -286,7 +267,7 @@ async def create(
*,
content: str,
role: Literal["user", "assistant"],
- file_ids: List[str] | NotGiven = NOT_GIVEN,
+ attachments: Optional[Iterable[message_create_params.Attachment]] | NotGiven = NOT_GIVEN,
metadata: Optional[object] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -309,10 +290,7 @@ async def create(
- `assistant`: Indicates the message is generated by the assistant. Use this
value to insert messages from the assistant into the conversation.
- file_ids: A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
- the message should use. There can be a maximum of 10 files attached to a
- message. Useful for tools like `retrieval` and `code_interpreter` that can
- access and use files.
+ attachments: A list of files attached to the message, and the tools they should be added to.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
for storing additional information about the object in a structured format. Keys
@@ -329,14 +307,14 @@ async def create(
"""
if not thread_id:
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._post(
f"/threads/{thread_id}/messages",
body=await async_maybe_transform(
{
"content": content,
"role": role,
- "file_ids": file_ids,
+ "attachments": attachments,
"metadata": metadata,
},
message_create_params.MessageCreateParams,
@@ -375,7 +353,7 @@ async def retrieve(
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
if not message_id:
raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._get(
f"/threads/{thread_id}/messages/{message_id}",
options=make_request_options(
@@ -418,7 +396,7 @@ async def update(
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
if not message_id:
raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._post(
f"/threads/{thread_id}/messages/{message_id}",
body=await async_maybe_transform({"metadata": metadata}, message_update_params.MessageUpdateParams),
@@ -476,7 +454,7 @@ def list(
"""
if not thread_id:
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get_api_list(
f"/threads/{thread_id}/messages",
page=AsyncCursorPage[Message],
@@ -517,10 +495,6 @@ def __init__(self, messages: Messages) -> None:
messages.list,
)
- @cached_property
- def files(self) -> FilesWithRawResponse:
- return FilesWithRawResponse(self._messages.files)
-
class AsyncMessagesWithRawResponse:
def __init__(self, messages: AsyncMessages) -> None:
@@ -539,10 +513,6 @@ def __init__(self, messages: AsyncMessages) -> None:
messages.list,
)
- @cached_property
- def files(self) -> AsyncFilesWithRawResponse:
- return AsyncFilesWithRawResponse(self._messages.files)
-
class MessagesWithStreamingResponse:
def __init__(self, messages: Messages) -> None:
@@ -561,10 +531,6 @@ def __init__(self, messages: Messages) -> None:
messages.list,
)
- @cached_property
- def files(self) -> FilesWithStreamingResponse:
- return FilesWithStreamingResponse(self._messages.files)
-
class AsyncMessagesWithStreamingResponse:
def __init__(self, messages: AsyncMessages) -> None:
@@ -582,7 +548,3 @@ def __init__(self, messages: AsyncMessages) -> None:
self.list = async_to_streamed_response_wrapper(
messages.list,
)
-
- @cached_property
- def files(self) -> AsyncFilesWithStreamingResponse:
- return AsyncFilesWithStreamingResponse(self._messages.files)
diff --git a/src/openai/resources/beta/threads/messages/__init__.py b/src/openai/resources/beta/threads/messages/__init__.py
deleted file mode 100644
index a3286e6ace..0000000000
--- a/src/openai/resources/beta/threads/messages/__init__.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .files import (
- Files,
- AsyncFiles,
- FilesWithRawResponse,
- AsyncFilesWithRawResponse,
- FilesWithStreamingResponse,
- AsyncFilesWithStreamingResponse,
-)
-from .messages import (
- Messages,
- AsyncMessages,
- MessagesWithRawResponse,
- AsyncMessagesWithRawResponse,
- MessagesWithStreamingResponse,
- AsyncMessagesWithStreamingResponse,
-)
-
-__all__ = [
- "Files",
- "AsyncFiles",
- "FilesWithRawResponse",
- "AsyncFilesWithRawResponse",
- "FilesWithStreamingResponse",
- "AsyncFilesWithStreamingResponse",
- "Messages",
- "AsyncMessages",
- "MessagesWithRawResponse",
- "AsyncMessagesWithRawResponse",
- "MessagesWithStreamingResponse",
- "AsyncMessagesWithStreamingResponse",
-]
diff --git a/src/openai/resources/beta/threads/messages/files.py b/src/openai/resources/beta/threads/messages/files.py
deleted file mode 100644
index 349f99725e..0000000000
--- a/src/openai/resources/beta/threads/messages/files.py
+++ /dev/null
@@ -1,312 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal
-
-import httpx
-
-from ..... import _legacy_response
-from ....._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ....._utils import maybe_transform
-from ....._compat import cached_property
-from ....._resource import SyncAPIResource, AsyncAPIResource
-from ....._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
-from .....pagination import SyncCursorPage, AsyncCursorPage
-from ....._base_client import (
- AsyncPaginator,
- make_request_options,
-)
-from .....types.beta.threads.messages import MessageFile, file_list_params
-
-__all__ = ["Files", "AsyncFiles"]
-
-
-class Files(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> FilesWithRawResponse:
- return FilesWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> FilesWithStreamingResponse:
- return FilesWithStreamingResponse(self)
-
- def retrieve(
- self,
- file_id: str,
- *,
- thread_id: str,
- message_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> MessageFile:
- """
- Retrieves a message file.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- if not message_id:
- raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
- if not file_id:
- raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
- return self._get(
- f"/threads/{thread_id}/messages/{message_id}/files/{file_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=MessageFile,
- )
-
- def list(
- self,
- message_id: str,
- *,
- thread_id: str,
- after: str | NotGiven = NOT_GIVEN,
- before: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> SyncCursorPage[MessageFile]:
- """Returns a list of message files.
-
- Args:
- after: A cursor for use in pagination.
-
- `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- before: A cursor for use in pagination. `before` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include before=obj_foo in order to
- fetch the previous page of the list.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
- order and `desc` for descending order.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- if not message_id:
- raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
- return self._get_api_list(
- f"/threads/{thread_id}/messages/{message_id}/files",
- page=SyncCursorPage[MessageFile],
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "before": before,
- "limit": limit,
- "order": order,
- },
- file_list_params.FileListParams,
- ),
- ),
- model=MessageFile,
- )
-
-
-class AsyncFiles(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncFilesWithRawResponse:
- return AsyncFilesWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncFilesWithStreamingResponse:
- return AsyncFilesWithStreamingResponse(self)
-
- async def retrieve(
- self,
- file_id: str,
- *,
- thread_id: str,
- message_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> MessageFile:
- """
- Retrieves a message file.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- if not message_id:
- raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
- if not file_id:
- raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
- return await self._get(
- f"/threads/{thread_id}/messages/{message_id}/files/{file_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=MessageFile,
- )
-
- def list(
- self,
- message_id: str,
- *,
- thread_id: str,
- after: str | NotGiven = NOT_GIVEN,
- before: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AsyncPaginator[MessageFile, AsyncCursorPage[MessageFile]]:
- """Returns a list of message files.
-
- Args:
- after: A cursor for use in pagination.
-
- `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- before: A cursor for use in pagination. `before` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include before=obj_foo in order to
- fetch the previous page of the list.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
- order and `desc` for descending order.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- if not message_id:
- raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
- return self._get_api_list(
- f"/threads/{thread_id}/messages/{message_id}/files",
- page=AsyncCursorPage[MessageFile],
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "before": before,
- "limit": limit,
- "order": order,
- },
- file_list_params.FileListParams,
- ),
- ),
- model=MessageFile,
- )
-
-
-class FilesWithRawResponse:
- def __init__(self, files: Files) -> None:
- self._files = files
-
- self.retrieve = _legacy_response.to_raw_response_wrapper(
- files.retrieve,
- )
- self.list = _legacy_response.to_raw_response_wrapper(
- files.list,
- )
-
-
-class AsyncFilesWithRawResponse:
- def __init__(self, files: AsyncFiles) -> None:
- self._files = files
-
- self.retrieve = _legacy_response.async_to_raw_response_wrapper(
- files.retrieve,
- )
- self.list = _legacy_response.async_to_raw_response_wrapper(
- files.list,
- )
-
-
-class FilesWithStreamingResponse:
- def __init__(self, files: Files) -> None:
- self._files = files
-
- self.retrieve = to_streamed_response_wrapper(
- files.retrieve,
- )
- self.list = to_streamed_response_wrapper(
- files.list,
- )
-
-
-class AsyncFilesWithStreamingResponse:
- def __init__(self, files: AsyncFiles) -> None:
- self._files = files
-
- self.retrieve = async_to_streamed_response_wrapper(
- files.retrieve,
- )
- self.list = async_to_streamed_response_wrapper(
- files.list,
- )
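The `Files`/`AsyncFiles` resource removed above backed the v1 `/threads/{thread_id}/messages/{message_id}/files` endpoints, which have no direct v2 counterpart: per-message file listing gives way to vector store files. A minimal sketch of the nearest v2 equivalent, assuming an existing vector store (the ID below is a placeholder):

```python
from openai import OpenAI

client = OpenAI()

# List the files attached to a vector store instead of to a message.
# "vs_abc123" is a hypothetical vector store ID.
for vs_file in client.beta.vector_stores.files.list("vs_abc123"):
    print(vs_file.id, vs_file.status)
```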
diff --git a/src/openai/resources/beta/threads/runs/runs.py b/src/openai/resources/beta/threads/runs/runs.py
index 9fa7239c0b..7aab17a30d 100644
--- a/src/openai/resources/beta/threads/runs/runs.py
+++ b/src/openai/resources/beta/threads/runs/runs.py
@@ -115,6 +115,7 @@ def create(
temperature: Optional[float] | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -196,6 +197,10 @@ def create(
tools: Override the tools the assistant can use for this run. This is useful for
modifying the behavior on a per-run basis.
+ top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -248,6 +253,7 @@ def create(
temperature: Optional[float] | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -329,6 +335,10 @@ def create(
tools: Override the tools the assistant can use for this run. This is useful for
modifying the behavior on a per-run basis.
+ top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -381,6 +391,7 @@ def create(
temperature: Optional[float] | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -462,6 +473,10 @@ def create(
tools: Override the tools the assistant can use for this run. This is useful for
modifying the behavior on a per-run basis.
+ top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -514,6 +529,7 @@ def create(
temperature: Optional[float] | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -524,7 +540,7 @@ def create(
) -> Run | Stream[AssistantStreamEvent]:
if not thread_id:
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._post(
f"/threads/{thread_id}/runs",
body=maybe_transform(
@@ -542,6 +558,7 @@ def create(
"temperature": temperature,
"tool_choice": tool_choice,
"tools": tools,
+ "top_p": top_p,
"truncation_strategy": truncation_strategy,
},
run_create_params.RunCreateParams,
@@ -582,7 +599,7 @@ def retrieve(
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
if not run_id:
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get(
f"/threads/{thread_id}/runs/{run_id}",
options=make_request_options(
@@ -625,7 +642,7 @@ def update(
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
if not run_id:
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._post(
f"/threads/{thread_id}/runs/{run_id}",
body=maybe_transform({"metadata": metadata}, run_update_params.RunUpdateParams),
@@ -680,7 +697,7 @@ def list(
"""
if not thread_id:
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get_api_list(
f"/threads/{thread_id}/runs",
page=SyncCursorPage[Run],
@@ -730,7 +747,7 @@ def cancel(
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
if not run_id:
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._post(
f"/threads/{thread_id}/runs/{run_id}/cancel",
options=make_request_options(
@@ -778,6 +795,7 @@ def create_and_poll(
temperature: Optional[float] | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
poll_interval_ms: int | NotGiven = NOT_GIVEN,
thread_id: str,
@@ -810,6 +828,7 @@ def create_and_poll(
stream=False,
tools=tools,
+                top_p=top_p,
                 truncation_strategy=truncation_strategy,
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
@@ -866,6 +885,7 @@ def create_and_stream(
temperature: Optional[float] | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
thread_id: str,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -919,6 +939,7 @@ def create_and_stream(
temperature: Optional[float] | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
thread_id: str,
event_handler: AssistantEventHandlerT,
@@ -972,6 +993,7 @@ def create_and_stream(
temperature: Optional[float] | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
thread_id: str,
event_handler: AssistantEventHandlerT | None = None,
@@ -987,7 +1009,7 @@ def create_and_stream(
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
extra_headers = {
- "OpenAI-Beta": "assistants=v1",
+ "OpenAI-Beta": "assistants=v2",
"X-Stainless-Stream-Helper": "threads.runs.create_and_stream",
"X-Stainless-Custom-Event-Handler": "true" if event_handler else "false",
**(extra_headers or {}),
@@ -1011,6 +1033,7 @@ def create_and_stream(
"stream": True,
"tools": tools,
"truncation_strategy": truncation_strategy,
+ "top_p": top_p,
},
run_create_params.RunCreateParams,
),
@@ -1108,6 +1131,7 @@ def stream(
temperature: Optional[float] | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
thread_id: str,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -1160,6 +1184,7 @@ def stream(
temperature: Optional[float] | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
thread_id: str,
event_handler: AssistantEventHandlerT,
@@ -1212,6 +1237,7 @@ def stream(
temperature: Optional[float] | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
thread_id: str,
event_handler: AssistantEventHandlerT | None = None,
@@ -1227,7 +1253,7 @@ def stream(
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
extra_headers = {
- "OpenAI-Beta": "assistants=v1",
+ "OpenAI-Beta": "assistants=v2",
"X-Stainless-Stream-Helper": "threads.runs.create_and_stream",
"X-Stainless-Custom-Event-Handler": "true" if event_handler else "false",
**(extra_headers or {}),
@@ -1251,6 +1277,7 @@ def stream(
"stream": True,
"tools": tools,
"truncation_strategy": truncation_strategy,
+ "top_p": top_p,
},
run_create_params.RunCreateParams,
),
@@ -1396,7 +1423,7 @@ def submit_tool_outputs(
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
if not run_id:
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._post(
f"/threads/{thread_id}/runs/{run_id}/submit_tool_outputs",
body=maybe_transform(
@@ -1522,7 +1549,7 @@ def submit_tool_outputs_stream(
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
extra_headers = {
- "OpenAI-Beta": "assistants=v1",
+ "OpenAI-Beta": "assistants=v2",
"X-Stainless-Stream-Helper": "threads.runs.submit_tool_outputs_stream",
"X-Stainless-Custom-Event-Handler": "true" if event_handler else "false",
**(extra_headers or {}),
@@ -1602,6 +1629,7 @@ async def create(
temperature: Optional[float] | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -1683,6 +1711,10 @@ async def create(
tools: Override the tools the assistant can use for this run. This is useful for
modifying the behavior on a per-run basis.
+ top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -1735,6 +1767,7 @@ async def create(
temperature: Optional[float] | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -1816,6 +1849,10 @@ async def create(
tools: Override the tools the assistant can use for this run. This is useful for
modifying the behavior on a per-run basis.
+ top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -1868,6 +1905,7 @@ async def create(
temperature: Optional[float] | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -1949,6 +1987,10 @@ async def create(
tools: Override the tools the assistant can use for this run. This is useful for
modifying the behavior on a per-run basis.
+ top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -2001,6 +2043,7 @@ async def create(
temperature: Optional[float] | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -2011,7 +2054,7 @@ async def create(
) -> Run | AsyncStream[AssistantStreamEvent]:
if not thread_id:
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._post(
f"/threads/{thread_id}/runs",
body=await async_maybe_transform(
@@ -2029,6 +2072,7 @@ async def create(
"temperature": temperature,
"tool_choice": tool_choice,
"tools": tools,
+ "top_p": top_p,
"truncation_strategy": truncation_strategy,
},
run_create_params.RunCreateParams,
@@ -2069,7 +2113,7 @@ async def retrieve(
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
if not run_id:
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._get(
f"/threads/{thread_id}/runs/{run_id}",
options=make_request_options(
@@ -2112,7 +2156,7 @@ async def update(
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
if not run_id:
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._post(
f"/threads/{thread_id}/runs/{run_id}",
body=await async_maybe_transform({"metadata": metadata}, run_update_params.RunUpdateParams),
@@ -2167,7 +2211,7 @@ def list(
"""
if not thread_id:
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get_api_list(
f"/threads/{thread_id}/runs",
page=AsyncCursorPage[Run],
@@ -2217,7 +2261,7 @@ async def cancel(
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
if not run_id:
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._post(
f"/threads/{thread_id}/runs/{run_id}/cancel",
options=make_request_options(
@@ -2265,6 +2309,7 @@ async def create_and_poll(
temperature: Optional[float] | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
poll_interval_ms: int | NotGiven = NOT_GIVEN,
thread_id: str,
@@ -2297,6 +2342,7 @@ async def create_and_poll(
stream=False,
tools=tools,
+                top_p=top_p,
                 truncation_strategy=truncation_strategy,
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
@@ -2353,6 +2399,7 @@ def create_and_stream(
temperature: Optional[float] | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
thread_id: str,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -2406,6 +2453,7 @@ def create_and_stream(
temperature: Optional[float] | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
thread_id: str,
event_handler: AsyncAssistantEventHandlerT,
@@ -2459,6 +2507,7 @@ def create_and_stream(
temperature: Optional[float] | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
thread_id: str,
event_handler: AsyncAssistantEventHandlerT | None = None,
@@ -2477,7 +2526,7 @@ def create_and_stream(
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
extra_headers = {
- "OpenAI-Beta": "assistants=v1",
+ "OpenAI-Beta": "assistants=v2",
"X-Stainless-Stream-Helper": "threads.runs.create_and_stream",
"X-Stainless-Custom-Event-Handler": "true" if event_handler else "false",
**(extra_headers or {}),
@@ -2500,6 +2549,7 @@ def create_and_stream(
"stream": True,
"tools": tools,
"truncation_strategy": truncation_strategy,
+ "top_p": top_p,
},
run_create_params.RunCreateParams,
),
@@ -2597,6 +2647,7 @@ def stream(
temperature: Optional[float] | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
thread_id: str,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -2649,6 +2700,7 @@ def stream(
temperature: Optional[float] | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
thread_id: str,
event_handler: AsyncAssistantEventHandlerT,
@@ -2701,6 +2753,7 @@ def stream(
temperature: Optional[float] | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
thread_id: str,
event_handler: AsyncAssistantEventHandlerT | None = None,
@@ -2719,7 +2772,7 @@ def stream(
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
extra_headers = {
- "OpenAI-Beta": "assistants=v1",
+ "OpenAI-Beta": "assistants=v2",
"X-Stainless-Stream-Helper": "threads.runs.create_and_stream",
"X-Stainless-Custom-Event-Handler": "true" if event_handler else "false",
**(extra_headers or {}),
@@ -2742,6 +2795,7 @@ def stream(
"stream": True,
"tools": tools,
"truncation_strategy": truncation_strategy,
+ "top_p": top_p,
},
run_create_params.RunCreateParams,
),
@@ -2887,7 +2941,7 @@ async def submit_tool_outputs(
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
if not run_id:
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._post(
f"/threads/{thread_id}/runs/{run_id}/submit_tool_outputs",
body=await async_maybe_transform(
@@ -3016,7 +3070,7 @@ def submit_tool_outputs_stream(
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
extra_headers = {
- "OpenAI-Beta": "assistants=v1",
+ "OpenAI-Beta": "assistants=v2",
"X-Stainless-Stream-Helper": "threads.runs.submit_tool_outputs_stream",
"X-Stainless-Custom-Event-Handler": "true" if event_handler else "false",
**(extra_headers or {}),
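Every `create`/`create_and_poll`/`stream` overload in runs.py above now threads the new `top_p` parameter through alongside the `assistants=v2` header. A short sketch of using it, with placeholder thread and assistant IDs:

```python
from openai import OpenAI

client = OpenAI()

# Nucleus sampling applied per run; both IDs are placeholders.
run = client.beta.threads.runs.create_and_poll(
    thread_id="thread_abc123",
    assistant_id="asst_abc123",
    top_p=0.1,  # consider only tokens in the top 10% probability mass
)
print(run.status)
```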
diff --git a/src/openai/resources/beta/threads/runs/steps.py b/src/openai/resources/beta/threads/runs/steps.py
index 118bd8822a..986ef2997a 100644
--- a/src/openai/resources/beta/threads/runs/steps.py
+++ b/src/openai/resources/beta/threads/runs/steps.py
@@ -62,7 +62,7 @@ def retrieve(
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
if not step_id:
raise ValueError(f"Expected a non-empty value for `step_id` but received {step_id!r}")
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get(
f"/threads/{thread_id}/runs/{run_id}/steps/{step_id}",
options=make_request_options(
@@ -119,7 +119,7 @@ def list(
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
if not run_id:
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get_api_list(
f"/threads/{thread_id}/runs/{run_id}/steps",
page=SyncCursorPage[RunStep],
@@ -182,7 +182,7 @@ async def retrieve(
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
if not step_id:
raise ValueError(f"Expected a non-empty value for `step_id` but received {step_id!r}")
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._get(
f"/threads/{thread_id}/runs/{run_id}/steps/{step_id}",
options=make_request_options(
@@ -239,7 +239,7 @@ def list(
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
if not run_id:
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get_api_list(
f"/threads/{thread_id}/runs/{run_id}/steps",
page=AsyncCursorPage[RunStep],
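steps.py only swaps the beta header, so usage is unchanged; for context, a sketch of inspecting a run's steps (IDs are placeholders):

```python
from openai import OpenAI

client = OpenAI()

# Walk the steps the assistant took during a run.
steps = client.beta.threads.runs.steps.list(
    "run_abc123",
    thread_id="thread_abc123",
)
for step in steps:
    print(step.id, step.type, step.status)
```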
diff --git a/src/openai/resources/beta/threads/threads.py b/src/openai/resources/beta/threads/threads.py
index 9c2e2f0043..678c621a10 100644
--- a/src/openai/resources/beta/threads/threads.py
+++ b/src/openai/resources/beta/threads/threads.py
@@ -57,7 +57,6 @@
AsyncAssistantEventHandlerT,
AsyncAssistantStreamManager,
)
-from .messages.messages import Messages, AsyncMessages
from ....types.beta.threads import Run
__all__ = ["Threads", "AsyncThreads"]
@@ -85,6 +84,7 @@ def create(
*,
messages: Iterable[thread_create_params.Message] | NotGiven = NOT_GIVEN,
metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ tool_resources: Optional[thread_create_params.ToolResources] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -104,6 +104,11 @@ def create(
           can be a maximum of 64 characters long and values can be a maximum of 512
characters long.
+ tool_resources: A set of resources that are made available to the assistant's tools in this
+ thread. The resources are specific to the type of tool. For example, the
+ `code_interpreter` tool requires a list of file IDs, while the `file_search`
+ tool requires a list of vector store IDs.
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -112,13 +117,14 @@ def create(
timeout: Override the client-level default timeout for this request, in seconds
"""
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._post(
"/threads",
body=maybe_transform(
{
"messages": messages,
"metadata": metadata,
+ "tool_resources": tool_resources,
},
thread_create_params.ThreadCreateParams,
),
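With `tool_resources` now accepted at thread creation, a thread can carry the files its tools need. A hedged sketch of the shape described in the docstring above (all IDs are placeholders):

```python
from openai import OpenAI

client = OpenAI()

thread = client.beta.threads.create(
    tool_resources={
        # code_interpreter takes file IDs; file_search takes vector store IDs
        "code_interpreter": {"file_ids": ["file_abc123"]},
        "file_search": {"vector_store_ids": ["vs_abc123"]},
    },
)
```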
@@ -153,7 +159,7 @@ def retrieve(
"""
if not thread_id:
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get(
f"/threads/{thread_id}",
options=make_request_options(
@@ -167,6 +173,7 @@ def update(
thread_id: str,
*,
metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ tool_resources: Optional[thread_update_params.ToolResources] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -183,6 +190,11 @@ def update(
           can be a maximum of 64 characters long and values can be a maximum of 512
characters long.
+ tool_resources: A set of resources that are made available to the assistant's tools in this
+ thread. The resources are specific to the type of tool. For example, the
+ `code_interpreter` tool requires a list of file IDs, while the `file_search`
+ tool requires a list of vector store IDs.
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -193,10 +205,16 @@ def update(
"""
if not thread_id:
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._post(
f"/threads/{thread_id}",
- body=maybe_transform({"metadata": metadata}, thread_update_params.ThreadUpdateParams),
+ body=maybe_transform(
+ {
+ "metadata": metadata,
+ "tool_resources": tool_resources,
+ },
+ thread_update_params.ThreadUpdateParams,
+ ),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -228,7 +246,7 @@ def delete(
"""
if not thread_id:
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._delete(
f"/threads/{thread_id}",
options=make_request_options(
@@ -276,7 +294,9 @@ def create_and_run(
temperature: Optional[float] | NotGiven = NOT_GIVEN,
thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+ tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -299,13 +319,13 @@ def create_and_run(
max_completion_tokens: The maximum number of completion tokens that may be used over the course of the
run. The run will make a best effort to use only the number of completion tokens
specified, across multiple turns of the run. If the run exceeds the number of
- completion tokens specified, the run will end with status `complete`. See
+ completion tokens specified, the run will end with status `incomplete`. See
`incomplete_details` for more info.
max_prompt_tokens: The maximum number of prompt tokens that may be used over the course of the run.
The run will make a best effort to use only the number of prompt tokens
specified, across multiple turns of the run. If the run exceeds the number of
- prompt tokens specified, the run will end with status `complete`. See
+ prompt tokens specified, the run will end with status `incomplete`. See
`incomplete_details` for more info.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
@@ -350,9 +370,18 @@ def create_and_run(
`{"type": "function", "function": {"name": "my_function"}}` forces the model to
call that tool.
+ tool_resources: A set of resources that are used by the assistant's tools. The resources are
+ specific to the type of tool. For example, the `code_interpreter` tool requires
+ a list of file IDs, while the `file_search` tool requires a list of vector store
+ IDs.
+
tools: Override the tools the assistant can use for this run. This is useful for
modifying the behavior on a per-run basis.
+ top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -402,7 +431,9 @@ def create_and_run(
temperature: Optional[float] | NotGiven = NOT_GIVEN,
thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+ tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -429,13 +460,13 @@ def create_and_run(
max_completion_tokens: The maximum number of completion tokens that may be used over the course of the
run. The run will make a best effort to use only the number of completion tokens
specified, across multiple turns of the run. If the run exceeds the number of
- completion tokens specified, the run will end with status `complete`. See
+ completion tokens specified, the run will end with status `incomplete`. See
`incomplete_details` for more info.
max_prompt_tokens: The maximum number of prompt tokens that may be used over the course of the run.
The run will make a best effort to use only the number of prompt tokens
specified, across multiple turns of the run. If the run exceeds the number of
- prompt tokens specified, the run will end with status `complete`. See
+ prompt tokens specified, the run will end with status `incomplete`. See
`incomplete_details` for more info.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
@@ -476,9 +507,18 @@ def create_and_run(
`{"type": "function", "function": {"name": "my_function"}}` forces the model to
call that tool.
+ tool_resources: A set of resources that are used by the assistant's tools. The resources are
+ specific to the type of tool. For example, the `code_interpreter` tool requires
+ a list of file IDs, while the `file_search` tool requires a list of vector store
+ IDs.
+
tools: Override the tools the assistant can use for this run. This is useful for
modifying the behavior on a per-run basis.
+ top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
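The `tool_choice` docstring above shows the shape for forcing a specific tool; as a sketch (the assistant ID and function name are placeholders):

```python
from openai import OpenAI

client = OpenAI()

# Force the model to call a specific function tool for this run.
run = client.beta.threads.create_and_run(
    assistant_id="asst_abc123",
    tool_choice={"type": "function", "function": {"name": "my_function"}},
)
```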
@@ -528,7 +568,9 @@ def create_and_run(
temperature: Optional[float] | NotGiven = NOT_GIVEN,
thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+ tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -555,13 +597,13 @@ def create_and_run(
max_completion_tokens: The maximum number of completion tokens that may be used over the course of the
run. The run will make a best effort to use only the number of completion tokens
specified, across multiple turns of the run. If the run exceeds the number of
- completion tokens specified, the run will end with status `complete`. See
+ completion tokens specified, the run will end with status `incomplete`. See
`incomplete_details` for more info.
max_prompt_tokens: The maximum number of prompt tokens that may be used over the course of the run.
The run will make a best effort to use only the number of prompt tokens
specified, across multiple turns of the run. If the run exceeds the number of
- prompt tokens specified, the run will end with status `complete`. See
+ prompt tokens specified, the run will end with status `incomplete`. See
`incomplete_details` for more info.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
@@ -602,9 +644,18 @@ def create_and_run(
`{"type": "function", "function": {"name": "my_function"}}` forces the model to
call that tool.
+ tool_resources: A set of resources that are used by the assistant's tools. The resources are
+ specific to the type of tool. For example, the `code_interpreter` tool requires
+ a list of file IDs, while the `file_search` tool requires a list of vector store
+ IDs.
+
tools: Override the tools the assistant can use for this run. This is useful for
modifying the behavior on a per-run basis.
+ top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -654,7 +705,9 @@ def create_and_run(
temperature: Optional[float] | NotGiven = NOT_GIVEN,
thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+ tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -663,7 +716,7 @@ def create_and_run(
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> Run | Stream[AssistantStreamEvent]:
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._post(
"/threads/runs",
body=maybe_transform(
@@ -679,7 +732,9 @@ def create_and_run(
"temperature": temperature,
"thread": thread,
"tool_choice": tool_choice,
+ "tool_resources": tool_resources,
"tools": tools,
+ "top_p": top_p,
"truncation_strategy": truncation_strategy,
},
thread_create_and_run_params.ThreadCreateAndRunParams,
@@ -729,7 +784,9 @@ def create_and_run_poll(
temperature: Optional[float] | NotGiven = NOT_GIVEN,
thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+ tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
poll_interval_ms: int | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -755,8 +812,10 @@ def create_and_run_poll(
temperature=temperature,
stream=False,
                 thread=thread,
                 tool_choice=tool_choice,
+                tool_resources=tool_resources,
+                top_p=top_p,
                 truncation_strategy=truncation_strategy,
tools=tools,
extra_headers=extra_headers,
extra_query=extra_query,
@@ -803,7 +862,9 @@ def create_and_run_stream(
temperature: Optional[float] | NotGiven = NOT_GIVEN,
thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+ tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -853,7 +914,9 @@ def create_and_run_stream(
temperature: Optional[float] | NotGiven = NOT_GIVEN,
thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+ tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
event_handler: AssistantEventHandlerT,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -903,7 +966,9 @@ def create_and_run_stream(
temperature: Optional[float] | NotGiven = NOT_GIVEN,
thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+ tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
event_handler: AssistantEventHandlerT | None = None,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -915,7 +980,7 @@ def create_and_run_stream(
) -> AssistantStreamManager[AssistantEventHandler] | AssistantStreamManager[AssistantEventHandlerT]:
"""Create a thread and stream the run back"""
extra_headers = {
- "OpenAI-Beta": "assistants=v1",
+ "OpenAI-Beta": "assistants=v2",
"X-Stainless-Stream-Helper": "threads.create_and_run_stream",
"X-Stainless-Custom-Event-Handler": "true" if event_handler else "false",
**(extra_headers or {}),
@@ -937,7 +1002,9 @@ def create_and_run_stream(
"stream": True,
"thread": thread,
"tools": tools,
+ "tool": tool_resources,
"truncation_strategy": truncation_strategy,
+ "top_p": top_p,
},
thread_create_and_run_params.ThreadCreateAndRunParams,
),
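Putting the new thread-level knobs together: a sketch of `create_and_run_poll` with a per-run token cap, illustrating the `incomplete` terminal status the docstrings above describe (IDs are placeholders, and `incomplete_details` is surfaced on the returned Run as those docstrings note):

```python
from openai import OpenAI

client = OpenAI()

run = client.beta.threads.create_and_run_poll(
    assistant_id="asst_abc123",
    thread={"messages": [{"role": "user", "content": "Summarize our last meeting."}]},
    max_completion_tokens=256,
    top_p=0.2,
)
if run.status == "incomplete":
    # The run stopped early, e.g. because it hit max_completion_tokens.
    print(run.incomplete_details)
```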
@@ -973,6 +1040,7 @@ async def create(
*,
messages: Iterable[thread_create_params.Message] | NotGiven = NOT_GIVEN,
metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ tool_resources: Optional[thread_create_params.ToolResources] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -992,6 +1060,11 @@ async def create(
           can be a maximum of 64 characters long and values can be a maximum of 512
characters long.
+ tool_resources: A set of resources that are made available to the assistant's tools in this
+ thread. The resources are specific to the type of tool. For example, the
+ `code_interpreter` tool requires a list of file IDs, while the `file_search`
+ tool requires a list of vector store IDs.
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -1000,13 +1073,14 @@ async def create(
timeout: Override the client-level default timeout for this request, in seconds
"""
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._post(
"/threads",
body=await async_maybe_transform(
{
"messages": messages,
"metadata": metadata,
+ "tool_resources": tool_resources,
},
thread_create_params.ThreadCreateParams,
),
@@ -1041,7 +1115,7 @@ async def retrieve(
"""
if not thread_id:
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._get(
f"/threads/{thread_id}",
options=make_request_options(
@@ -1055,6 +1129,7 @@ async def update(
thread_id: str,
*,
metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ tool_resources: Optional[thread_update_params.ToolResources] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -1071,6 +1146,11 @@ async def update(
           can be a maximum of 64 characters long and values can be a maximum of 512
characters long.
+ tool_resources: A set of resources that are made available to the assistant's tools in this
+ thread. The resources are specific to the type of tool. For example, the
+ `code_interpreter` tool requires a list of file IDs, while the `file_search`
+ tool requires a list of vector store IDs.
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -1081,10 +1161,16 @@ async def update(
"""
if not thread_id:
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._post(
f"/threads/{thread_id}",
- body=await async_maybe_transform({"metadata": metadata}, thread_update_params.ThreadUpdateParams),
+ body=await async_maybe_transform(
+ {
+ "metadata": metadata,
+ "tool_resources": tool_resources,
+ },
+ thread_update_params.ThreadUpdateParams,
+ ),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -1116,7 +1202,7 @@ async def delete(
"""
if not thread_id:
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._delete(
f"/threads/{thread_id}",
options=make_request_options(
@@ -1164,7 +1250,9 @@ async def create_and_run(
temperature: Optional[float] | NotGiven = NOT_GIVEN,
thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+ tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -1187,13 +1275,13 @@ async def create_and_run(
max_completion_tokens: The maximum number of completion tokens that may be used over the course of the
run. The run will make a best effort to use only the number of completion tokens
specified, across multiple turns of the run. If the run exceeds the number of
- completion tokens specified, the run will end with status `complete`. See
+ completion tokens specified, the run will end with status `incomplete`. See
`incomplete_details` for more info.
max_prompt_tokens: The maximum number of prompt tokens that may be used over the course of the run.
The run will make a best effort to use only the number of prompt tokens
specified, across multiple turns of the run. If the run exceeds the number of
- prompt tokens specified, the run will end with status `complete`. See
+ prompt tokens specified, the run will end with status `incomplete`. See
`incomplete_details` for more info.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
@@ -1238,9 +1326,18 @@ async def create_and_run(
`{"type": "function", "function": {"name": "my_function"}}` forces the model to
call that tool.
+ tool_resources: A set of resources that are used by the assistant's tools. The resources are
+ specific to the type of tool. For example, the `code_interpreter` tool requires
+ a list of file IDs, while the `file_search` tool requires a list of vector store
+ IDs.
+
tools: Override the tools the assistant can use for this run. This is useful for
modifying the behavior on a per-run basis.
+ top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -1290,7 +1387,9 @@ async def create_and_run(
temperature: Optional[float] | NotGiven = NOT_GIVEN,
thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+ tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -1317,13 +1416,13 @@ async def create_and_run(
max_completion_tokens: The maximum number of completion tokens that may be used over the course of the
run. The run will make a best effort to use only the number of completion tokens
specified, across multiple turns of the run. If the run exceeds the number of
- completion tokens specified, the run will end with status `complete`. See
+ completion tokens specified, the run will end with status `incomplete`. See
`incomplete_details` for more info.
max_prompt_tokens: The maximum number of prompt tokens that may be used over the course of the run.
The run will make a best effort to use only the number of prompt tokens
specified, across multiple turns of the run. If the run exceeds the number of
- prompt tokens specified, the run will end with status `complete`. See
+ prompt tokens specified, the run will end with status `incomplete`. See
`incomplete_details` for more info.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
@@ -1364,9 +1463,18 @@ async def create_and_run(
`{"type": "function", "function": {"name": "my_function"}}` forces the model to
call that tool.
+ tool_resources: A set of resources that are used by the assistant's tools. The resources are
+ specific to the type of tool. For example, the `code_interpreter` tool requires
+ a list of file IDs, while the `file_search` tool requires a list of vector store
+ IDs.
+
tools: Override the tools the assistant can use for this run. This is useful for
modifying the behavior on a per-run basis.
+ top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -1416,7 +1524,9 @@ async def create_and_run(
temperature: Optional[float] | NotGiven = NOT_GIVEN,
thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+ tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -1443,13 +1553,13 @@ async def create_and_run(
max_completion_tokens: The maximum number of completion tokens that may be used over the course of the
run. The run will make a best effort to use only the number of completion tokens
specified, across multiple turns of the run. If the run exceeds the number of
- completion tokens specified, the run will end with status `complete`. See
+ completion tokens specified, the run will end with status `incomplete`. See
`incomplete_details` for more info.
max_prompt_tokens: The maximum number of prompt tokens that may be used over the course of the run.
The run will make a best effort to use only the number of prompt tokens
specified, across multiple turns of the run. If the run exceeds the number of
- prompt tokens specified, the run will end with status `complete`. See
+ prompt tokens specified, the run will end with status `incomplete`. See
`incomplete_details` for more info.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
@@ -1490,9 +1600,18 @@ async def create_and_run(
`{"type": "function", "function": {"name": "my_function"}}` forces the model to
call that tool.
+ tool_resources: A set of resources that are used by the assistant's tools. The resources are
+ specific to the type of tool. For example, the `code_interpreter` tool requires
+ a list of file IDs, while the `file_search` tool requires a list of vector store
+ IDs.
+
tools: Override the tools the assistant can use for this run. This is useful for
modifying the behavior on a per-run basis.
+ top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -1542,7 +1661,9 @@ async def create_and_run(
temperature: Optional[float] | NotGiven = NOT_GIVEN,
thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+ tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -1551,7 +1672,7 @@ async def create_and_run(
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> Run | AsyncStream[AssistantStreamEvent]:
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._post(
"/threads/runs",
body=await async_maybe_transform(
@@ -1567,7 +1688,9 @@ async def create_and_run(
"temperature": temperature,
"thread": thread,
"tool_choice": tool_choice,
+ "tool_resources": tool_resources,
"tools": tools,
+ "top_p": top_p,
"truncation_strategy": truncation_strategy,
},
thread_create_and_run_params.ThreadCreateAndRunParams,
@@ -1617,7 +1740,9 @@ async def create_and_run_poll(
temperature: Optional[float] | NotGiven = NOT_GIVEN,
thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+ tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
poll_interval_ms: int | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -1643,8 +1768,10 @@ async def create_and_run_poll(
temperature=temperature,
stream=False,
thread=thread,
+ tool_resources=tool_resources,
tool_choice=tool_choice,
truncation_strategy=truncation_strategy,
+ top_p=top_p,
tools=tools,
extra_headers=extra_headers,
extra_query=extra_query,
@@ -1693,7 +1820,9 @@ def create_and_run_stream(
temperature: Optional[float] | NotGiven = NOT_GIVEN,
thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+ tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -1743,7 +1872,9 @@ def create_and_run_stream(
temperature: Optional[float] | NotGiven = NOT_GIVEN,
thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+ tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
event_handler: AsyncAssistantEventHandlerT,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -1793,7 +1924,9 @@ def create_and_run_stream(
temperature: Optional[float] | NotGiven = NOT_GIVEN,
thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
+ tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
event_handler: AsyncAssistantEventHandlerT | None = None,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -1808,7 +1941,7 @@ def create_and_run_stream(
):
"""Create a thread and stream the run back"""
extra_headers = {
- "OpenAI-Beta": "assistants=v1",
+ "OpenAI-Beta": "assistants=v2",
"X-Stainless-Stream-Helper": "threads.create_and_run_stream",
"X-Stainless-Custom-Event-Handler": "true" if event_handler else "false",
**(extra_headers or {}),
@@ -1829,7 +1962,9 @@ def create_and_run_stream(
"stream": True,
"thread": thread,
"tools": tools,
+ "tool": tool_resources,
"truncation_strategy": truncation_strategy,
+ "top_p": top_p,
},
thread_create_and_run_params.ThreadCreateAndRunParams,
),
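
Taken together, these hunks let callers pass the new `tool_resources` and `top_p` parameters straight through `create_and_run`. A minimal sketch, with placeholder assistant and vector store IDs:

```python
from openai import OpenAI

client = OpenAI()

run = client.beta.threads.create_and_run(
    assistant_id="asst_abc123",  # placeholder assistant ID
    thread={"messages": [{"role": "user", "content": "Summarize the attached paper."}]},
    # New in v2: per-run tool resources, e.g. vector stores for `file_search`.
    tool_resources={"file_search": {"vector_store_ids": ["vs_abc123"]}},
    top_p=0.1,  # nucleus sampling: only the top 10% probability mass is considered
)
```
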
diff --git a/src/openai/resources/beta/vector_stores/__init__.py b/src/openai/resources/beta/vector_stores/__init__.py
new file mode 100644
index 0000000000..96ae16c302
--- /dev/null
+++ b/src/openai/resources/beta/vector_stores/__init__.py
@@ -0,0 +1,47 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .files import (
+ Files,
+ AsyncFiles,
+ FilesWithRawResponse,
+ AsyncFilesWithRawResponse,
+ FilesWithStreamingResponse,
+ AsyncFilesWithStreamingResponse,
+)
+from .file_batches import (
+ FileBatches,
+ AsyncFileBatches,
+ FileBatchesWithRawResponse,
+ AsyncFileBatchesWithRawResponse,
+ FileBatchesWithStreamingResponse,
+ AsyncFileBatchesWithStreamingResponse,
+)
+from .vector_stores import (
+ VectorStores,
+ AsyncVectorStores,
+ VectorStoresWithRawResponse,
+ AsyncVectorStoresWithRawResponse,
+ VectorStoresWithStreamingResponse,
+ AsyncVectorStoresWithStreamingResponse,
+)
+
+__all__ = [
+ "Files",
+ "AsyncFiles",
+ "FilesWithRawResponse",
+ "AsyncFilesWithRawResponse",
+ "FilesWithStreamingResponse",
+ "AsyncFilesWithStreamingResponse",
+ "FileBatches",
+ "AsyncFileBatches",
+ "FileBatchesWithRawResponse",
+ "AsyncFileBatchesWithRawResponse",
+ "FileBatchesWithStreamingResponse",
+ "AsyncFileBatchesWithStreamingResponse",
+ "VectorStores",
+ "AsyncVectorStores",
+ "VectorStoresWithRawResponse",
+ "AsyncVectorStoresWithRawResponse",
+ "VectorStoresWithStreamingResponse",
+ "AsyncVectorStoresWithStreamingResponse",
+]
diff --git a/src/openai/resources/beta/vector_stores/file_batches.py b/src/openai/resources/beta/vector_stores/file_batches.py
new file mode 100644
index 0000000000..55b30b08e3
--- /dev/null
+++ b/src/openai/resources/beta/vector_stores/file_batches.py
@@ -0,0 +1,739 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import asyncio
+from typing import List, Iterable
+from typing_extensions import Literal
+from concurrent.futures import Future, ThreadPoolExecutor, as_completed
+
+import httpx
+import sniffio
+
+from .... import _legacy_response
+from ....types import FileObject
+from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes
+from ...._utils import (
+ is_given,
+ maybe_transform,
+ async_maybe_transform,
+)
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ....pagination import SyncCursorPage, AsyncCursorPage
+from ...._base_client import (
+ AsyncPaginator,
+ make_request_options,
+)
+from ....types.beta.vector_stores import (
+ VectorStoreFile,
+ VectorStoreFileBatch,
+ file_batch_create_params,
+ file_batch_list_files_params,
+)
+
+__all__ = ["FileBatches", "AsyncFileBatches"]
+
+
+class FileBatches(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> FileBatchesWithRawResponse:
+ return FileBatchesWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> FileBatchesWithStreamingResponse:
+ return FileBatchesWithStreamingResponse(self)
+
+ def create(
+ self,
+ vector_store_id: str,
+ *,
+ file_ids: List[str],
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreFileBatch:
+ """
+ Create a vector store file batch.
+
+ Args:
+ file_ids: A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
+ the vector store should use. Useful for tools like `file_search` that can access
+ files.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return self._post(
+ f"/vector_stores/{vector_store_id}/file_batches",
+ body=maybe_transform({"file_ids": file_ids}, file_batch_create_params.FileBatchCreateParams),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=VectorStoreFileBatch,
+ )
+
+ def retrieve(
+ self,
+ batch_id: str,
+ *,
+ vector_store_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreFileBatch:
+ """
+ Retrieves a vector store file batch.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+ if not batch_id:
+ raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return self._get(
+ f"/vector_stores/{vector_store_id}/file_batches/{batch_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=VectorStoreFileBatch,
+ )
+
+ def cancel(
+ self,
+ batch_id: str,
+ *,
+ vector_store_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreFileBatch:
+ """Cancel a vector store file batch.
+
+ This attempts to cancel the processing of
+ files in this batch as soon as possible.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+ if not batch_id:
+ raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return self._post(
+ f"/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=VectorStoreFileBatch,
+ )
+
+ def create_and_poll(
+ self,
+ vector_store_id: str,
+ *,
+ file_ids: List[str],
+ poll_interval_ms: int | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreFileBatch:
+ """Create a vector store batch and poll until all files have been processed."""
+ batch = self.create(
+ vector_store_id=vector_store_id,
+ file_ids=file_ids,
+ )
+ # TODO: don't poll unless necessary??
+ return self.poll(
+ batch.id,
+ vector_store_id=vector_store_id,
+ poll_interval_ms=poll_interval_ms,
+ )
+
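
`create_and_poll` chains the `create` call above with the `poll` helper defined below. A minimal usage sketch, assuming the file IDs point at files already uploaded with `purpose="assistants"`:

```python
from openai import OpenAI

client = OpenAI()

batch = client.beta.vector_stores.file_batches.create_and_poll(
    "vs_abc123",  # placeholder vector store ID
    file_ids=["file_abc123", "file_def456"],  # placeholder file IDs
)
print(batch.status, batch.file_counts)
```
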
+ def list_files(
+ self,
+ batch_id: str,
+ *,
+ vector_store_id: str,
+ after: str | NotGiven = NOT_GIVEN,
+ before: str | NotGiven = NOT_GIVEN,
+ filter: Literal["in_progress", "completed", "failed", "cancelled"] | NotGiven = NOT_GIVEN,
+ limit: int | NotGiven = NOT_GIVEN,
+ order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> SyncCursorPage[VectorStoreFile]:
+ """
+ Returns a list of vector store files in a batch.
+
+ Args:
+ after: A cursor for use in pagination. `after` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ ending with obj_foo, your subsequent call can include after=obj_foo in order to
+ fetch the next page of the list.
+
+ before: A cursor for use in pagination. `before` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+            starting with obj_foo, your subsequent call can include before=obj_foo in order to
+ fetch the previous page of the list.
+
+ filter: Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.
+
+ limit: A limit on the number of objects to be returned. Limit can range between 1 and
+ 100, and the default is 20.
+
+ order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+ order and `desc` for descending order.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+ if not batch_id:
+ raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return self._get_api_list(
+ f"/vector_stores/{vector_store_id}/file_batches/{batch_id}/files",
+ page=SyncCursorPage[VectorStoreFile],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "before": before,
+ "filter": filter,
+ "limit": limit,
+ "order": order,
+ },
+ file_batch_list_files_params.FileBatchListFilesParams,
+ ),
+ ),
+ model=VectorStoreFile,
+ )
+
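
Because `list_files` returns a `SyncCursorPage`, iterating the result auto-paginates through the batch. For example, to inspect only the files that failed processing (IDs are placeholders):

```python
from openai import OpenAI

client = OpenAI()

for file in client.beta.vector_stores.file_batches.list_files(
    "vsfb_abc123",  # placeholder batch ID
    vector_store_id="vs_abc123",
    filter="failed",
):
    print(file.id, file.last_error)
```
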
+ def poll(
+ self,
+ batch_id: str,
+ *,
+ vector_store_id: str,
+ poll_interval_ms: int | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreFileBatch:
+ """Wait for the given file batch to be processed.
+
+        Note: this will return even if one of the files failed to process; you need to
+        check batch.file_counts.failed to handle this case.
+ """
+ headers: dict[str, str] = {"X-Stainless-Poll-Helper": "true"}
+ if is_given(poll_interval_ms):
+ headers["X-Stainless-Custom-Poll-Interval"] = str(poll_interval_ms)
+
+ while True:
+ response = self.with_raw_response.retrieve(
+ batch_id,
+ vector_store_id=vector_store_id,
+ extra_headers=headers,
+ )
+
+ batch = response.parse()
+ if batch.file_counts.in_progress > 0:
+ if not is_given(poll_interval_ms):
+ from_header = response.headers.get("openai-poll-after-ms")
+ if from_header is not None:
+ poll_interval_ms = int(from_header)
+ else:
+ poll_interval_ms = 1000
+
+ self._sleep(poll_interval_ms / 1000)
+ continue
+
+ return batch
+
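
`poll` sleeps between retrievals using the caller's `poll_interval_ms`, the server-suggested `openai-poll-after-ms` header, or a one-second fallback, in that order, and returns as soon as no files are in progress, even if some of them failed. A sketch of handling that case:

```python
from openai import OpenAI

client = OpenAI()

batch = client.beta.vector_stores.file_batches.poll(
    "vsfb_abc123",  # placeholder batch ID
    vector_store_id="vs_abc123",
)
if batch.file_counts.failed:
    print(f"{batch.file_counts.failed} of {batch.file_counts.total} files failed")
```
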
+ def upload_and_poll(
+ self,
+ vector_store_id: str,
+ *,
+ files: Iterable[FileTypes],
+ max_concurrency: int = 5,
+ file_ids: List[str] = [],
+ poll_interval_ms: int | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreFileBatch:
+ """Uploads the given files concurrently and then creates a vector store file batch.
+
+        If you've already uploaded certain files that you want to include in this batch,
+ then you can pass their IDs through the `file_ids` argument.
+
+ By default, if any file upload fails then an exception will be eagerly raised.
+
+        The number of concurrent uploads is configurable using the `max_concurrency`
+ parameter.
+ """
+ results: list[FileObject] = []
+
+ with ThreadPoolExecutor(max_workers=max_concurrency) as executor:
+ futures: list[Future[FileObject]] = [
+ executor.submit(
+ self._client.files.create,
+ file=file,
+ purpose="assistants",
+ )
+ for file in files
+ ]
+
+ for future in as_completed(futures):
+ exc = future.exception()
+ if exc:
+ raise exc
+
+ results.append(future.result())
+
+ batch = self.create_and_poll(
+ vector_store_id=vector_store_id,
+ file_ids=[*file_ids, *(f.id for f in results)],
+ poll_interval_ms=poll_interval_ms,
+ )
+ return batch
+
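
In the synchronous client the uploads are fanned out over a `ThreadPoolExecutor`, so no async runtime is involved. A minimal sketch with placeholder paths:

```python
from pathlib import Path

from openai import OpenAI

client = OpenAI()

batch = client.beta.vector_stores.file_batches.upload_and_poll(
    "vs_abc123",  # placeholder vector store ID
    files=[Path("report.pdf"), Path("notes.md")],
    max_concurrency=5,  # size of the upload thread pool
)
```
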
+
+class AsyncFileBatches(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncFileBatchesWithRawResponse:
+ return AsyncFileBatchesWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncFileBatchesWithStreamingResponse:
+ return AsyncFileBatchesWithStreamingResponse(self)
+
+ async def create(
+ self,
+ vector_store_id: str,
+ *,
+ file_ids: List[str],
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreFileBatch:
+ """
+ Create a vector store file batch.
+
+ Args:
+ file_ids: A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
+ the vector store should use. Useful for tools like `file_search` that can access
+ files.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return await self._post(
+ f"/vector_stores/{vector_store_id}/file_batches",
+ body=await async_maybe_transform({"file_ids": file_ids}, file_batch_create_params.FileBatchCreateParams),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=VectorStoreFileBatch,
+ )
+
+ async def retrieve(
+ self,
+ batch_id: str,
+ *,
+ vector_store_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreFileBatch:
+ """
+ Retrieves a vector store file batch.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+ if not batch_id:
+ raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return await self._get(
+ f"/vector_stores/{vector_store_id}/file_batches/{batch_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=VectorStoreFileBatch,
+ )
+
+ async def cancel(
+ self,
+ batch_id: str,
+ *,
+ vector_store_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreFileBatch:
+ """Cancel a vector store file batch.
+
+ This attempts to cancel the processing of
+ files in this batch as soon as possible.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+ if not batch_id:
+ raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return await self._post(
+ f"/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=VectorStoreFileBatch,
+ )
+
+ async def create_and_poll(
+ self,
+ vector_store_id: str,
+ *,
+ file_ids: List[str],
+ poll_interval_ms: int | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreFileBatch:
+ """Create a vector store batch and poll until all files have been processed."""
+ batch = await self.create(
+ vector_store_id=vector_store_id,
+ file_ids=file_ids,
+ )
+ # TODO: don't poll unless necessary??
+ return await self.poll(
+ batch.id,
+ vector_store_id=vector_store_id,
+ poll_interval_ms=poll_interval_ms,
+ )
+
+ def list_files(
+ self,
+ batch_id: str,
+ *,
+ vector_store_id: str,
+ after: str | NotGiven = NOT_GIVEN,
+ before: str | NotGiven = NOT_GIVEN,
+ filter: Literal["in_progress", "completed", "failed", "cancelled"] | NotGiven = NOT_GIVEN,
+ limit: int | NotGiven = NOT_GIVEN,
+ order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AsyncPaginator[VectorStoreFile, AsyncCursorPage[VectorStoreFile]]:
+ """
+ Returns a list of vector store files in a batch.
+
+ Args:
+ after: A cursor for use in pagination. `after` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ ending with obj_foo, your subsequent call can include after=obj_foo in order to
+ fetch the next page of the list.
+
+ before: A cursor for use in pagination. `before` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+            starting with obj_foo, your subsequent call can include before=obj_foo in order to
+ fetch the previous page of the list.
+
+ filter: Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.
+
+ limit: A limit on the number of objects to be returned. Limit can range between 1 and
+ 100, and the default is 20.
+
+ order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+ order and `desc` for descending order.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+ if not batch_id:
+ raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return self._get_api_list(
+ f"/vector_stores/{vector_store_id}/file_batches/{batch_id}/files",
+ page=AsyncCursorPage[VectorStoreFile],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "before": before,
+ "filter": filter,
+ "limit": limit,
+ "order": order,
+ },
+ file_batch_list_files_params.FileBatchListFilesParams,
+ ),
+ ),
+ model=VectorStoreFile,
+ )
+
+ async def poll(
+ self,
+ batch_id: str,
+ *,
+ vector_store_id: str,
+ poll_interval_ms: int | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreFileBatch:
+ """Wait for the given file batch to be processed.
+
+        Note: this will return even if one of the files failed to process; you need to
+        check batch.file_counts.failed to handle this case.
+ """
+ headers: dict[str, str] = {"X-Stainless-Poll-Helper": "true"}
+ if is_given(poll_interval_ms):
+ headers["X-Stainless-Custom-Poll-Interval"] = str(poll_interval_ms)
+
+ while True:
+ response = await self.with_raw_response.retrieve(
+ batch_id,
+ vector_store_id=vector_store_id,
+ extra_headers=headers,
+ )
+
+ batch = response.parse()
+ if batch.file_counts.in_progress > 0:
+ if not is_given(poll_interval_ms):
+ from_header = response.headers.get("openai-poll-after-ms")
+ if from_header is not None:
+ poll_interval_ms = int(from_header)
+ else:
+ poll_interval_ms = 1000
+
+ await self._sleep(poll_interval_ms / 1000)
+ continue
+
+ return batch
+
+ async def upload_and_poll(
+ self,
+ vector_store_id: str,
+ *,
+ files: Iterable[FileTypes],
+ max_concurrency: int = 5,
+ file_ids: List[str] = [],
+ poll_interval_ms: int | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreFileBatch:
+ """Uploads the given files concurrently and then creates a vector store file batch.
+
+        If you've already uploaded certain files that you want to include in this batch,
+ then you can pass their IDs through the `file_ids` argument.
+
+ By default, if any file upload fails then an exception will be eagerly raised.
+
+        The number of concurrent uploads is configurable using the `max_concurrency`
+ parameter.
+
+ Note: this method only supports `asyncio` or `trio` as the backing async
+ runtime.
+ """
+ uploaded_files: list[FileObject] = []
+
+ async_library = sniffio.current_async_library()
+
+ if async_library == "asyncio":
+
+ async def asyncio_upload_file(semaphore: asyncio.Semaphore, file: FileTypes) -> None:
+ async with semaphore:
+ file_obj = await self._client.files.create(
+ file=file,
+ purpose="assistants",
+ )
+ uploaded_files.append(file_obj)
+
+ semaphore = asyncio.Semaphore(max_concurrency)
+
+ tasks = [asyncio_upload_file(semaphore, file) for file in files]
+
+ await asyncio.gather(*tasks)
+ elif async_library == "trio":
+ # We only import if the library is being used.
+        # We support Python 3.7, so we use an older version of trio that does not have type information.
+ import trio # type: ignore # pyright: ignore[reportMissingTypeStubs]
+
+ async def trio_upload_file(limiter: trio.CapacityLimiter, file: FileTypes) -> None:
+ async with limiter:
+ file_obj = await self._client.files.create(
+ file=file,
+ purpose="assistants",
+ )
+ uploaded_files.append(file_obj)
+
+ limiter = trio.CapacityLimiter(max_concurrency)
+
+ async with trio.open_nursery() as nursery:
+ for file in files:
+ nursery.start_soon(trio_upload_file, limiter, file) # pyright: ignore [reportUnknownMemberType]
+ else:
+ raise RuntimeError(
+ f"Async runtime {async_library} is not supported yet. Only asyncio or trio is supported",
+ )
+
+ batch = await self.create_and_poll(
+ vector_store_id=vector_store_id,
+ file_ids=[*file_ids, *(f.id for f in uploaded_files)],
+ poll_interval_ms=poll_interval_ms,
+ )
+ return batch
+
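
The async variant detects the running event loop via `sniffio`, so the same helper should work under trio as well as asyncio (httpx, which the client uses internally, supports both backends). A hedged sketch of the trio path:

```python
from pathlib import Path

import trio

from openai import AsyncOpenAI

client = AsyncOpenAI()


async def main() -> None:
    batch = await client.beta.vector_stores.file_batches.upload_and_poll(
        "vs_abc123",  # placeholder vector store ID
        files=[Path("report.pdf"), Path("notes.md")],
        max_concurrency=3,  # enforced with a trio.CapacityLimiter under trio
    )
    print(batch.file_counts)


trio.run(main)
```
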
+
+class FileBatchesWithRawResponse:
+ def __init__(self, file_batches: FileBatches) -> None:
+ self._file_batches = file_batches
+
+ self.create = _legacy_response.to_raw_response_wrapper(
+ file_batches.create,
+ )
+ self.retrieve = _legacy_response.to_raw_response_wrapper(
+ file_batches.retrieve,
+ )
+ self.cancel = _legacy_response.to_raw_response_wrapper(
+ file_batches.cancel,
+ )
+ self.list_files = _legacy_response.to_raw_response_wrapper(
+ file_batches.list_files,
+ )
+
+
+class AsyncFileBatchesWithRawResponse:
+ def __init__(self, file_batches: AsyncFileBatches) -> None:
+ self._file_batches = file_batches
+
+ self.create = _legacy_response.async_to_raw_response_wrapper(
+ file_batches.create,
+ )
+ self.retrieve = _legacy_response.async_to_raw_response_wrapper(
+ file_batches.retrieve,
+ )
+ self.cancel = _legacy_response.async_to_raw_response_wrapper(
+ file_batches.cancel,
+ )
+ self.list_files = _legacy_response.async_to_raw_response_wrapper(
+ file_batches.list_files,
+ )
+
+
+class FileBatchesWithStreamingResponse:
+ def __init__(self, file_batches: FileBatches) -> None:
+ self._file_batches = file_batches
+
+ self.create = to_streamed_response_wrapper(
+ file_batches.create,
+ )
+ self.retrieve = to_streamed_response_wrapper(
+ file_batches.retrieve,
+ )
+ self.cancel = to_streamed_response_wrapper(
+ file_batches.cancel,
+ )
+ self.list_files = to_streamed_response_wrapper(
+ file_batches.list_files,
+ )
+
+
+class AsyncFileBatchesWithStreamingResponse:
+ def __init__(self, file_batches: AsyncFileBatches) -> None:
+ self._file_batches = file_batches
+
+ self.create = async_to_streamed_response_wrapper(
+ file_batches.create,
+ )
+ self.retrieve = async_to_streamed_response_wrapper(
+ file_batches.retrieve,
+ )
+ self.cancel = async_to_streamed_response_wrapper(
+ file_batches.cancel,
+ )
+ self.list_files = async_to_streamed_response_wrapper(
+ file_batches.list_files,
+ )
diff --git a/src/openai/resources/beta/assistants/files.py b/src/openai/resources/beta/vector_stores/files.py
similarity index 57%
rename from src/openai/resources/beta/assistants/files.py
rename to src/openai/resources/beta/vector_stores/files.py
index dc57dfb96c..6404b9d54c 100644
--- a/src/openai/resources/beta/assistants/files.py
+++ b/src/openai/resources/beta/vector_stores/files.py
@@ -2,13 +2,15 @@
from __future__ import annotations
-from typing_extensions import Literal
+from typing import TYPE_CHECKING
+from typing_extensions import Literal, assert_never
import httpx
from .... import _legacy_response
-from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes
from ...._utils import (
+ is_given,
maybe_transform,
async_maybe_transform,
)
@@ -20,7 +22,7 @@
AsyncPaginator,
make_request_options,
)
-from ....types.beta.assistants import AssistantFile, FileDeleteResponse, file_list_params, file_create_params
+from ....types.beta.vector_stores import VectorStoreFile, VectorStoreFileDeleted, file_list_params, file_create_params
__all__ = ["Files", "AsyncFiles"]
@@ -36,7 +38,7 @@ def with_streaming_response(self) -> FilesWithStreamingResponse:
def create(
self,
- assistant_id: str,
+ vector_store_id: str,
*,
file_id: str,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -45,16 +47,16 @@ def create(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AssistantFile:
+ ) -> VectorStoreFile:
"""
- Create an assistant file by attaching a
- [File](https://platform.openai.com/docs/api-reference/files) to an
- [assistant](https://platform.openai.com/docs/api-reference/assistants).
+ Create a vector store file by attaching a
+ [File](https://platform.openai.com/docs/api-reference/files) to a
+ [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object).
Args:
- file_id: A [File](https://platform.openai.com/docs/api-reference/files) ID (with
- `purpose="assistants"`) that the assistant should use. Useful for tools like
- `retrieval` and `code_interpreter` that can access files.
+ file_id: A [File](https://platform.openai.com/docs/api-reference/files) ID that the
+ vector store should use. Useful for tools like `file_search` that can access
+ files.
extra_headers: Send extra headers
@@ -64,32 +66,32 @@ def create(
timeout: Override the client-level default timeout for this request, in seconds
"""
- if not assistant_id:
- raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._post(
- f"/assistants/{assistant_id}/files",
+ f"/vector_stores/{vector_store_id}/files",
body=maybe_transform({"file_id": file_id}, file_create_params.FileCreateParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=AssistantFile,
+ cast_to=VectorStoreFile,
)
def retrieve(
self,
file_id: str,
*,
- assistant_id: str,
+ vector_store_id: str,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AssistantFile:
+ ) -> VectorStoreFile:
"""
- Retrieves an AssistantFile.
+ Retrieves a vector store file.
Args:
extra_headers: Send extra headers
@@ -100,25 +102,26 @@ def retrieve(
timeout: Override the client-level default timeout for this request, in seconds
"""
- if not assistant_id:
- raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get(
- f"/assistants/{assistant_id}/files/{file_id}",
+ f"/vector_stores/{vector_store_id}/files/{file_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=AssistantFile,
+ cast_to=VectorStoreFile,
)
def list(
self,
- assistant_id: str,
+ vector_store_id: str,
*,
after: str | NotGiven = NOT_GIVEN,
before: str | NotGiven = NOT_GIVEN,
+ filter: Literal["in_progress", "completed", "failed", "cancelled"] | NotGiven = NOT_GIVEN,
limit: int | NotGiven = NOT_GIVEN,
order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -127,9 +130,9 @@ def list(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> SyncCursorPage[AssistantFile]:
+ ) -> SyncCursorPage[VectorStoreFile]:
"""
- Returns a list of assistant files.
+ Returns a list of vector store files.
Args:
after: A cursor for use in pagination. `after` is an object ID that defines your place
@@ -142,6 +145,8 @@ def list(
ending with obj_foo, your subsequent call can include before=obj_foo in order to
fetch the previous page of the list.
+ filter: Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.
+
limit: A limit on the number of objects to be returned. Limit can range between 1 and
100, and the default is 20.
@@ -156,12 +161,12 @@ def list(
timeout: Override the client-level default timeout for this request, in seconds
"""
- if not assistant_id:
- raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get_api_list(
- f"/assistants/{assistant_id}/files",
- page=SyncCursorPage[AssistantFile],
+ f"/vector_stores/{vector_store_id}/files",
+ page=SyncCursorPage[VectorStoreFile],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -171,29 +176,34 @@ def list(
{
"after": after,
"before": before,
+ "filter": filter,
"limit": limit,
"order": order,
},
file_list_params.FileListParams,
),
),
- model=AssistantFile,
+ model=VectorStoreFile,
)
def delete(
self,
file_id: str,
*,
- assistant_id: str,
+ vector_store_id: str,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> FileDeleteResponse:
- """
- Delete an assistant file.
+ ) -> VectorStoreFileDeleted:
+ """Delete a vector store file.
+
+ This will remove the file from the vector store but
+ the file itself will not be deleted. To delete the file, use the
+ [delete file](https://platform.openai.com/docs/api-reference/files/delete)
+ endpoint.
Args:
extra_headers: Send extra headers
@@ -204,17 +214,103 @@ def delete(
timeout: Override the client-level default timeout for this request, in seconds
"""
- if not assistant_id:
- raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._delete(
- f"/assistants/{assistant_id}/files/{file_id}",
+ f"/vector_stores/{vector_store_id}/files/{file_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=FileDeleteResponse,
+ cast_to=VectorStoreFileDeleted,
+ )
+
+ def create_and_poll(
+ self,
+ file_id: str,
+ *,
+ vector_store_id: str,
+ poll_interval_ms: int | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreFile:
+ """Attach a file to the given vector store and wait for it to be processed."""
+ self.create(vector_store_id=vector_store_id, file_id=file_id)
+
+ return self.poll(
+ file_id,
+ vector_store_id=vector_store_id,
+ poll_interval_ms=poll_interval_ms,
+ )
+
+ def poll(
+ self,
+ file_id: str,
+ *,
+ vector_store_id: str,
+ poll_interval_ms: int | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreFile:
+ """Wait for the vector store file to finish processing.
+
+        Note: this will return even if the file failed to process; you need to check
+        file.last_error and file.status to handle these cases.
+ """
+ headers: dict[str, str] = {"X-Stainless-Poll-Helper": "true"}
+ if is_given(poll_interval_ms):
+ headers["X-Stainless-Custom-Poll-Interval"] = str(poll_interval_ms)
+
+ while True:
+ response = self.with_raw_response.retrieve(
+ file_id,
+ vector_store_id=vector_store_id,
+ extra_headers=headers,
+ )
+
+ file = response.parse()
+ if file.status == "in_progress":
+ if not is_given(poll_interval_ms):
+ from_header = response.headers.get("openai-poll-after-ms")
+ if from_header is not None:
+ poll_interval_ms = int(from_header)
+ else:
+ poll_interval_ms = 1000
+
+ self._sleep(poll_interval_ms / 1000)
+ elif file.status == "cancelled" or file.status == "completed" or file.status == "failed":
+ return file
+ else:
+ if TYPE_CHECKING: # type: ignore[unreachable]
+ assert_never(file.status)
+ else:
+ return file
+
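
The `assert_never` branch above gives static exhaustiveness checking over the status union while still returning the file at runtime. The pattern in isolation, with a stand-in `Status` alias:

```python
from typing_extensions import Literal, assert_never

Status = Literal["in_progress", "completed", "failed", "cancelled"]


def is_terminal(status: Status) -> bool:
    if status == "in_progress":
        return False
    elif status == "completed" or status == "failed" or status == "cancelled":
        return True
    else:
        # If a new member is added to `Status`, type checkers flag this branch.
        assert_never(status)
```
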
+ def upload(
+ self,
+ *,
+ vector_store_id: str,
+ file: FileTypes,
+ ) -> VectorStoreFile:
+ """Upload a file to the `files` API and then attach it to the given vector store.
+
+        Note: the file will be processed asynchronously (you can use the alternative
+ polling helper method to wait for processing to complete).
+ """
+ file_obj = self._client.files.create(file=file, purpose="assistants")
+ return self.create(vector_store_id=vector_store_id, file_id=file_obj.id)
+
+ def upload_and_poll(
+ self,
+ *,
+ vector_store_id: str,
+ file: FileTypes,
+ poll_interval_ms: int | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreFile:
+ """Add a file to a vector store and poll until processing is complete."""
+ file_obj = self._client.files.create(file=file, purpose="assistants")
+ return self.create_and_poll(
+ vector_store_id=vector_store_id,
+ file_id=file_obj.id,
+ poll_interval_ms=poll_interval_ms,
)
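
`upload` and `upload_and_poll` differ only in whether they wait for processing to finish. For a single file, the polling variant might look like this (path and ID are placeholders):

```python
from pathlib import Path

from openai import OpenAI

client = OpenAI()

vector_store_file = client.beta.vector_stores.files.upload_and_poll(
    vector_store_id="vs_abc123",  # placeholder vector store ID
    file=Path("handbook.pdf"),
)
if vector_store_file.status == "failed":
    print(vector_store_file.last_error)
```
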
@@ -229,7 +325,7 @@ def with_streaming_response(self) -> AsyncFilesWithStreamingResponse:
async def create(
self,
- assistant_id: str,
+ vector_store_id: str,
*,
file_id: str,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -238,16 +334,16 @@ async def create(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AssistantFile:
+ ) -> VectorStoreFile:
"""
- Create an assistant file by attaching a
- [File](https://platform.openai.com/docs/api-reference/files) to an
- [assistant](https://platform.openai.com/docs/api-reference/assistants).
+ Create a vector store file by attaching a
+ [File](https://platform.openai.com/docs/api-reference/files) to a
+ [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object).
Args:
- file_id: A [File](https://platform.openai.com/docs/api-reference/files) ID (with
- `purpose="assistants"`) that the assistant should use. Useful for tools like
- `retrieval` and `code_interpreter` that can access files.
+ file_id: A [File](https://platform.openai.com/docs/api-reference/files) ID that the
+ vector store should use. Useful for tools like `file_search` that can access
+ files.
extra_headers: Send extra headers
@@ -257,32 +353,32 @@ async def create(
timeout: Override the client-level default timeout for this request, in seconds
"""
- if not assistant_id:
- raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._post(
- f"/assistants/{assistant_id}/files",
+ f"/vector_stores/{vector_store_id}/files",
body=await async_maybe_transform({"file_id": file_id}, file_create_params.FileCreateParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=AssistantFile,
+ cast_to=VectorStoreFile,
)
async def retrieve(
self,
file_id: str,
*,
- assistant_id: str,
+ vector_store_id: str,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AssistantFile:
+ ) -> VectorStoreFile:
"""
- Retrieves an AssistantFile.
+ Retrieves a vector store file.
Args:
extra_headers: Send extra headers
@@ -293,25 +389,26 @@ async def retrieve(
timeout: Override the client-level default timeout for this request, in seconds
"""
- if not assistant_id:
- raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._get(
- f"/assistants/{assistant_id}/files/{file_id}",
+ f"/vector_stores/{vector_store_id}/files/{file_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=AssistantFile,
+ cast_to=VectorStoreFile,
)
def list(
self,
- assistant_id: str,
+ vector_store_id: str,
*,
after: str | NotGiven = NOT_GIVEN,
before: str | NotGiven = NOT_GIVEN,
+ filter: Literal["in_progress", "completed", "failed", "cancelled"] | NotGiven = NOT_GIVEN,
limit: int | NotGiven = NOT_GIVEN,
order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -320,9 +417,9 @@ def list(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AsyncPaginator[AssistantFile, AsyncCursorPage[AssistantFile]]:
+ ) -> AsyncPaginator[VectorStoreFile, AsyncCursorPage[VectorStoreFile]]:
"""
- Returns a list of assistant files.
+ Returns a list of vector store files.
Args:
after: A cursor for use in pagination. `after` is an object ID that defines your place
@@ -335,6 +432,8 @@ def list(
ending with obj_foo, your subsequent call can include before=obj_foo in order to
fetch the previous page of the list.
+ filter: Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.
+
limit: A limit on the number of objects to be returned. Limit can range between 1 and
100, and the default is 20.
@@ -349,12 +448,12 @@ def list(
timeout: Override the client-level default timeout for this request, in seconds
"""
- if not assistant_id:
- raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get_api_list(
- f"/assistants/{assistant_id}/files",
- page=AsyncCursorPage[AssistantFile],
+ f"/vector_stores/{vector_store_id}/files",
+ page=AsyncCursorPage[VectorStoreFile],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -364,29 +463,34 @@ def list(
{
"after": after,
"before": before,
+ "filter": filter,
"limit": limit,
"order": order,
},
file_list_params.FileListParams,
),
),
- model=AssistantFile,
+ model=VectorStoreFile,
)
async def delete(
self,
file_id: str,
*,
- assistant_id: str,
+ vector_store_id: str,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> FileDeleteResponse:
- """
- Delete an assistant file.
+ ) -> VectorStoreFileDeleted:
+ """Delete a vector store file.
+
+ This will remove the file from the vector store but
+ the file itself will not be deleted. To delete the file, use the
+ [delete file](https://platform.openai.com/docs/api-reference/files/delete)
+ endpoint.
Args:
extra_headers: Send extra headers
@@ -397,17 +501,103 @@ async def delete(
timeout: Override the client-level default timeout for this request, in seconds
"""
- if not assistant_id:
- raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
- extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})}
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._delete(
- f"/assistants/{assistant_id}/files/{file_id}",
+ f"/vector_stores/{vector_store_id}/files/{file_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=FileDeleteResponse,
+ cast_to=VectorStoreFileDeleted,
+ )
+
+ async def create_and_poll(
+ self,
+ file_id: str,
+ *,
+ vector_store_id: str,
+ poll_interval_ms: int | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreFile:
+ """Attach a file to the given vector store and wait for it to be processed."""
+ await self.create(vector_store_id=vector_store_id, file_id=file_id)
+
+ return await self.poll(
+ file_id,
+ vector_store_id=vector_store_id,
+ poll_interval_ms=poll_interval_ms,
+ )
+
+ async def poll(
+ self,
+ file_id: str,
+ *,
+ vector_store_id: str,
+ poll_interval_ms: int | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreFile:
+ """Wait for the vector store file to finish processing.
+
+        Note: this will return even if the file failed to process; you need to check
+        file.last_error and file.status to handle these cases.
+ """
+ headers: dict[str, str] = {"X-Stainless-Poll-Helper": "true"}
+ if is_given(poll_interval_ms):
+ headers["X-Stainless-Custom-Poll-Interval"] = str(poll_interval_ms)
+
+ while True:
+ response = await self.with_raw_response.retrieve(
+ file_id,
+ vector_store_id=vector_store_id,
+ extra_headers=headers,
+ )
+
+ file = response.parse()
+ if file.status == "in_progress":
+ if not is_given(poll_interval_ms):
+ from_header = response.headers.get("openai-poll-after-ms")
+ if from_header is not None:
+ poll_interval_ms = int(from_header)
+ else:
+ poll_interval_ms = 1000
+
+ await self._sleep(poll_interval_ms / 1000)
+ elif file.status == "cancelled" or file.status == "completed" or file.status == "failed":
+ return file
+ else:
+ if TYPE_CHECKING: # type: ignore[unreachable]
+ assert_never(file.status)
+ else:
+ return file
+
+ async def upload(
+ self,
+ *,
+ vector_store_id: str,
+ file: FileTypes,
+ ) -> VectorStoreFile:
+ """Upload a file to the `files` API and then attach it to the given vector store.
+
+        Note: the file will be processed asynchronously (you can use the alternative
+ polling helper method to wait for processing to complete).
+ """
+ file_obj = await self._client.files.create(file=file, purpose="assistants")
+ return await self.create(vector_store_id=vector_store_id, file_id=file_obj.id)
+
+ async def upload_and_poll(
+ self,
+ *,
+ vector_store_id: str,
+ file: FileTypes,
+ poll_interval_ms: int | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreFile:
+ """Add a file to a vector store and poll until processing is complete."""
+ file_obj = await self._client.files.create(file=file, purpose="assistants")
+ return await self.create_and_poll(
+ vector_store_id=vector_store_id,
+ file_id=file_obj.id,
+ poll_interval_ms=poll_interval_ms,
)
diff --git a/src/openai/resources/beta/vector_stores/vector_stores.py b/src/openai/resources/beta/vector_stores/vector_stores.py
new file mode 100644
index 0000000000..6e2c9ab70c
--- /dev/null
+++ b/src/openai/resources/beta/vector_stores/vector_stores.py
@@ -0,0 +1,688 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+import httpx
+
+from .... import _legacy_response
+from .files import (
+ Files,
+ AsyncFiles,
+ FilesWithRawResponse,
+ AsyncFilesWithRawResponse,
+ FilesWithStreamingResponse,
+ AsyncFilesWithStreamingResponse,
+)
+from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ...._utils import (
+ maybe_transform,
+ async_maybe_transform,
+)
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from .file_batches import (
+ FileBatches,
+ AsyncFileBatches,
+ FileBatchesWithRawResponse,
+ AsyncFileBatchesWithRawResponse,
+ FileBatchesWithStreamingResponse,
+ AsyncFileBatchesWithStreamingResponse,
+)
+from ....pagination import SyncCursorPage, AsyncCursorPage
+from ....types.beta import (
+ VectorStore,
+ VectorStoreDeleted,
+ vector_store_list_params,
+ vector_store_create_params,
+ vector_store_update_params,
+)
+from ...._base_client import (
+ AsyncPaginator,
+ make_request_options,
+)
+
+__all__ = ["VectorStores", "AsyncVectorStores"]
+
+
+class VectorStores(SyncAPIResource):
+ @cached_property
+ def files(self) -> Files:
+ return Files(self._client)
+
+ @cached_property
+ def file_batches(self) -> FileBatches:
+ return FileBatches(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> VectorStoresWithRawResponse:
+ return VectorStoresWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> VectorStoresWithStreamingResponse:
+ return VectorStoresWithStreamingResponse(self)
+
+ def create(
+ self,
+ *,
+ expires_after: vector_store_create_params.ExpiresAfter | NotGiven = NOT_GIVEN,
+ file_ids: List[str] | NotGiven = NOT_GIVEN,
+ metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ name: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> VectorStore:
+ """
+ Create a vector store.
+
+ Args:
+ expires_after: The expiration policy for a vector store.
+
+ file_ids: A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
+ the vector store should use. Useful for tools like `file_search` that can access
+ files.
+
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format. Keys
+ can be a maximum of 64 characters long and values can be a maximum of 512
+ characters long.
+
+ name: The name of the vector store.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return self._post(
+ "/vector_stores",
+ body=maybe_transform(
+ {
+ "expires_after": expires_after,
+ "file_ids": file_ids,
+ "metadata": metadata,
+ "name": name,
+ },
+ vector_store_create_params.VectorStoreCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=VectorStore,
+ )
+
+ def retrieve(
+ self,
+ vector_store_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> VectorStore:
+ """
+ Retrieves a vector store.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return self._get(
+ f"/vector_stores/{vector_store_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=VectorStore,
+ )
+
+ def update(
+ self,
+ vector_store_id: str,
+ *,
+ expires_after: Optional[vector_store_update_params.ExpiresAfter] | NotGiven = NOT_GIVEN,
+ metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ name: Optional[str] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> VectorStore:
+ """
+ Modifies a vector store.
+
+ Args:
+ expires_after: The expiration policy for a vector store.
+
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format. Keys
+ can be a maximum of 64 characters long and values can be a maximum of 512
+ characters long.
+
+ name: The name of the vector store.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return self._post(
+ f"/vector_stores/{vector_store_id}",
+ body=maybe_transform(
+ {
+ "expires_after": expires_after,
+ "metadata": metadata,
+ "name": name,
+ },
+ vector_store_update_params.VectorStoreUpdateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=VectorStore,
+ )
+
+ def list(
+ self,
+ *,
+ after: str | NotGiven = NOT_GIVEN,
+ before: str | NotGiven = NOT_GIVEN,
+ limit: int | NotGiven = NOT_GIVEN,
+ order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> SyncCursorPage[VectorStore]:
+ """Returns a list of vector stores.
+
+ Args:
+ after: A cursor for use in pagination.
+
+ `after` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ ending with obj_foo, your subsequent call can include after=obj_foo in order to
+ fetch the next page of the list.
+
+ before: A cursor for use in pagination. `before` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ ending with obj_foo, your subsequent call can include before=obj_foo in order to
+ fetch the previous page of the list.
+
+ limit: A limit on the number of objects to be returned. Limit can range between 1 and
+ 100, and the default is 20.
+
+ order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+ order and `desc` for descending order.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return self._get_api_list(
+ "/vector_stores",
+ page=SyncCursorPage[VectorStore],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "before": before,
+ "limit": limit,
+ "order": order,
+ },
+ vector_store_list_params.VectorStoreListParams,
+ ),
+ ),
+ model=VectorStore,
+ )
+
+ def delete(
+ self,
+ vector_store_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreDeleted:
+ """
+ Delete a vector store.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return self._delete(
+ f"/vector_stores/{vector_store_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=VectorStoreDeleted,
+ )
+
+
+class AsyncVectorStores(AsyncAPIResource):
+ @cached_property
+ def files(self) -> AsyncFiles:
+ return AsyncFiles(self._client)
+
+ @cached_property
+ def file_batches(self) -> AsyncFileBatches:
+ return AsyncFileBatches(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AsyncVectorStoresWithRawResponse:
+ return AsyncVectorStoresWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncVectorStoresWithStreamingResponse:
+ return AsyncVectorStoresWithStreamingResponse(self)
+
+ async def create(
+ self,
+ *,
+ expires_after: vector_store_create_params.ExpiresAfter | NotGiven = NOT_GIVEN,
+ file_ids: List[str] | NotGiven = NOT_GIVEN,
+ metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ name: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> VectorStore:
+ """
+ Create a vector store.
+
+ Args:
+ expires_after: The expiration policy for a vector store.
+
+ file_ids: A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
+ the vector store should use. Useful for tools like `file_search` that can access
+ files.
+
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format. Keys
+ can be a maximum of 64 characters long and values can be a maximum of 512
+ characters long.
+
+ name: The name of the vector store.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return await self._post(
+ "/vector_stores",
+ body=await async_maybe_transform(
+ {
+ "expires_after": expires_after,
+ "file_ids": file_ids,
+ "metadata": metadata,
+ "name": name,
+ },
+ vector_store_create_params.VectorStoreCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=VectorStore,
+ )
+
+ async def retrieve(
+ self,
+ vector_store_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> VectorStore:
+ """
+ Retrieves a vector store.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return await self._get(
+ f"/vector_stores/{vector_store_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=VectorStore,
+ )
+
+ async def update(
+ self,
+ vector_store_id: str,
+ *,
+ expires_after: Optional[vector_store_update_params.ExpiresAfter] | NotGiven = NOT_GIVEN,
+ metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ name: Optional[str] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> VectorStore:
+ """
+ Modifies a vector store.
+
+ Args:
+ expires_after: The expiration policy for a vector store.
+
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format. Keys
+ can be a maximum of 64 characters long and values can be a maximum of 512
+ characters long.
+
+ name: The name of the vector store.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return await self._post(
+ f"/vector_stores/{vector_store_id}",
+ body=await async_maybe_transform(
+ {
+ "expires_after": expires_after,
+ "metadata": metadata,
+ "name": name,
+ },
+ vector_store_update_params.VectorStoreUpdateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=VectorStore,
+ )
+
+ def list(
+ self,
+ *,
+ after: str | NotGiven = NOT_GIVEN,
+ before: str | NotGiven = NOT_GIVEN,
+ limit: int | NotGiven = NOT_GIVEN,
+ order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AsyncPaginator[VectorStore, AsyncCursorPage[VectorStore]]:
+ """Returns a list of vector stores.
+
+ Args:
+ after: A cursor for use in pagination.
+
+ `after` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ ending with obj_foo, your subsequent call can include after=obj_foo in order to
+ fetch the next page of the list.
+
+ before: A cursor for use in pagination. `before` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ ending with obj_foo, your subsequent call can include before=obj_foo in order to
+ fetch the previous page of the list.
+
+ limit: A limit on the number of objects to be returned. Limit can range between 1 and
+ 100, and the default is 20.
+
+ order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+ order and `desc` for descending order.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return self._get_api_list(
+ "/vector_stores",
+ page=AsyncCursorPage[VectorStore],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "before": before,
+ "limit": limit,
+ "order": order,
+ },
+ vector_store_list_params.VectorStoreListParams,
+ ),
+ ),
+ model=VectorStore,
+ )
+
+ async def delete(
+ self,
+ vector_store_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> VectorStoreDeleted:
+ """
+ Delete a vector store.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+ return await self._delete(
+ f"/vector_stores/{vector_store_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=VectorStoreDeleted,
+ )
+
+
+class VectorStoresWithRawResponse:
+ def __init__(self, vector_stores: VectorStores) -> None:
+ self._vector_stores = vector_stores
+
+ self.create = _legacy_response.to_raw_response_wrapper(
+ vector_stores.create,
+ )
+ self.retrieve = _legacy_response.to_raw_response_wrapper(
+ vector_stores.retrieve,
+ )
+ self.update = _legacy_response.to_raw_response_wrapper(
+ vector_stores.update,
+ )
+ self.list = _legacy_response.to_raw_response_wrapper(
+ vector_stores.list,
+ )
+ self.delete = _legacy_response.to_raw_response_wrapper(
+ vector_stores.delete,
+ )
+
+ @cached_property
+ def files(self) -> FilesWithRawResponse:
+ return FilesWithRawResponse(self._vector_stores.files)
+
+ @cached_property
+ def file_batches(self) -> FileBatchesWithRawResponse:
+ return FileBatchesWithRawResponse(self._vector_stores.file_batches)
+
+
+class AsyncVectorStoresWithRawResponse:
+ def __init__(self, vector_stores: AsyncVectorStores) -> None:
+ self._vector_stores = vector_stores
+
+ self.create = _legacy_response.async_to_raw_response_wrapper(
+ vector_stores.create,
+ )
+ self.retrieve = _legacy_response.async_to_raw_response_wrapper(
+ vector_stores.retrieve,
+ )
+ self.update = _legacy_response.async_to_raw_response_wrapper(
+ vector_stores.update,
+ )
+ self.list = _legacy_response.async_to_raw_response_wrapper(
+ vector_stores.list,
+ )
+ self.delete = _legacy_response.async_to_raw_response_wrapper(
+ vector_stores.delete,
+ )
+
+ @cached_property
+ def files(self) -> AsyncFilesWithRawResponse:
+ return AsyncFilesWithRawResponse(self._vector_stores.files)
+
+ @cached_property
+ def file_batches(self) -> AsyncFileBatchesWithRawResponse:
+ return AsyncFileBatchesWithRawResponse(self._vector_stores.file_batches)
+
+
+class VectorStoresWithStreamingResponse:
+ def __init__(self, vector_stores: VectorStores) -> None:
+ self._vector_stores = vector_stores
+
+ self.create = to_streamed_response_wrapper(
+ vector_stores.create,
+ )
+ self.retrieve = to_streamed_response_wrapper(
+ vector_stores.retrieve,
+ )
+ self.update = to_streamed_response_wrapper(
+ vector_stores.update,
+ )
+ self.list = to_streamed_response_wrapper(
+ vector_stores.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ vector_stores.delete,
+ )
+
+ @cached_property
+ def files(self) -> FilesWithStreamingResponse:
+ return FilesWithStreamingResponse(self._vector_stores.files)
+
+ @cached_property
+ def file_batches(self) -> FileBatchesWithStreamingResponse:
+ return FileBatchesWithStreamingResponse(self._vector_stores.file_batches)
+
+
+class AsyncVectorStoresWithStreamingResponse:
+ def __init__(self, vector_stores: AsyncVectorStores) -> None:
+ self._vector_stores = vector_stores
+
+ self.create = async_to_streamed_response_wrapper(
+ vector_stores.create,
+ )
+ self.retrieve = async_to_streamed_response_wrapper(
+ vector_stores.retrieve,
+ )
+ self.update = async_to_streamed_response_wrapper(
+ vector_stores.update,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ vector_stores.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ vector_stores.delete,
+ )
+
+ @cached_property
+ def files(self) -> AsyncFilesWithStreamingResponse:
+ return AsyncFilesWithStreamingResponse(self._vector_stores.files)
+
+ @cached_property
+ def file_batches(self) -> AsyncFileBatchesWithStreamingResponse:
+ return AsyncFileBatchesWithStreamingResponse(self._vector_stores.file_batches)
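
Taken together, the new resource exposes the usual CRUD surface plus cursor pagination. A sketch with the sync client; the `anchor`/`days` shape for `expires_after` follows the public API docs rather than anything shown in this diff, and all names are illustrative:

```python
from openai import OpenAI

client = OpenAI()

# Create a store the API will expire after 7 idle days (expires_after shape
# assumed from the public API docs; ExpiresAfter is not expanded in this diff).
store = client.beta.vector_stores.create(
    name="Knowledge Base",
    expires_after={"anchor": "last_active_at", "days": 7},
)

# Rename it in place.
store = client.beta.vector_stores.update(store.id, name="KB (prod)")

# SyncCursorPage iterates across pages transparently.
for vs in client.beta.vector_stores.list(limit=20, order="desc"):
    print(vs.id, vs.name)

client.beta.vector_stores.delete(store.id)
```
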
diff --git a/src/openai/resources/fine_tuning/jobs/jobs.py b/src/openai/resources/fine_tuning/jobs/jobs.py
index 229f716c48..8e49571b14 100644
--- a/src/openai/resources/fine_tuning/jobs/jobs.py
+++ b/src/openai/resources/fine_tuning/jobs/jobs.py
@@ -85,7 +85,7 @@ def create(
training_file: The ID of an uploaded file that contains training data.
- See [upload file](https://platform.openai.com/docs/api-reference/files/upload)
+ See [upload file](https://platform.openai.com/docs/api-reference/files/create)
for how to upload a file.
Your dataset must be formatted as a JSONL file. Additionally, you must upload
@@ -360,7 +360,7 @@ async def create(
training_file: The ID of an uploaded file that contains training data.
- See [upload file](https://platform.openai.com/docs/api-reference/files/upload)
+ See [upload file](https://platform.openai.com/docs/api-reference/files/create)
for how to upload a file.
Your dataset must be formatted as a JSONL file. Additionally, you must upload
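
The corrected link points at the file-create endpoint; in SDK terms the flow it documents looks like the sketch below. The model name and file name are placeholders:

```python
from openai import OpenAI

client = OpenAI()

# Upload the JSONL training data, then reference its ID as `training_file`.
training = client.files.create(file=open("train.jsonl", "rb"), purpose="fine-tune")
job = client.fine_tuning.jobs.create(
    model="gpt-3.5-turbo",
    training_file=training.id,
)
print(job.id, job.status)
```
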
diff --git a/src/openai/types/beta/__init__.py b/src/openai/types/beta/__init__.py
index 0171694587..d851a3619c 100644
--- a/src/openai/types/beta/__init__.py
+++ b/src/openai/types/beta/__init__.py
@@ -4,23 +4,28 @@
from .thread import Thread as Thread
from .assistant import Assistant as Assistant
+from .vector_store import VectorStore as VectorStore
from .function_tool import FunctionTool as FunctionTool
from .assistant_tool import AssistantTool as AssistantTool
-from .retrieval_tool import RetrievalTool as RetrievalTool
from .thread_deleted import ThreadDeleted as ThreadDeleted
+from .file_search_tool import FileSearchTool as FileSearchTool
from .assistant_deleted import AssistantDeleted as AssistantDeleted
from .function_tool_param import FunctionToolParam as FunctionToolParam
from .assistant_tool_param import AssistantToolParam as AssistantToolParam
-from .retrieval_tool_param import RetrievalToolParam as RetrievalToolParam
from .thread_create_params import ThreadCreateParams as ThreadCreateParams
from .thread_update_params import ThreadUpdateParams as ThreadUpdateParams
+from .vector_store_deleted import VectorStoreDeleted as VectorStoreDeleted
from .assistant_list_params import AssistantListParams as AssistantListParams
from .assistant_tool_choice import AssistantToolChoice as AssistantToolChoice
from .code_interpreter_tool import CodeInterpreterTool as CodeInterpreterTool
from .assistant_stream_event import AssistantStreamEvent as AssistantStreamEvent
+from .file_search_tool_param import FileSearchToolParam as FileSearchToolParam
from .assistant_create_params import AssistantCreateParams as AssistantCreateParams
from .assistant_update_params import AssistantUpdateParams as AssistantUpdateParams
+from .vector_store_list_params import VectorStoreListParams as VectorStoreListParams
from .assistant_response_format import AssistantResponseFormat as AssistantResponseFormat
+from .vector_store_create_params import VectorStoreCreateParams as VectorStoreCreateParams
+from .vector_store_update_params import VectorStoreUpdateParams as VectorStoreUpdateParams
from .assistant_tool_choice_param import AssistantToolChoiceParam as AssistantToolChoiceParam
from .code_interpreter_tool_param import CodeInterpreterToolParam as CodeInterpreterToolParam
from .assistant_tool_choice_option import AssistantToolChoiceOption as AssistantToolChoiceOption
diff --git a/src/openai/types/beta/assistant.py b/src/openai/types/beta/assistant.py
index 0a0d28ed01..fa09efb0cc 100644
--- a/src/openai/types/beta/assistant.py
+++ b/src/openai/types/beta/assistant.py
@@ -6,7 +6,32 @@
from ..._models import BaseModel
from .assistant_tool import AssistantTool
-__all__ = ["Assistant"]
+__all__ = ["Assistant", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"]
+
+
+class ToolResourcesCodeInterpreter(BaseModel):
+ file_ids: Optional[List[str]] = None
+ """
+ A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
+ available to the `code_interpreter` tool. There can be a maximum of 20 files
+ associated with the tool.
+ """
+
+
+class ToolResourcesFileSearch(BaseModel):
+ vector_store_ids: Optional[List[str]] = None
+ """
+ The ID of the
+ [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ attached to this assistant. There can be a maximum of 1 vector store attached to
+ the assistant.
+ """
+
+
+class ToolResources(BaseModel):
+ code_interpreter: Optional[ToolResourcesCodeInterpreter] = None
+
+ file_search: Optional[ToolResourcesFileSearch] = None
class Assistant(BaseModel):
@@ -19,13 +44,6 @@ class Assistant(BaseModel):
description: Optional[str] = None
"""The description of the assistant. The maximum length is 512 characters."""
- file_ids: List[str]
- """
- A list of [file](https://platform.openai.com/docs/api-reference/files) IDs
- attached to this assistant. There can be a maximum of 20 files attached to the
- assistant. Files are ordered by their creation date in ascending order.
- """
-
instructions: Optional[str] = None
"""The system instructions that the assistant uses.
@@ -60,5 +78,13 @@ class Assistant(BaseModel):
"""A list of tool enabled on the assistant.
There can be a maximum of 128 tools per assistant. Tools can be of types
- `code_interpreter`, `retrieval`, or `function`.
+ `code_interpreter`, `file_search`, or `function`.
+ """
+
+ tool_resources: Optional[ToolResources] = None
+ """A set of resources that are used by the assistant's tools.
+
+ The resources are specific to the type of tool. For example, the
+ `code_interpreter` tool requires a list of file IDs, while the `file_search`
+ tool requires a list of vector store IDs.
"""
diff --git a/src/openai/types/beta/assistant_create_params.py b/src/openai/types/beta/assistant_create_params.py
index 011121485f..925b85050f 100644
--- a/src/openai/types/beta/assistant_create_params.py
+++ b/src/openai/types/beta/assistant_create_params.py
@@ -6,8 +6,15 @@
from typing_extensions import Literal, Required, TypedDict
from .assistant_tool_param import AssistantToolParam
+from .assistant_response_format_option_param import AssistantResponseFormatOptionParam
-__all__ = ["AssistantCreateParams"]
+__all__ = [
+ "AssistantCreateParams",
+ "ToolResources",
+ "ToolResourcesCodeInterpreter",
+ "ToolResourcesFileSearch",
+ "ToolResourcesFileSearchVectorStore",
+]
class AssistantCreateParams(TypedDict, total=False):
@@ -48,13 +55,6 @@ class AssistantCreateParams(TypedDict, total=False):
description: Optional[str]
"""The description of the assistant. The maximum length is 512 characters."""
- file_ids: List[str]
- """
- A list of [file](https://platform.openai.com/docs/api-reference/files) IDs
- attached to this assistant. There can be a maximum of 20 files attached to the
- assistant. Files are ordered by their creation date in ascending order.
- """
-
instructions: Optional[str]
"""The system instructions that the assistant uses.
@@ -72,9 +72,102 @@ class AssistantCreateParams(TypedDict, total=False):
name: Optional[str]
"""The name of the assistant. The maximum length is 256 characters."""
+ response_format: Optional[AssistantResponseFormatOptionParam]
+ """Specifies the format that the model must output.
+
+ Compatible with
+ [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
+ all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
+
+ Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
+ message the model generates is valid JSON.
+
+ **Important:** when using JSON mode, you **must** also instruct the model to
+ produce JSON yourself via a system or user message. Without this, the model may
+ generate an unending stream of whitespace until the generation reaches the token
+ limit, resulting in a long-running and seemingly "stuck" request. Also note that
+ the message content may be partially cut off if `finish_reason="length"`, which
+ indicates the generation exceeded `max_tokens` or the conversation exceeded the
+ max context length.
+ """
+
+ temperature: Optional[float]
+ """What sampling temperature to use, between 0 and 2.
+
+ Higher values like 0.8 will make the output more random, while lower values like
+ 0.2 will make it more focused and deterministic.
+ """
+
+ tool_resources: Optional[ToolResources]
+ """A set of resources that are used by the assistant's tools.
+
+ The resources are specific to the type of tool. For example, the
+ `code_interpreter` tool requires a list of file IDs, while the `file_search`
+ tool requires a list of vector store IDs.
+ """
+
tools: Iterable[AssistantToolParam]
"""A list of tool enabled on the assistant.
There can be a maximum of 128 tools per assistant. Tools can be of types
- `code_interpreter`, `retrieval`, or `function`.
+ `code_interpreter`, `file_search`, or `function`.
+ """
+
+ top_p: Optional[float]
+ """
+ An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or temperature but not both.
+ """
+
+
+class ToolResourcesCodeInterpreter(TypedDict, total=False):
+ file_ids: List[str]
+ """
+ A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
+ available to the `code_interpreter` tool. There can be a maximum of 20 files
+ associated with the tool.
+ """
+
+
+class ToolResourcesFileSearchVectorStore(TypedDict, total=False):
+ file_ids: List[str]
+ """
+ A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to
+ add to the vector store. There can be a maximum of 10000 files in a vector
+ store.
+ """
+
+ metadata: object
+ """Set of 16 key-value pairs that can be attached to a vector store.
+
+ This can be useful for storing additional information about the vector store in
+ a structured format. Keys can be a maximum of 64 characters long and values can
+ be a maximum of 512 characters long.
"""
+
+
+class ToolResourcesFileSearch(TypedDict, total=False):
+ vector_store_ids: List[str]
+ """
+ The
+ [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ attached to this assistant. There can be a maximum of 1 vector store attached to
+ the assistant.
+ """
+
+ vector_stores: Iterable[ToolResourcesFileSearchVectorStore]
+ """
+ A helper to create a
+ [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ with file_ids and attach it to this assistant. There can be a maximum of 1
+ vector store attached to the assistant.
+ """
+
+
+class ToolResources(TypedDict, total=False):
+ code_interpreter: ToolResourcesCodeInterpreter
+
+ file_search: ToolResourcesFileSearch
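
On the request side, the old `file_ids` list splits in two: `code_interpreter` keeps plain file IDs, while `file_search` takes vector store IDs, or the inline `vector_stores` helper that creates the store for you. A sketch; the model value and IDs are placeholders:

```python
from openai import OpenAI

client = OpenAI()

assistant = client.beta.assistants.create(
    model="gpt-4-turbo",
    name="Docs QA",
    tools=[{"type": "file_search"}],
    tool_resources={
        "file_search": {
            # Either reference an existing store:
            #   "vector_store_ids": ["vs_abc123"],
            # or let the API build one from file IDs via the helper:
            "vector_stores": [
                {"file_ids": ["file_abc123"], "metadata": {"env": "dev"}}
            ],
        }
    },
)
```
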
diff --git a/src/openai/types/beta/assistant_tool.py b/src/openai/types/beta/assistant_tool.py
index a4420385e8..7832da48cc 100644
--- a/src/openai/types/beta/assistant_tool.py
+++ b/src/openai/types/beta/assistant_tool.py
@@ -5,9 +5,9 @@
from ..._utils import PropertyInfo
from .function_tool import FunctionTool
-from .retrieval_tool import RetrievalTool
+from .file_search_tool import FileSearchTool
from .code_interpreter_tool import CodeInterpreterTool
__all__ = ["AssistantTool"]
-AssistantTool = Annotated[Union[CodeInterpreterTool, RetrievalTool, FunctionTool], PropertyInfo(discriminator="type")]
+AssistantTool = Annotated[Union[CodeInterpreterTool, FileSearchTool, FunctionTool], PropertyInfo(discriminator="type")]
diff --git a/src/openai/types/beta/assistant_tool_choice.py b/src/openai/types/beta/assistant_tool_choice.py
index 4314d4b41e..d73439f006 100644
--- a/src/openai/types/beta/assistant_tool_choice.py
+++ b/src/openai/types/beta/assistant_tool_choice.py
@@ -10,7 +10,7 @@
class AssistantToolChoice(BaseModel):
- type: Literal["function", "code_interpreter", "retrieval"]
+ type: Literal["function", "code_interpreter", "file_search"]
"""The type of the tool. If type is `function`, the function name must be set"""
function: Optional[AssistantToolChoiceFunction] = None
diff --git a/src/openai/types/beta/assistant_tool_choice_param.py b/src/openai/types/beta/assistant_tool_choice_param.py
index 5cf6ea27be..904f489e26 100644
--- a/src/openai/types/beta/assistant_tool_choice_param.py
+++ b/src/openai/types/beta/assistant_tool_choice_param.py
@@ -10,7 +10,7 @@
class AssistantToolChoiceParam(TypedDict, total=False):
- type: Required[Literal["function", "code_interpreter", "retrieval"]]
+ type: Required[Literal["function", "code_interpreter", "file_search"]]
"""The type of the tool. If type is `function`, the function name must be set"""
function: AssistantToolChoiceFunctionParam
diff --git a/src/openai/types/beta/assistant_tool_param.py b/src/openai/types/beta/assistant_tool_param.py
index d5758f169e..5b1d30ba2f 100644
--- a/src/openai/types/beta/assistant_tool_param.py
+++ b/src/openai/types/beta/assistant_tool_param.py
@@ -5,9 +5,9 @@
from typing import Union
from .function_tool_param import FunctionToolParam
-from .retrieval_tool_param import RetrievalToolParam
+from .file_search_tool_param import FileSearchToolParam
from .code_interpreter_tool_param import CodeInterpreterToolParam
__all__ = ["AssistantToolParam"]
-AssistantToolParam = Union[CodeInterpreterToolParam, RetrievalToolParam, FunctionToolParam]
+AssistantToolParam = Union[CodeInterpreterToolParam, FileSearchToolParam, FunctionToolParam]
diff --git a/src/openai/types/beta/assistant_update_params.py b/src/openai/types/beta/assistant_update_params.py
index 6e9d9ed5db..1354b078a8 100644
--- a/src/openai/types/beta/assistant_update_params.py
+++ b/src/openai/types/beta/assistant_update_params.py
@@ -6,23 +6,15 @@
from typing_extensions import TypedDict
from .assistant_tool_param import AssistantToolParam
+from .assistant_response_format_option_param import AssistantResponseFormatOptionParam
-__all__ = ["AssistantUpdateParams"]
+__all__ = ["AssistantUpdateParams", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"]
class AssistantUpdateParams(TypedDict, total=False):
description: Optional[str]
"""The description of the assistant. The maximum length is 512 characters."""
- file_ids: List[str]
- """
- A list of [File](https://platform.openai.com/docs/api-reference/files) IDs
- attached to this assistant. There can be a maximum of 20 files attached to the
- assistant. Files are ordered by their creation date in ascending order. If a
- file was previously attached to the list but does not show up in the list, it
- will be deleted from the assistant.
- """
-
instructions: Optional[str]
"""The system instructions that the assistant uses.
@@ -50,9 +42,78 @@ class AssistantUpdateParams(TypedDict, total=False):
name: Optional[str]
"""The name of the assistant. The maximum length is 256 characters."""
+ response_format: Optional[AssistantResponseFormatOptionParam]
+ """Specifies the format that the model must output.
+
+ Compatible with
+ [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
+ all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
+
+ Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
+ message the model generates is valid JSON.
+
+ **Important:** when using JSON mode, you **must** also instruct the model to
+ produce JSON yourself via a system or user message. Without this, the model may
+ generate an unending stream of whitespace until the generation reaches the token
+ limit, resulting in a long-running and seemingly "stuck" request. Also note that
+ the message content may be partially cut off if `finish_reason="length"`, which
+ indicates the generation exceeded `max_tokens` or the conversation exceeded the
+ max context length.
+ """
+
+ temperature: Optional[float]
+ """What sampling temperature to use, between 0 and 2.
+
+ Higher values like 0.8 will make the output more random, while lower values like
+ 0.2 will make it more focused and deterministic.
+ """
+
+ tool_resources: Optional[ToolResources]
+ """A set of resources that are used by the assistant's tools.
+
+ The resources are specific to the type of tool. For example, the
+ `code_interpreter` tool requires a list of file IDs, while the `file_search`
+ tool requires a list of vector store IDs.
+ """
+
tools: Iterable[AssistantToolParam]
"""A list of tool enabled on the assistant.
There can be a maximum of 128 tools per assistant. Tools can be of types
- `code_interpreter`, `retrieval`, or `function`.
+ `code_interpreter`, `file_search`, or `function`.
"""
+
+ top_p: Optional[float]
+ """
+ An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or temperature but not both.
+ """
+
+
+class ToolResourcesCodeInterpreter(TypedDict, total=False):
+ file_ids: List[str]
+ """
+ Overrides the list of
+ [file](https://platform.openai.com/docs/api-reference/files) IDs made available
+ to the `code_interpreter` tool. There can be a maximum of 20 files associated
+ with the tool.
+ """
+
+
+class ToolResourcesFileSearch(TypedDict, total=False):
+ vector_store_ids: List[str]
+ """
+ Overrides the
+ [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ attached to this assistant. There can be a maximum of 1 vector store attached to
+ the assistant.
+ """
+
+
+class ToolResources(TypedDict, total=False):
+ code_interpreter: ToolResourcesCodeInterpreter
+
+ file_search: ToolResourcesFileSearch
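
Note the update-side docstrings say "Overrides": the lists replace, rather than merge with, whatever is already attached. A sketch with placeholder IDs:

```python
from openai import OpenAI

client = OpenAI()

# Point the assistant's file_search tool at a different (single) vector store;
# the previous attachment is replaced, not appended to.
assistant = client.beta.assistants.update(
    "asst_abc123",
    tool_resources={"file_search": {"vector_store_ids": ["vs_new456"]}},
)
```
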
diff --git a/src/openai/types/beta/assistants/__init__.py b/src/openai/types/beta/assistants/__init__.py
deleted file mode 100644
index d4dd2de018..0000000000
--- a/src/openai/types/beta/assistants/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from .assistant_file import AssistantFile as AssistantFile
-from .file_list_params import FileListParams as FileListParams
-from .file_create_params import FileCreateParams as FileCreateParams
-from .file_delete_response import FileDeleteResponse as FileDeleteResponse
diff --git a/src/openai/types/beta/assistants/assistant_file.py b/src/openai/types/beta/assistants/assistant_file.py
deleted file mode 100644
index 25aec07b49..0000000000
--- a/src/openai/types/beta/assistants/assistant_file.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ...._models import BaseModel
-
-__all__ = ["AssistantFile"]
-
-
-class AssistantFile(BaseModel):
- id: str
- """The identifier, which can be referenced in API endpoints."""
-
- assistant_id: str
- """The assistant ID that the file is attached to."""
-
- created_at: int
- """The Unix timestamp (in seconds) for when the assistant file was created."""
-
- object: Literal["assistant.file"]
- """The object type, which is always `assistant.file`."""
diff --git a/src/openai/types/beta/file_search_tool.py b/src/openai/types/beta/file_search_tool.py
new file mode 100644
index 0000000000..eea55ea6ac
--- /dev/null
+++ b/src/openai/types/beta/file_search_tool.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["FileSearchTool"]
+
+
+class FileSearchTool(BaseModel):
+ type: Literal["file_search"]
+ """The type of tool being defined: `file_search`"""
diff --git a/src/openai/types/beta/retrieval_tool_param.py b/src/openai/types/beta/file_search_tool_param.py
similarity index 50%
rename from src/openai/types/beta/retrieval_tool_param.py
rename to src/openai/types/beta/file_search_tool_param.py
index d76c0beefc..d33fd06da4 100644
--- a/src/openai/types/beta/retrieval_tool_param.py
+++ b/src/openai/types/beta/file_search_tool_param.py
@@ -4,9 +4,9 @@
from typing_extensions import Literal, Required, TypedDict
-__all__ = ["RetrievalToolParam"]
+__all__ = ["FileSearchToolParam"]
-class RetrievalToolParam(TypedDict, total=False):
- type: Required[Literal["retrieval"]]
- """The type of tool being defined: `retrieval`"""
+class FileSearchToolParam(TypedDict, total=False):
+ type: Required[Literal["file_search"]]
+ """The type of tool being defined: `file_search`"""
diff --git a/src/openai/types/beta/thread.py b/src/openai/types/beta/thread.py
index 8fd1423068..6f7a6c7d0c 100644
--- a/src/openai/types/beta/thread.py
+++ b/src/openai/types/beta/thread.py
@@ -1,11 +1,36 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import Optional
+from typing import List, Optional
from typing_extensions import Literal
from ..._models import BaseModel
-__all__ = ["Thread"]
+__all__ = ["Thread", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"]
+
+
+class ToolResourcesCodeInterpreter(BaseModel):
+ file_ids: Optional[List[str]] = None
+ """
+ A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
+ available to the `code_interpreter` tool. There can be a maximum of 20 files
+ associated with the tool.
+ """
+
+
+class ToolResourcesFileSearch(BaseModel):
+ vector_store_ids: Optional[List[str]] = None
+ """
+ The
+ [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ attached to this thread. There can be a maximum of 1 vector store attached to
+ the thread.
+ """
+
+
+class ToolResources(BaseModel):
+ code_interpreter: Optional[ToolResourcesCodeInterpreter] = None
+
+ file_search: Optional[ToolResourcesFileSearch] = None
class Thread(BaseModel):
@@ -25,3 +50,11 @@ class Thread(BaseModel):
object: Literal["thread"]
"""The object type, which is always `thread`."""
+
+ tool_resources: Optional[ToolResources] = None
+ """
+ A set of resources that are made available to the assistant's tools in this
+ thread. The resources are specific to the type of tool. For example, the
+ `code_interpreter` tool requires a list of file IDs, while the `file_search`
+ tool requires a list of vector store IDs.
+ """
diff --git a/src/openai/types/beta/thread_create_and_run_params.py b/src/openai/types/beta/thread_create_and_run_params.py
index 50f947a40a..d7d5a758e8 100644
--- a/src/openai/types/beta/thread_create_and_run_params.py
+++ b/src/openai/types/beta/thread_create_and_run_params.py
@@ -6,7 +6,7 @@
from typing_extensions import Literal, Required, TypedDict
from .function_tool_param import FunctionToolParam
-from .retrieval_tool_param import RetrievalToolParam
+from .file_search_tool_param import FileSearchToolParam
from .code_interpreter_tool_param import CodeInterpreterToolParam
from .assistant_tool_choice_option_param import AssistantToolChoiceOptionParam
from .assistant_response_format_option_param import AssistantResponseFormatOptionParam
@@ -15,6 +15,14 @@
"ThreadCreateAndRunParamsBase",
"Thread",
"ThreadMessage",
+ "ThreadMessageAttachment",
+ "ThreadToolResources",
+ "ThreadToolResourcesCodeInterpreter",
+ "ThreadToolResourcesFileSearch",
+ "ThreadToolResourcesFileSearchVectorStore",
+ "ToolResources",
+ "ToolResourcesCodeInterpreter",
+ "ToolResourcesFileSearch",
"Tool",
"TruncationStrategy",
"ThreadCreateAndRunParamsNonStreaming",
@@ -41,7 +49,7 @@ class ThreadCreateAndRunParamsBase(TypedDict, total=False):
The maximum number of completion tokens that may be used over the course of the
run. The run will make a best effort to use only the number of completion tokens
specified, across multiple turns of the run. If the run exceeds the number of
- completion tokens specified, the run will end with status `complete`. See
+ completion tokens specified, the run will end with status `incomplete`. See
`incomplete_details` for more info.
"""
@@ -50,7 +58,7 @@ class ThreadCreateAndRunParamsBase(TypedDict, total=False):
The run will make a best effort to use only the number of prompt tokens
specified, across multiple turns of the run. If the run exceeds the number of
- prompt tokens specified, the run will end with status `complete`. See
+ prompt tokens specified, the run will end with status `incomplete`. See
`incomplete_details` for more info.
"""
@@ -132,15 +140,37 @@ class ThreadCreateAndRunParamsBase(TypedDict, total=False):
call that tool.
"""
+ tool_resources: Optional[ToolResources]
+ """A set of resources that are used by the assistant's tools.
+
+ The resources are specific to the type of tool. For example, the
+ `code_interpreter` tool requires a list of file IDs, while the `file_search`
+ tool requires a list of vector store IDs.
+ """
+
tools: Optional[Iterable[Tool]]
"""Override the tools the assistant can use for this run.
This is useful for modifying the behavior on a per-run basis.
"""
+ top_p: Optional[float]
+ """
+ An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+ """
+
truncation_strategy: Optional[TruncationStrategy]
+class ThreadMessageAttachment(TypedDict, total=False):
+ add_to: List[Literal["file_search", "code_interpreter"]]
+
+ file_id: str
+ """The ID of the file to attach to the message."""
+
+
class ThreadMessage(TypedDict, total=False):
content: Required[str]
"""The content of the message."""
@@ -154,13 +184,8 @@ class ThreadMessage(TypedDict, total=False):
value to insert messages from the assistant into the conversation.
"""
- file_ids: List[str]
- """
- A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
- the message should use. There can be a maximum of 10 files attached to a
- message. Useful for tools like `retrieval` and `code_interpreter` that can
- access and use files.
- """
+ attachments: Optional[Iterable[ThreadMessageAttachment]]
+ """A list of files attached to the message, and the tools they should be added to."""
metadata: Optional[object]
"""Set of 16 key-value pairs that can be attached to an object.
@@ -171,6 +196,56 @@ class ThreadMessage(TypedDict, total=False):
"""
+class ThreadToolResourcesCodeInterpreter(TypedDict, total=False):
+ file_ids: List[str]
+ """
+ A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
+ available to the `code_interpreter` tool. There can be a maximum of 20 files
+ associated with the tool.
+ """
+
+
+class ThreadToolResourcesFileSearchVectorStore(TypedDict, total=False):
+ file_ids: List[str]
+ """
+ A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to
+ add to the vector store. There can be a maximum of 10000 files in a vector
+ store.
+ """
+
+ metadata: object
+ """Set of 16 key-value pairs that can be attached to a vector store.
+
+ This can be useful for storing additional information about the vector store in
+ a structured format. Keys can be a maximum of 64 characters long and values can
+ be a maximum of 512 characters long.
+ """
+
+
+class ThreadToolResourcesFileSearch(TypedDict, total=False):
+ vector_store_ids: List[str]
+ """
+ The
+ [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ attached to this thread. There can be a maximum of 1 vector store attached to
+ the thread.
+ """
+
+ vector_stores: Iterable[ThreadToolResourcesFileSearchVectorStore]
+ """
+ A helper to create a
+ [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ with file_ids and attach it to this thread. There can be a maximum of 1 vector
+ store attached to the thread.
+ """
+
+
+class ThreadToolResources(TypedDict, total=False):
+ code_interpreter: ThreadToolResourcesCodeInterpreter
+
+ file_search: ThreadToolResourcesFileSearch
+
+
class Thread(TypedDict, total=False):
messages: Iterable[ThreadMessage]
"""
@@ -186,8 +261,41 @@ class Thread(TypedDict, total=False):
a maximum of 512 characters long.
"""
+ tool_resources: Optional[ThreadToolResources]
+ """
+ A set of resources that are made available to the assistant's tools in this
+ thread. The resources are specific to the type of tool. For example, the
+ `code_interpreter` tool requires a list of file IDs, while the `file_search`
+ tool requires a list of vector store IDs.
+ """
+
+
+class ToolResourcesCodeInterpreter(TypedDict, total=False):
+ file_ids: List[str]
+ """
+ A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
+ available to the `code_interpreter` tool. There can be a maximum of 20 files
+ associated with the tool.
+ """
+
+
+class ToolResourcesFileSearch(TypedDict, total=False):
+ vector_store_ids: List[str]
+ """
+ The ID of the
+ [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ attached to this assistant. There can be a maximum of 1 vector store attached to
+ the assistant.
+ """
+
+
+class ToolResources(TypedDict, total=False):
+ code_interpreter: ToolResourcesCodeInterpreter
+
+ file_search: ToolResourcesFileSearch
+
-Tool = Union[CodeInterpreterToolParam, RetrievalToolParam, FunctionToolParam]
+Tool = Union[CodeInterpreterToolParam, FileSearchToolParam, FunctionToolParam]
class TruncationStrategy(TypedDict, total=False):
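
For `create_and_run`, resources can now be scoped two ways: `thread.tool_resources` travels with the thread, while the top-level `tool_resources` overrides what the run's tools see. A sketch assuming the standard `client.beta.threads.create_and_run` entry point (not shown in this diff), with placeholder IDs:

```python
from openai import OpenAI

client = OpenAI()

run = client.beta.threads.create_and_run(
    assistant_id="asst_abc123",
    thread={
        "messages": [{"role": "user", "content": "Summarize the attached report."}],
        # Thread-scoped resources: at most one vector store per thread.
        "tool_resources": {"file_search": {"vector_store_ids": ["vs_abc123"]}},
    },
    top_p=0.9,
    max_completion_tokens=512,  # run ends with status "incomplete" if exceeded
)
```
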
diff --git a/src/openai/types/beta/thread_create_params.py b/src/openai/types/beta/thread_create_params.py
index 1b382186aa..84a98a74d7 100644
--- a/src/openai/types/beta/thread_create_params.py
+++ b/src/openai/types/beta/thread_create_params.py
@@ -5,7 +5,15 @@
from typing import List, Iterable, Optional
from typing_extensions import Literal, Required, TypedDict
-__all__ = ["ThreadCreateParams", "Message"]
+__all__ = [
+ "ThreadCreateParams",
+ "Message",
+ "MessageAttachment",
+ "ToolResources",
+ "ToolResourcesCodeInterpreter",
+ "ToolResourcesFileSearch",
+ "ToolResourcesFileSearchVectorStore",
+]
class ThreadCreateParams(TypedDict, total=False):
@@ -23,6 +31,21 @@ class ThreadCreateParams(TypedDict, total=False):
a maximum of 512 characters long.
"""
+ tool_resources: Optional[ToolResources]
+ """
+ A set of resources that are made available to the assistant's tools in this
+ thread. The resources are specific to the type of tool. For example, the
+ `code_interpreter` tool requires a list of file IDs, while the `file_search`
+ tool requires a list of vector store IDs.
+ """
+
+
+class MessageAttachment(TypedDict, total=False):
+ add_to: List[Literal["file_search", "code_interpreter"]]
+
+ file_id: str
+ """The ID of the file to attach to the message."""
+
class Message(TypedDict, total=False):
content: Required[str]
@@ -37,13 +60,8 @@ class Message(TypedDict, total=False):
value to insert messages from the assistant into the conversation.
"""
- file_ids: List[str]
- """
- A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
- the message should use. There can be a maximum of 10 files attached to a
- message. Useful for tools like `retrieval` and `code_interpreter` that can
- access and use files.
- """
+ attachments: Optional[Iterable[MessageAttachment]]
+ """A list of files attached to the message, and the tools they should be added to."""
metadata: Optional[object]
"""Set of 16 key-value pairs that can be attached to an object.
@@ -52,3 +70,53 @@ class Message(TypedDict, total=False):
structured format. Keys can be a maximum of 64 characters long and values can be
a maximum of 512 characters long.
"""
+
+
+class ToolResourcesCodeInterpreter(TypedDict, total=False):
+ file_ids: List[str]
+ """
+ A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
+ available to the `code_interpreter` tool. There can be a maximum of 20 files
+ associated with the tool.
+ """
+
+
+class ToolResourcesFileSearchVectorStore(TypedDict, total=False):
+ file_ids: List[str]
+ """
+ A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to
+ add to the vector store. There can be a maximum of 10000 files in a vector
+ store.
+ """
+
+ metadata: object
+ """Set of 16 key-value pairs that can be attached to a vector store.
+
+ This can be useful for storing additional information about the vector store in
+ a structured format. Keys can be a maximum of 64 characters long and values can
+ be a maximum of 512 characters long.
+ """
+
+
+class ToolResourcesFileSearch(TypedDict, total=False):
+ vector_store_ids: List[str]
+ """
+ The
+ [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ attached to this thread. There can be a maximum of 1 vector store attached to
+ the thread.
+ """
+
+ vector_stores: Iterable[ToolResourcesFileSearchVectorStore]
+ """
+ A helper to create a
+ [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ with file_ids and attach it to this thread. There can be a maximum of 1 vector
+ store attached to the thread.
+ """
+
+
+class ToolResources(TypedDict, total=False):
+ code_interpreter: ToolResourcesCodeInterpreter
+
+ file_search: ToolResourcesFileSearch
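
Thread creation mirrors the assistant-side shape: per-message `attachments` name which tools see a file, and `tool_resources.file_search.vector_stores` can build the thread's single store inline. A sketch (`add_to` is the attachment field name as defined in this diff; IDs are placeholders):

```python
from openai import OpenAI

client = OpenAI()

thread = client.beta.threads.create(
    messages=[
        {
            "role": "user",
            "content": "What does the attached spec say about retries?",
            "attachments": [{"file_id": "file_abc123", "add_to": ["file_search"]}],
        }
    ],
    # Build the thread's vector store inline from file IDs.
    tool_resources={"file_search": {"vector_stores": [{"file_ids": ["file_def456"]}]}},
)
```
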
diff --git a/src/openai/types/beta/thread_update_params.py b/src/openai/types/beta/thread_update_params.py
index 94f1b1e22e..7210ab77c9 100644
--- a/src/openai/types/beta/thread_update_params.py
+++ b/src/openai/types/beta/thread_update_params.py
@@ -2,10 +2,10 @@
from __future__ import annotations
-from typing import Optional
+from typing import List, Optional
from typing_extensions import TypedDict
-__all__ = ["ThreadUpdateParams"]
+__all__ = ["ThreadUpdateParams", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"]
class ThreadUpdateParams(TypedDict, total=False):
@@ -16,3 +16,36 @@ class ThreadUpdateParams(TypedDict, total=False):
structured format. Keys can be a maximum of 64 characters long and values can be
a maximum of 512 characters long.
"""
+
+ tool_resources: Optional[ToolResources]
+ """
+ A set of resources that are made available to the assistant's tools in this
+ thread. The resources are specific to the type of tool. For example, the
+ `code_interpreter` tool requires a list of file IDs, while the `file_search`
+ tool requires a list of vector store IDs.
+ """
+
+
+class ToolResourcesCodeInterpreter(TypedDict, total=False):
+ file_ids: List[str]
+ """
+ A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
+ available to the `code_interpreter` tool. There can be a maximum of 20 files
+ associated with the tool.
+ """
+
+
+class ToolResourcesFileSearch(TypedDict, total=False):
+ vector_store_ids: List[str]
+ """
+ The
+ [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ attached to this thread. There can be a maximum of 1 vector store attached to
+ the thread.
+ """
+
+
+class ToolResources(TypedDict, total=False):
+ code_interpreter: ToolResourcesCodeInterpreter
+
+ file_search: ToolResourcesFileSearch
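
And for an existing thread, the same shape retrofits resources after the fact. A sketch with placeholder IDs:

```python
from openai import OpenAI

client = OpenAI()

thread = client.beta.threads.update(
    "thread_abc123",
    tool_resources={
        "code_interpreter": {"file_ids": ["file_abc123"]},
        "file_search": {"vector_store_ids": ["vs_abc123"]},
    },
)
```
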
diff --git a/src/openai/types/beta/threads/message.py b/src/openai/types/beta/threads/message.py
index bde0263975..42f0162734 100644
--- a/src/openai/types/beta/threads/message.py
+++ b/src/openai/types/beta/threads/message.py
@@ -6,7 +6,14 @@
from ...._models import BaseModel
from .message_content import MessageContent
-__all__ = ["Message", "IncompleteDetails"]
+__all__ = ["Message", "Attachment", "IncompleteDetails"]
+
+
+class Attachment(BaseModel):
+ add_to: Optional[List[Literal["file_search", "code_interpreter"]]] = None
+
+ file_id: Optional[str] = None
+ """The ID of the file to attach to the message."""
class IncompleteDetails(BaseModel):
@@ -25,6 +32,9 @@ class Message(BaseModel):
authored this message.
"""
+ attachments: Optional[List[Attachment]] = None
+ """A list of files attached to the message, and the tools they were added to."""
+
completed_at: Optional[int] = None
"""The Unix timestamp (in seconds) for when the message was completed."""
@@ -34,13 +44,6 @@ class Message(BaseModel):
created_at: int
"""The Unix timestamp (in seconds) for when the message was created."""
- file_ids: List[str]
- """
- A list of [file](https://platform.openai.com/docs/api-reference/files) IDs that
- the assistant should use. Useful for tools like retrieval and code_interpreter
- that can access files. A maximum of 10 files can be attached to a message.
- """
-
incomplete_at: Optional[int] = None
"""The Unix timestamp (in seconds) for when the message was marked as incomplete."""
diff --git a/src/openai/types/beta/threads/message_create_params.py b/src/openai/types/beta/threads/message_create_params.py
index 9b9467ef4d..1ef1d9ae10 100644
--- a/src/openai/types/beta/threads/message_create_params.py
+++ b/src/openai/types/beta/threads/message_create_params.py
@@ -2,10 +2,10 @@
from __future__ import annotations
-from typing import List, Optional
+from typing import List, Iterable, Optional
from typing_extensions import Literal, Required, TypedDict
-__all__ = ["MessageCreateParams"]
+__all__ = ["MessageCreateParams", "Attachment"]
class MessageCreateParams(TypedDict, total=False):
@@ -21,13 +21,8 @@ class MessageCreateParams(TypedDict, total=False):
value to insert messages from the assistant into the conversation.
"""
- file_ids: List[str]
- """
- A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
- the message should use. There can be a maximum of 10 files attached to a
- message. Useful for tools like `retrieval` and `code_interpreter` that can
- access and use files.
- """
+ attachments: Optional[Iterable[Attachment]]
+ """A list of files attached to the message, and the tools they should be added to."""
metadata: Optional[object]
"""Set of 16 key-value pairs that can be attached to an object.
@@ -36,3 +31,10 @@ class MessageCreateParams(TypedDict, total=False):
structured format. Keys can be a maximum of 64 characters long and values can be
a maxium of 512 characters long.
"""
+
+
+class Attachment(TypedDict, total=False):
+ add_to: List[Literal["file_search", "code_interpreter"]]
+
+ file_id: str
+ """The ID of the file to attach to the message."""
diff --git a/src/openai/types/beta/threads/message_delta.py b/src/openai/types/beta/threads/message_delta.py
index 3a55e1442a..ecd0dfe319 100644
--- a/src/openai/types/beta/threads/message_delta.py
+++ b/src/openai/types/beta/threads/message_delta.py
@@ -13,12 +13,5 @@ class MessageDelta(BaseModel):
content: Optional[List[MessageContentDelta]] = None
"""The content of the message in array of text and/or images."""
- file_ids: Optional[List[str]] = None
- """
- A list of [file](https://platform.openai.com/docs/api-reference/files) IDs that
- the assistant should use. Useful for tools like retrieval and code_interpreter
- that can access files. A maximum of 10 files can be attached to a message.
- """
-
role: Optional[Literal["user", "assistant"]] = None
"""The entity that produced the message. One of `user` or `assistant`."""
diff --git a/src/openai/types/beta/threads/messages/__init__.py b/src/openai/types/beta/threads/messages/__init__.py
deleted file mode 100644
index d129297620..0000000000
--- a/src/openai/types/beta/threads/messages/__init__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from .message_file import MessageFile as MessageFile
-from .file_list_params import FileListParams as FileListParams
diff --git a/src/openai/types/beta/threads/messages/message_file.py b/src/openai/types/beta/threads/messages/message_file.py
deleted file mode 100644
index 342479ab7b..0000000000
--- a/src/openai/types/beta/threads/messages/message_file.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ....._models import BaseModel
-
-__all__ = ["MessageFile"]
-
-
-class MessageFile(BaseModel):
- id: str
- """The identifier, which can be referenced in API endpoints."""
-
- created_at: int
- """The Unix timestamp (in seconds) for when the message file was created."""
-
- message_id: str
- """
- The ID of the [message](https://platform.openai.com/docs/api-reference/messages)
- that the [File](https://platform.openai.com/docs/api-reference/files) is
- attached to.
- """
-
- object: Literal["thread.message.file"]
- """The object type, which is always `thread.message.file`."""
diff --git a/src/openai/types/beta/threads/run.py b/src/openai/types/beta/threads/run.py
index 2efc3c77fa..8f427ce6e8 100644
--- a/src/openai/types/beta/threads/run.py
+++ b/src/openai/types/beta/threads/run.py
@@ -105,13 +105,6 @@ class Run(BaseModel):
failed_at: Optional[int] = None
"""The Unix timestamp (in seconds) for when the run failed."""
- file_ids: List[str]
- """
- The list of [File](https://platform.openai.com/docs/api-reference/files) IDs the
- [assistant](https://platform.openai.com/docs/api-reference/assistants) used for
- this run.
- """
-
incomplete_details: Optional[IncompleteDetails] = None
"""Details on why the run is incomplete.
@@ -227,3 +220,6 @@ class Run(BaseModel):
temperature: Optional[float] = None
"""The sampling temperature used for this run. If not set, defaults to 1."""
+
+ top_p: Optional[float] = None
+ """The nucleus sampling value used for this run. If not set, defaults to 1."""
diff --git a/src/openai/types/beta/threads/run_create_params.py b/src/openai/types/beta/threads/run_create_params.py
index 9f2d4ba18b..fd0b4e7920 100644
--- a/src/openai/types/beta/threads/run_create_params.py
+++ b/src/openai/types/beta/threads/run_create_params.py
@@ -12,6 +12,7 @@
__all__ = [
"RunCreateParamsBase",
"AdditionalMessage",
+ "AdditionalMessageAttachment",
"TruncationStrategy",
"RunCreateParamsNonStreaming",
"RunCreateParamsStreaming",
@@ -142,9 +143,23 @@ class RunCreateParamsBase(TypedDict, total=False):
This is useful for modifying the behavior on a per-run basis.
"""
+ top_p: Optional[float]
+ """
+ An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+ """
+
truncation_strategy: Optional[TruncationStrategy]
+class AdditionalMessageAttachment(TypedDict, total=False):
+ add_to: List[Literal["file_search", "code_interpreter"]]
+
+ file_id: str
+ """The ID of the file to attach to the message."""
+
+
class AdditionalMessage(TypedDict, total=False):
content: Required[str]
"""The content of the message."""
@@ -158,13 +173,8 @@ class AdditionalMessage(TypedDict, total=False):
value to insert messages from the assistant into the conversation.
"""
- file_ids: List[str]
- """
- A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
- the message should use. There can be a maximum of 10 files attached to a
- message. Useful for tools like `retrieval` and `code_interpreter` that can
- access and use files.
- """
+ attachments: Optional[Iterable[AdditionalMessageAttachment]]
+ """A list of files attached to the message, and the tools they should be added to."""
metadata: Optional[object]
"""Set of 16 key-value pairs that can be attached to an object.
diff --git a/src/openai/types/beta/threads/runs/__init__.py b/src/openai/types/beta/threads/runs/__init__.py
index 256510dcc7..a312ce3df2 100644
--- a/src/openai/types/beta/threads/runs/__init__.py
+++ b/src/openai/types/beta/threads/runs/__init__.py
@@ -8,14 +8,14 @@
from .tool_call_delta import ToolCallDelta as ToolCallDelta
from .step_list_params import StepListParams as StepListParams
from .function_tool_call import FunctionToolCall as FunctionToolCall
-from .retrieval_tool_call import RetrievalToolCall as RetrievalToolCall
from .run_step_delta_event import RunStepDeltaEvent as RunStepDeltaEvent
from .code_interpreter_logs import CodeInterpreterLogs as CodeInterpreterLogs
+from .file_search_tool_call import FileSearchToolCall as FileSearchToolCall
from .tool_call_delta_object import ToolCallDeltaObject as ToolCallDeltaObject
from .tool_calls_step_details import ToolCallsStepDetails as ToolCallsStepDetails
from .function_tool_call_delta import FunctionToolCallDelta as FunctionToolCallDelta
-from .retrieval_tool_call_delta import RetrievalToolCallDelta as RetrievalToolCallDelta
from .code_interpreter_tool_call import CodeInterpreterToolCall as CodeInterpreterToolCall
+from .file_search_tool_call_delta import FileSearchToolCallDelta as FileSearchToolCallDelta
from .run_step_delta_message_delta import RunStepDeltaMessageDelta as RunStepDeltaMessageDelta
from .code_interpreter_output_image import CodeInterpreterOutputImage as CodeInterpreterOutputImage
from .message_creation_step_details import MessageCreationStepDetails as MessageCreationStepDetails
diff --git a/src/openai/types/beta/threads/runs/retrieval_tool_call.py b/src/openai/types/beta/threads/runs/file_search_tool_call.py
similarity index 61%
rename from src/openai/types/beta/threads/runs/retrieval_tool_call.py
rename to src/openai/types/beta/threads/runs/file_search_tool_call.py
index 48704ed331..57c0ca9a90 100644
--- a/src/openai/types/beta/threads/runs/retrieval_tool_call.py
+++ b/src/openai/types/beta/threads/runs/file_search_tool_call.py
@@ -4,18 +4,18 @@
from ....._models import BaseModel
-__all__ = ["RetrievalToolCall"]
+__all__ = ["FileSearchToolCall"]
-class RetrievalToolCall(BaseModel):
+class FileSearchToolCall(BaseModel):
id: str
"""The ID of the tool call object."""
- retrieval: object
+ file_search: object
"""For now, this is always going to be an empty object."""
- type: Literal["retrieval"]
+ type: Literal["file_search"]
"""The type of tool call.
- This is always going to be `retrieval` for this type of tool call.
+ This is always going to be `file_search` for this type of tool call.
"""
diff --git a/src/openai/types/beta/threads/runs/retrieval_tool_call_delta.py b/src/openai/types/beta/threads/runs/file_search_tool_call_delta.py
similarity index 67%
rename from src/openai/types/beta/threads/runs/retrieval_tool_call_delta.py
rename to src/openai/types/beta/threads/runs/file_search_tool_call_delta.py
index 3310079399..df5ac217dc 100644
--- a/src/openai/types/beta/threads/runs/retrieval_tool_call_delta.py
+++ b/src/openai/types/beta/threads/runs/file_search_tool_call_delta.py
@@ -5,21 +5,21 @@
from ....._models import BaseModel
-__all__ = ["RetrievalToolCallDelta"]
+__all__ = ["FileSearchToolCallDelta"]
-class RetrievalToolCallDelta(BaseModel):
+class FileSearchToolCallDelta(BaseModel):
+ file_search: object
+ """For now, this is always going to be an empty object."""
+
index: int
"""The index of the tool call in the tool calls array."""
- type: Literal["retrieval"]
+ type: Literal["file_search"]
"""The type of tool call.
- This is always going to be `retrieval` for this type of tool call.
+ This is always going to be `file_search` for this type of tool call.
"""
id: Optional[str] = None
"""The ID of the tool call object."""
-
- retrieval: Optional[object] = None
- """For now, this is always going to be an empty object."""
diff --git a/src/openai/types/beta/threads/runs/tool_call.py b/src/openai/types/beta/threads/runs/tool_call.py
index dcca797bf0..77d86b46d9 100644
--- a/src/openai/types/beta/threads/runs/tool_call.py
+++ b/src/openai/types/beta/threads/runs/tool_call.py
@@ -5,11 +5,11 @@
from ....._utils import PropertyInfo
from .function_tool_call import FunctionToolCall
-from .retrieval_tool_call import RetrievalToolCall
+from .file_search_tool_call import FileSearchToolCall
from .code_interpreter_tool_call import CodeInterpreterToolCall
__all__ = ["ToolCall"]
ToolCall = Annotated[
- Union[CodeInterpreterToolCall, RetrievalToolCall, FunctionToolCall], PropertyInfo(discriminator="type")
+ Union[CodeInterpreterToolCall, FileSearchToolCall, FunctionToolCall], PropertyInfo(discriminator="type")
]
diff --git a/src/openai/types/beta/threads/runs/tool_call_delta.py b/src/openai/types/beta/threads/runs/tool_call_delta.py
index fc98981abf..90cfe0657e 100644
--- a/src/openai/types/beta/threads/runs/tool_call_delta.py
+++ b/src/openai/types/beta/threads/runs/tool_call_delta.py
@@ -5,12 +5,12 @@
from ....._utils import PropertyInfo
from .function_tool_call_delta import FunctionToolCallDelta
-from .retrieval_tool_call_delta import RetrievalToolCallDelta
+from .file_search_tool_call_delta import FileSearchToolCallDelta
from .code_interpreter_tool_call_delta import CodeInterpreterToolCallDelta
__all__ = ["ToolCallDelta"]
ToolCallDelta = Annotated[
- Union[CodeInterpreterToolCallDelta, RetrievalToolCallDelta, FunctionToolCallDelta],
+ Union[CodeInterpreterToolCallDelta, FileSearchToolCallDelta, FunctionToolCallDelta],
PropertyInfo(discriminator="type"),
]
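
Code that previously branched on `retrieval` tool calls must now branch on `file_search`, since `type` is the union discriminator. A hedged sketch of walking run steps with the renamed member (IDs are placeholders):

```python
from openai import OpenAI
from openai.types.beta.threads.runs import FileSearchToolCall, ToolCallsStepDetails

client = OpenAI()

steps = client.beta.threads.runs.steps.list("run_abc123", thread_id="thread_abc123")
for step in steps:
    if isinstance(step.step_details, ToolCallsStepDetails):
        for tool_call in step.step_details.tool_calls:
            # `type` discriminates the union; `retrieval` is now `file_search`.
            if isinstance(tool_call, FileSearchToolCall):
                print(tool_call.id, tool_call.type)
```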
diff --git a/src/openai/types/beta/threads/runs/tool_call_delta_object.py b/src/openai/types/beta/threads/runs/tool_call_delta_object.py
index 9cd59a6e24..189dce772c 100644
--- a/src/openai/types/beta/threads/runs/tool_call_delta_object.py
+++ b/src/openai/types/beta/threads/runs/tool_call_delta_object.py
@@ -17,5 +17,5 @@ class ToolCallDeltaObject(BaseModel):
"""An array of tool calls the run step was involved in.
These can be associated with one of three types of tools: `code_interpreter`,
- `retrieval`, or `function`.
+ `file_search`, or `function`.
"""
diff --git a/src/openai/types/beta/threads/runs/tool_calls_step_details.py b/src/openai/types/beta/threads/runs/tool_calls_step_details.py
index ca08fabd0e..a084d387c7 100644
--- a/src/openai/types/beta/threads/runs/tool_calls_step_details.py
+++ b/src/openai/types/beta/threads/runs/tool_calls_step_details.py
@@ -14,7 +14,7 @@ class ToolCallsStepDetails(BaseModel):
"""An array of tool calls the run step was involved in.
These can be associated with one of three types of tools: `code_interpreter`,
- `retrieval`, or `function`.
+ `file_search`, or `function`.
"""
type: Literal["tool_calls"]
diff --git a/src/openai/types/beta/vector_store.py b/src/openai/types/beta/vector_store.py
new file mode 100644
index 0000000000..122705734d
--- /dev/null
+++ b/src/openai/types/beta/vector_store.py
@@ -0,0 +1,79 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["VectorStore", "FileCounts", "ExpiresAfter"]
+
+
+class FileCounts(BaseModel):
+ cancelled: int
+ """The number of files that were cancelled."""
+
+ completed: int
+ """The number of files that have been successfully processed."""
+
+ failed: int
+ """The number of files that have failed to process."""
+
+ in_progress: int
+ """The number of files that are currently being processed."""
+
+ total: int
+ """The total number of files."""
+
+
+class ExpiresAfter(BaseModel):
+ anchor: Literal["last_active_at"]
+ """Anchor timestamp after which the expiration policy applies.
+
+ Supported anchors: `last_active_at`.
+ """
+
+ days: int
+ """The number of days after the anchor time that the vector store will expire."""
+
+
+class VectorStore(BaseModel):
+ id: str
+ """The identifier, which can be referenced in API endpoints."""
+
+ bytes: int
+ """The byte size of the vector store."""
+
+ created_at: int
+ """The Unix timestamp (in seconds) for when the vector store was created."""
+
+ file_counts: FileCounts
+
+ last_active_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the vector store was last active."""
+
+ metadata: Optional[object] = None
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format. Keys can be a maximum of 64 characters long and values can be
+ a maximum of 512 characters long.
+ """
+
+ name: str
+ """The name of the vector store."""
+
+ object: Literal["vector_store"]
+ """The object type, which is always `vector_store`."""
+
+ status: Literal["expired", "in_progress", "completed"]
+ """
+ The status of the vector store, which can be either `expired`, `in_progress`, or
+ `completed`. A status of `completed` indicates that the vector store is ready
+ for use.
+ """
+
+ expires_after: Optional[ExpiresAfter] = None
+ """The expiration policy for a vector store."""
+
+ expires_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the vector store will expire."""
diff --git a/src/openai/types/beta/vector_store_create_params.py b/src/openai/types/beta/vector_store_create_params.py
new file mode 100644
index 0000000000..f1a3abcbdf
--- /dev/null
+++ b/src/openai/types/beta/vector_store_create_params.py
@@ -0,0 +1,42 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Optional
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["VectorStoreCreateParams", "ExpiresAfter"]
+
+
+class VectorStoreCreateParams(TypedDict, total=False):
+ expires_after: ExpiresAfter
+ """The expiration policy for a vector store."""
+
+ file_ids: List[str]
+ """
+ A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
+ the vector store should use. Useful for tools like `file_search` that can access
+ files.
+ """
+
+ metadata: Optional[object]
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format. Keys can be a maximum of 64 characters long and values can be
+ a maximum of 512 characters long.
+ """
+
+ name: str
+ """The name of the vector store."""
+
+
+class ExpiresAfter(TypedDict, total=False):
+ anchor: Required[Literal["last_active_at"]]
+ """Anchor timestamp after which the expiration policy applies.
+
+ Supported anchors: `last_active_at`.
+ """
+
+ days: Required[int]
+ """The number of days after the anchor time that the vector store will expire."""
diff --git a/src/openai/types/beta/retrieval_tool.py b/src/openai/types/beta/vector_store_deleted.py
similarity index 52%
rename from src/openai/types/beta/retrieval_tool.py
rename to src/openai/types/beta/vector_store_deleted.py
index b07b785c66..21ccda1db5 100644
--- a/src/openai/types/beta/retrieval_tool.py
+++ b/src/openai/types/beta/vector_store_deleted.py
@@ -4,9 +4,12 @@
from ..._models import BaseModel
-__all__ = ["RetrievalTool"]
+__all__ = ["VectorStoreDeleted"]
-class RetrievalTool(BaseModel):
- type: Literal["retrieval"]
- """The type of tool being defined: `retrieval`"""
+class VectorStoreDeleted(BaseModel):
+ id: str
+
+ deleted: bool
+
+ object: Literal["vector_store.deleted"]
diff --git a/src/openai/types/beta/assistants/file_list_params.py b/src/openai/types/beta/vector_store_list_params.py
similarity index 92%
rename from src/openai/types/beta/assistants/file_list_params.py
rename to src/openai/types/beta/vector_store_list_params.py
index 53c493b36a..f39f67266d 100644
--- a/src/openai/types/beta/assistants/file_list_params.py
+++ b/src/openai/types/beta/vector_store_list_params.py
@@ -4,10 +4,10 @@
from typing_extensions import Literal, TypedDict
-__all__ = ["FileListParams"]
+__all__ = ["VectorStoreListParams"]
-class FileListParams(TypedDict, total=False):
+class VectorStoreListParams(TypedDict, total=False):
after: str
"""A cursor for use in pagination.
diff --git a/src/openai/types/beta/vector_store_update_params.py b/src/openai/types/beta/vector_store_update_params.py
new file mode 100644
index 0000000000..0f9593e476
--- /dev/null
+++ b/src/openai/types/beta/vector_store_update_params.py
@@ -0,0 +1,35 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["VectorStoreUpdateParams", "ExpiresAfter"]
+
+
+class VectorStoreUpdateParams(TypedDict, total=False):
+ expires_after: Optional[ExpiresAfter]
+ """The expiration policy for a vector store."""
+
+ metadata: Optional[object]
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format. Keys can be a maximum of 64 characters long and values can be
+ a maximum of 512 characters long.
+ """
+
+ name: Optional[str]
+ """The name of the vector store."""
+
+
+class ExpiresAfter(TypedDict, total=False):
+ anchor: Required[Literal["last_active_at"]]
+ """Anchor timestamp after which the expiration policy applies.
+
+ Supported anchors: `last_active_at`.
+ """
+
+ days: Required[int]
+ """The number of days after the anchor time that the vector store will expire."""
diff --git a/src/openai/types/beta/vector_stores/__init__.py b/src/openai/types/beta/vector_stores/__init__.py
new file mode 100644
index 0000000000..ff05dd63d8
--- /dev/null
+++ b/src/openai/types/beta/vector_stores/__init__.py
@@ -0,0 +1,11 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .file_list_params import FileListParams as FileListParams
+from .vector_store_file import VectorStoreFile as VectorStoreFile
+from .file_create_params import FileCreateParams as FileCreateParams
+from .vector_store_file_batch import VectorStoreFileBatch as VectorStoreFileBatch
+from .file_batch_create_params import FileBatchCreateParams as FileBatchCreateParams
+from .vector_store_file_deleted import VectorStoreFileDeleted as VectorStoreFileDeleted
+from .file_batch_list_files_params import FileBatchListFilesParams as FileBatchListFilesParams
diff --git a/src/openai/types/beta/vector_stores/file_batch_create_params.py b/src/openai/types/beta/vector_stores/file_batch_create_params.py
new file mode 100644
index 0000000000..0882829732
--- /dev/null
+++ b/src/openai/types/beta/vector_stores/file_batch_create_params.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import Required, TypedDict
+
+__all__ = ["FileBatchCreateParams"]
+
+
+class FileBatchCreateParams(TypedDict, total=False):
+ file_ids: Required[List[str]]
+ """
+ A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
+ the vector store should use. Useful for tools like `file_search` that can access
+ files.
+ """
diff --git a/src/openai/types/beta/vector_stores/file_batch_list_files_params.py b/src/openai/types/beta/vector_stores/file_batch_list_files_params.py
new file mode 100644
index 0000000000..24dee7d5a5
--- /dev/null
+++ b/src/openai/types/beta/vector_stores/file_batch_list_files_params.py
@@ -0,0 +1,47 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["FileBatchListFilesParams"]
+
+
+class FileBatchListFilesParams(TypedDict, total=False):
+ vector_store_id: Required[str]
+
+ after: str
+ """A cursor for use in pagination.
+
+ `after` is an object ID that defines your place in the list. For instance, if
+ you make a list request and receive 100 objects, ending with obj_foo, your
+ subsequent call can include after=obj_foo in order to fetch the next page of the
+ list.
+ """
+
+ before: str
+ """A cursor for use in pagination.
+
+ `before` is an object ID that defines your place in the list. For instance, if
+ you make a list request and receive 100 objects, ending with obj_foo, your
+ subsequent call can include before=obj_foo in order to fetch the previous page
+ of the list.
+ """
+
+ filter: Literal["in_progress", "completed", "failed", "cancelled"]
+ """Filter by file status.
+
+ One of `in_progress`, `completed`, `failed`, `cancelled`.
+ """
+
+ limit: int
+ """A limit on the number of objects to be returned.
+
+ Limit can range between 1 and 100, and the default is 20.
+ """
+
+ order: Literal["asc", "desc"]
+ """Sort order by the `created_at` timestamp of the objects.
+
+ `asc` for ascending order and `desc` for descending order.
+ """
diff --git a/src/openai/types/beta/assistants/file_create_params.py b/src/openai/types/beta/vector_stores/file_create_params.py
similarity index 70%
rename from src/openai/types/beta/assistants/file_create_params.py
rename to src/openai/types/beta/vector_stores/file_create_params.py
index 55f0e8cda1..2fee588abf 100644
--- a/src/openai/types/beta/assistants/file_create_params.py
+++ b/src/openai/types/beta/vector_stores/file_create_params.py
@@ -10,7 +10,7 @@
class FileCreateParams(TypedDict, total=False):
file_id: Required[str]
"""
- A [File](https://platform.openai.com/docs/api-reference/files) ID (with
- `purpose="assistants"`) that the assistant should use. Useful for tools like
- `retrieval` and `code_interpreter` that can access files.
+ A [File](https://platform.openai.com/docs/api-reference/files) ID that the
+ vector store should use. Useful for tools like `file_search` that can access
+ files.
"""
diff --git a/src/openai/types/beta/threads/messages/file_list_params.py b/src/openai/types/beta/vector_stores/file_list_params.py
similarity index 84%
rename from src/openai/types/beta/threads/messages/file_list_params.py
rename to src/openai/types/beta/vector_stores/file_list_params.py
index 7e2d6136ec..23dd7f0d94 100644
--- a/src/openai/types/beta/threads/messages/file_list_params.py
+++ b/src/openai/types/beta/vector_stores/file_list_params.py
@@ -2,14 +2,12 @@
from __future__ import annotations
-from typing_extensions import Literal, Required, TypedDict
+from typing_extensions import Literal, TypedDict
__all__ = ["FileListParams"]
class FileListParams(TypedDict, total=False):
- thread_id: Required[str]
-
after: str
"""A cursor for use in pagination.
@@ -28,6 +26,12 @@ class FileListParams(TypedDict, total=False):
of the list.
"""
+ filter: Literal["in_progress", "completed", "failed", "cancelled"]
+ """Filter by file status.
+
+ One of `in_progress`, `completed`, `failed`, `cancelled`.
+ """
+
limit: int
"""A limit on the number of objects to be returned.
diff --git a/src/openai/types/beta/vector_stores/vector_store_file.py b/src/openai/types/beta/vector_stores/vector_store_file.py
new file mode 100644
index 0000000000..a878b281d5
--- /dev/null
+++ b/src/openai/types/beta/vector_stores/vector_store_file.py
@@ -0,0 +1,48 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["VectorStoreFile", "LastError"]
+
+
+class LastError(BaseModel):
+ code: Literal["internal_error", "file_not_found", "parsing_error", "unhandled_mime_type"]
+ """One of `server_error` or `rate_limit_exceeded`."""
+
+ message: str
+ """A human-readable description of the error."""
+
+
+class VectorStoreFile(BaseModel):
+ id: str
+ """The identifier, which can be referenced in API endpoints."""
+
+ created_at: int
+ """The Unix timestamp (in seconds) for when the vector store file was created."""
+
+ last_error: Optional[LastError] = None
+ """The last error associated with this vector store file.
+
+ Will be `null` if there are no errors.
+ """
+
+ object: Literal["vector_store.file"]
+ """The object type, which is always `vector_store.file`."""
+
+ status: Literal["in_progress", "completed", "cancelled", "failed"]
+ """
+ The status of the vector store file, which can be either `in_progress`,
+ `completed`, `cancelled`, or `failed`. The status `completed` indicates that the
+ vector store file is ready for use.
+ """
+
+ vector_store_id: str
+ """
+ The ID of the
+ [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ that the [File](https://platform.openai.com/docs/api-reference/files) is
+ attached to.
+ """
diff --git a/src/openai/types/beta/vector_stores/vector_store_file_batch.py b/src/openai/types/beta/vector_stores/vector_store_file_batch.py
new file mode 100644
index 0000000000..df130a58de
--- /dev/null
+++ b/src/openai/types/beta/vector_stores/vector_store_file_batch.py
@@ -0,0 +1,54 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["VectorStoreFileBatch", "FileCounts"]
+
+
+class FileCounts(BaseModel):
+ cancelled: int
+ """The number of files that where cancelled."""
+
+ completed: int
+ """The number of files that have been processed."""
+
+ failed: int
+ """The number of files that have failed to process."""
+
+ in_progress: int
+ """The number of files that are currently being processed."""
+
+ total: int
+ """The total number of files."""
+
+
+class VectorStoreFileBatch(BaseModel):
+ id: str
+ """The identifier, which can be referenced in API endpoints."""
+
+ created_at: int
+ """
+ The Unix timestamp (in seconds) for when the vector store files batch was
+ created.
+ """
+
+ file_counts: FileCounts
+
+ object: Literal["vector_store.files_batch"]
+ """The object type, which is always `vector_store.file_batch`."""
+
+ status: Literal["in_progress", "completed", "cancelled", "failed"]
+ """
+ The status of the vector store files batch, which can be either `in_progress`,
+ `completed`, `cancelled` or `failed`.
+ """
+
+ vector_store_id: str
+ """
+ The ID of the
+ [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ that the [File](https://platform.openai.com/docs/api-reference/files) is
+ attached to.
+ """
diff --git a/src/openai/types/beta/assistants/file_delete_response.py b/src/openai/types/beta/vector_stores/vector_store_file_deleted.py
similarity index 60%
rename from src/openai/types/beta/assistants/file_delete_response.py
rename to src/openai/types/beta/vector_stores/vector_store_file_deleted.py
index 685fb2a75c..ae37f84364 100644
--- a/src/openai/types/beta/assistants/file_delete_response.py
+++ b/src/openai/types/beta/vector_stores/vector_store_file_deleted.py
@@ -4,12 +4,12 @@
from ...._models import BaseModel
-__all__ = ["FileDeleteResponse"]
+__all__ = ["VectorStoreFileDeleted"]
-class FileDeleteResponse(BaseModel):
+class VectorStoreFileDeleted(BaseModel):
id: str
deleted: bool
- object: Literal["assistant.file.deleted"]
+ object: Literal["vector_store.file.deleted"]
diff --git a/src/openai/types/fine_tuning/job_create_params.py b/src/openai/types/fine_tuning/job_create_params.py
index 892c737fa3..1925f90d12 100644
--- a/src/openai/types/fine_tuning/job_create_params.py
+++ b/src/openai/types/fine_tuning/job_create_params.py
@@ -19,7 +19,7 @@ class JobCreateParams(TypedDict, total=False):
training_file: Required[str]
"""The ID of an uploaded file that contains training data.
- See [upload file](https://platform.openai.com/docs/api-reference/files/upload)
+ See [upload file](https://platform.openai.com/docs/api-reference/files/create)
for how to upload a file.
Your dataset must be formatted as a JSONL file. Additionally, you must upload
diff --git a/tests/api_resources/beta/test_assistants.py b/tests/api_resources/beta/test_assistants.py
index a509627b8e..a92acb2ca5 100644
--- a/tests/api_resources/beta/test_assistants.py
+++ b/tests/api_resources/beta/test_assistants.py
@@ -33,11 +33,25 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None:
assistant = client.beta.assistants.create(
model="gpt-4-turbo",
description="string",
- file_ids=["string", "string", "string"],
instructions="string",
metadata={},
name="string",
+ response_format="none",
+ temperature=1,
+ tool_resources={
+ "code_interpreter": {"file_ids": ["string", "string", "string"]},
+ "file_search": {
+ "vector_store_ids": ["string"],
+ "vector_stores": [
+ {
+ "file_ids": ["string", "string", "string"],
+ "metadata": {},
+ }
+ ],
+ },
+ },
tools=[{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}],
+ top_p=1,
)
assert_matches_type(Assistant, assistant, path=["response"])
@@ -115,12 +129,18 @@ def test_method_update_with_all_params(self, client: OpenAI) -> None:
assistant = client.beta.assistants.update(
"string",
description="string",
- file_ids=["string", "string", "string"],
instructions="string",
metadata={},
model="string",
name="string",
+ response_format="none",
+ temperature=1,
+ tool_resources={
+ "code_interpreter": {"file_ids": ["string", "string", "string"]},
+ "file_search": {"vector_store_ids": ["string"]},
+ },
tools=[{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}],
+ top_p=1,
)
assert_matches_type(Assistant, assistant, path=["response"])
@@ -244,11 +264,25 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) ->
assistant = await async_client.beta.assistants.create(
model="gpt-4-turbo",
description="string",
- file_ids=["string", "string", "string"],
instructions="string",
metadata={},
name="string",
+ response_format="none",
+ temperature=1,
+ tool_resources={
+ "code_interpreter": {"file_ids": ["string", "string", "string"]},
+ "file_search": {
+ "vector_store_ids": ["string"],
+ "vector_stores": [
+ {
+ "file_ids": ["string", "string", "string"],
+ "metadata": {},
+ }
+ ],
+ },
+ },
tools=[{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}],
+ top_p=1,
)
assert_matches_type(Assistant, assistant, path=["response"])
@@ -326,12 +360,18 @@ async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) ->
assistant = await async_client.beta.assistants.update(
"string",
description="string",
- file_ids=["string", "string", "string"],
instructions="string",
metadata={},
model="string",
name="string",
+ response_format="none",
+ temperature=1,
+ tool_resources={
+ "code_interpreter": {"file_ids": ["string", "string", "string"]},
+ "file_search": {"vector_store_ids": ["string"]},
+ },
tools=[{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}],
+ top_p=1,
)
assert_matches_type(Assistant, assistant, path=["response"])
diff --git a/tests/api_resources/beta/test_threads.py b/tests/api_resources/beta/test_threads.py
index 7c07251433..980fd9a75e 100644
--- a/tests/api_resources/beta/test_threads.py
+++ b/tests/api_resources/beta/test_threads.py
@@ -33,23 +33,74 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None:
{
"role": "user",
"content": "x",
- "file_ids": ["string"],
+ "attachments": [
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ ],
"metadata": {},
},
{
"role": "user",
"content": "x",
- "file_ids": ["string"],
+ "attachments": [
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ ],
"metadata": {},
},
{
"role": "user",
"content": "x",
- "file_ids": ["string"],
+ "attachments": [
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ ],
"metadata": {},
},
],
metadata={},
+ tool_resources={
+ "code_interpreter": {"file_ids": ["string", "string", "string"]},
+ "file_search": {
+ "vector_store_ids": ["string"],
+ "vector_stores": [
+ {
+ "file_ids": ["string", "string", "string"],
+ "metadata": {},
+ }
+ ],
+ },
+ },
)
assert_matches_type(Thread, thread, path=["response"])
@@ -123,6 +174,10 @@ def test_method_update_with_all_params(self, client: OpenAI) -> None:
thread = client.beta.threads.update(
"string",
metadata={},
+ tool_resources={
+ "code_interpreter": {"file_ids": ["string", "string", "string"]},
+ "file_search": {"vector_store_ids": ["string"]},
+ },
)
assert_matches_type(Thread, thread, path=["response"])
@@ -219,26 +274,82 @@ def test_method_create_and_run_with_all_params_overload_1(self, client: OpenAI)
{
"role": "user",
"content": "x",
- "file_ids": ["string"],
+ "attachments": [
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ ],
"metadata": {},
},
{
"role": "user",
"content": "x",
- "file_ids": ["string"],
+ "attachments": [
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ ],
"metadata": {},
},
{
"role": "user",
"content": "x",
- "file_ids": ["string"],
+ "attachments": [
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ ],
"metadata": {},
},
],
+ "tool_resources": {
+ "code_interpreter": {"file_ids": ["string", "string", "string"]},
+ "file_search": {
+ "vector_store_ids": ["string"],
+ "vector_stores": [
+ {
+ "file_ids": ["string", "string", "string"],
+ "metadata": {},
+ }
+ ],
+ },
+ },
"metadata": {},
},
tool_choice="none",
+ tool_resources={
+ "code_interpreter": {"file_ids": ["string", "string", "string"]},
+ "file_search": {"vector_store_ids": ["string"]},
+ },
tools=[{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}],
+ top_p=1,
truncation_strategy={
"type": "auto",
"last_messages": 1,
@@ -295,26 +406,82 @@ def test_method_create_and_run_with_all_params_overload_2(self, client: OpenAI)
{
"role": "user",
"content": "x",
- "file_ids": ["string"],
+ "attachments": [
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ ],
"metadata": {},
},
{
"role": "user",
"content": "x",
- "file_ids": ["string"],
+ "attachments": [
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ ],
"metadata": {},
},
{
"role": "user",
"content": "x",
- "file_ids": ["string"],
+ "attachments": [
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ ],
"metadata": {},
},
],
+ "tool_resources": {
+ "code_interpreter": {"file_ids": ["string", "string", "string"]},
+ "file_search": {
+ "vector_store_ids": ["string"],
+ "vector_stores": [
+ {
+ "file_ids": ["string", "string", "string"],
+ "metadata": {},
+ }
+ ],
+ },
+ },
"metadata": {},
},
tool_choice="none",
+ tool_resources={
+ "code_interpreter": {"file_ids": ["string", "string", "string"]},
+ "file_search": {"vector_store_ids": ["string"]},
+ },
tools=[{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}],
+ top_p=1,
truncation_strategy={
"type": "auto",
"last_messages": 1,
@@ -363,23 +530,74 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) ->
{
"role": "user",
"content": "x",
- "file_ids": ["string"],
+ "attachments": [
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ ],
"metadata": {},
},
{
"role": "user",
"content": "x",
- "file_ids": ["string"],
+ "attachments": [
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ ],
"metadata": {},
},
{
"role": "user",
"content": "x",
- "file_ids": ["string"],
+ "attachments": [
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ ],
"metadata": {},
},
],
metadata={},
+ tool_resources={
+ "code_interpreter": {"file_ids": ["string", "string", "string"]},
+ "file_search": {
+ "vector_store_ids": ["string"],
+ "vector_stores": [
+ {
+ "file_ids": ["string", "string", "string"],
+ "metadata": {},
+ }
+ ],
+ },
+ },
)
assert_matches_type(Thread, thread, path=["response"])
@@ -453,6 +671,10 @@ async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) ->
thread = await async_client.beta.threads.update(
"string",
metadata={},
+ tool_resources={
+ "code_interpreter": {"file_ids": ["string", "string", "string"]},
+ "file_search": {"vector_store_ids": ["string"]},
+ },
)
assert_matches_type(Thread, thread, path=["response"])
@@ -549,26 +771,82 @@ async def test_method_create_and_run_with_all_params_overload_1(self, async_clie
{
"role": "user",
"content": "x",
- "file_ids": ["string"],
+ "attachments": [
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ ],
"metadata": {},
},
{
"role": "user",
"content": "x",
- "file_ids": ["string"],
+ "attachments": [
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ ],
"metadata": {},
},
{
"role": "user",
"content": "x",
- "file_ids": ["string"],
+ "attachments": [
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ ],
"metadata": {},
},
],
+ "tool_resources": {
+ "code_interpreter": {"file_ids": ["string", "string", "string"]},
+ "file_search": {
+ "vector_store_ids": ["string"],
+ "vector_stores": [
+ {
+ "file_ids": ["string", "string", "string"],
+ "metadata": {},
+ }
+ ],
+ },
+ },
"metadata": {},
},
tool_choice="none",
+ tool_resources={
+ "code_interpreter": {"file_ids": ["string", "string", "string"]},
+ "file_search": {"vector_store_ids": ["string"]},
+ },
tools=[{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}],
+ top_p=1,
truncation_strategy={
"type": "auto",
"last_messages": 1,
@@ -625,26 +903,82 @@ async def test_method_create_and_run_with_all_params_overload_2(self, async_clie
{
"role": "user",
"content": "x",
- "file_ids": ["string"],
+ "attachments": [
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ ],
"metadata": {},
},
{
"role": "user",
"content": "x",
- "file_ids": ["string"],
+ "attachments": [
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ ],
"metadata": {},
},
{
"role": "user",
"content": "x",
- "file_ids": ["string"],
+ "attachments": [
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ ],
"metadata": {},
},
],
+ "tool_resources": {
+ "code_interpreter": {"file_ids": ["string", "string", "string"]},
+ "file_search": {
+ "vector_store_ids": ["string"],
+ "vector_stores": [
+ {
+ "file_ids": ["string", "string", "string"],
+ "metadata": {},
+ }
+ ],
+ },
+ },
"metadata": {},
},
tool_choice="none",
+ tool_resources={
+ "code_interpreter": {"file_ids": ["string", "string", "string"]},
+ "file_search": {"vector_store_ids": ["string"]},
+ },
tools=[{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}],
+ top_p=1,
truncation_strategy={
"type": "auto",
"last_messages": 1,
diff --git a/tests/api_resources/beta/test_vector_stores.py b/tests/api_resources/beta/test_vector_stores.py
new file mode 100644
index 0000000000..e671c96a45
--- /dev/null
+++ b/tests/api_resources/beta/test_vector_stores.py
@@ -0,0 +1,426 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from openai import OpenAI, AsyncOpenAI
+from tests.utils import assert_matches_type
+from openai.pagination import SyncCursorPage, AsyncCursorPage
+from openai.types.beta import (
+ VectorStore,
+ VectorStoreDeleted,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestVectorStores:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ def test_method_create(self, client: OpenAI) -> None:
+ vector_store = client.beta.vector_stores.create()
+ assert_matches_type(VectorStore, vector_store, path=["response"])
+
+ @parametrize
+ def test_method_create_with_all_params(self, client: OpenAI) -> None:
+ vector_store = client.beta.vector_stores.create(
+ expires_after={
+ "anchor": "last_active_at",
+ "days": 1,
+ },
+ file_ids=["string", "string", "string"],
+ metadata={},
+ name="string",
+ )
+ assert_matches_type(VectorStore, vector_store, path=["response"])
+
+ @parametrize
+ def test_raw_response_create(self, client: OpenAI) -> None:
+ response = client.beta.vector_stores.with_raw_response.create()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ vector_store = response.parse()
+ assert_matches_type(VectorStore, vector_store, path=["response"])
+
+ @parametrize
+ def test_streaming_response_create(self, client: OpenAI) -> None:
+ with client.beta.vector_stores.with_streaming_response.create() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ vector_store = response.parse()
+ assert_matches_type(VectorStore, vector_store, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_method_retrieve(self, client: OpenAI) -> None:
+ vector_store = client.beta.vector_stores.retrieve(
+ "string",
+ )
+ assert_matches_type(VectorStore, vector_store, path=["response"])
+
+ @parametrize
+ def test_raw_response_retrieve(self, client: OpenAI) -> None:
+ response = client.beta.vector_stores.with_raw_response.retrieve(
+ "string",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ vector_store = response.parse()
+ assert_matches_type(VectorStore, vector_store, path=["response"])
+
+ @parametrize
+ def test_streaming_response_retrieve(self, client: OpenAI) -> None:
+ with client.beta.vector_stores.with_streaming_response.retrieve(
+ "string",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ vector_store = response.parse()
+ assert_matches_type(VectorStore, vector_store, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_retrieve(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
+ client.beta.vector_stores.with_raw_response.retrieve(
+ "",
+ )
+
+ @parametrize
+ def test_method_update(self, client: OpenAI) -> None:
+ vector_store = client.beta.vector_stores.update(
+ "string",
+ )
+ assert_matches_type(VectorStore, vector_store, path=["response"])
+
+ @parametrize
+ def test_method_update_with_all_params(self, client: OpenAI) -> None:
+ vector_store = client.beta.vector_stores.update(
+ "string",
+ expires_after={
+ "anchor": "last_active_at",
+ "days": 1,
+ },
+ metadata={},
+ name="string",
+ )
+ assert_matches_type(VectorStore, vector_store, path=["response"])
+
+ @parametrize
+ def test_raw_response_update(self, client: OpenAI) -> None:
+ response = client.beta.vector_stores.with_raw_response.update(
+ "string",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ vector_store = response.parse()
+ assert_matches_type(VectorStore, vector_store, path=["response"])
+
+ @parametrize
+ def test_streaming_response_update(self, client: OpenAI) -> None:
+ with client.beta.vector_stores.with_streaming_response.update(
+ "string",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ vector_store = response.parse()
+ assert_matches_type(VectorStore, vector_store, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_update(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
+ client.beta.vector_stores.with_raw_response.update(
+ "",
+ )
+
+ @parametrize
+ def test_method_list(self, client: OpenAI) -> None:
+ vector_store = client.beta.vector_stores.list()
+ assert_matches_type(SyncCursorPage[VectorStore], vector_store, path=["response"])
+
+ @parametrize
+ def test_method_list_with_all_params(self, client: OpenAI) -> None:
+ vector_store = client.beta.vector_stores.list(
+ after="string",
+ before="string",
+ limit=0,
+ order="asc",
+ )
+ assert_matches_type(SyncCursorPage[VectorStore], vector_store, path=["response"])
+
+ @parametrize
+ def test_raw_response_list(self, client: OpenAI) -> None:
+ response = client.beta.vector_stores.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ vector_store = response.parse()
+ assert_matches_type(SyncCursorPage[VectorStore], vector_store, path=["response"])
+
+ @parametrize
+ def test_streaming_response_list(self, client: OpenAI) -> None:
+ with client.beta.vector_stores.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ vector_store = response.parse()
+ assert_matches_type(SyncCursorPage[VectorStore], vector_store, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_method_delete(self, client: OpenAI) -> None:
+ vector_store = client.beta.vector_stores.delete(
+ "string",
+ )
+ assert_matches_type(VectorStoreDeleted, vector_store, path=["response"])
+
+ @parametrize
+ def test_raw_response_delete(self, client: OpenAI) -> None:
+ response = client.beta.vector_stores.with_raw_response.delete(
+ "string",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ vector_store = response.parse()
+ assert_matches_type(VectorStoreDeleted, vector_store, path=["response"])
+
+ @parametrize
+ def test_streaming_response_delete(self, client: OpenAI) -> None:
+ with client.beta.vector_stores.with_streaming_response.delete(
+ "string",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ vector_store = response.parse()
+ assert_matches_type(VectorStoreDeleted, vector_store, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_delete(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
+ client.beta.vector_stores.with_raw_response.delete(
+ "",
+ )
+
+
+class TestAsyncVectorStores:
+ parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ async def test_method_create(self, async_client: AsyncOpenAI) -> None:
+ vector_store = await async_client.beta.vector_stores.create()
+ assert_matches_type(VectorStore, vector_store, path=["response"])
+
+ @parametrize
+ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ vector_store = await async_client.beta.vector_stores.create(
+ expires_after={
+ "anchor": "last_active_at",
+ "days": 1,
+ },
+ file_ids=["string", "string", "string"],
+ metadata={},
+ name="string",
+ )
+ assert_matches_type(VectorStore, vector_store, path=["response"])
+
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.beta.vector_stores.with_raw_response.create()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ vector_store = response.parse()
+ assert_matches_type(VectorStore, vector_store, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.beta.vector_stores.with_streaming_response.create() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ vector_store = await response.parse()
+ assert_matches_type(VectorStore, vector_store, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
+ vector_store = await async_client.beta.vector_stores.retrieve(
+ "string",
+ )
+ assert_matches_type(VectorStore, vector_store, path=["response"])
+
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.beta.vector_stores.with_raw_response.retrieve(
+ "string",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ vector_store = response.parse()
+ assert_matches_type(VectorStore, vector_store, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.beta.vector_stores.with_streaming_response.retrieve(
+ "string",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ vector_store = await response.parse()
+ assert_matches_type(VectorStore, vector_store, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
+ await async_client.beta.vector_stores.with_raw_response.retrieve(
+ "",
+ )
+
+ @parametrize
+ async def test_method_update(self, async_client: AsyncOpenAI) -> None:
+ vector_store = await async_client.beta.vector_stores.update(
+ "string",
+ )
+ assert_matches_type(VectorStore, vector_store, path=["response"])
+
+ @parametrize
+ async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ vector_store = await async_client.beta.vector_stores.update(
+ "string",
+ expires_after={
+ "anchor": "last_active_at",
+ "days": 1,
+ },
+ metadata={},
+ name="string",
+ )
+ assert_matches_type(VectorStore, vector_store, path=["response"])
+
+ @parametrize
+ async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.beta.vector_stores.with_raw_response.update(
+ "string",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ vector_store = response.parse()
+ assert_matches_type(VectorStore, vector_store, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.beta.vector_stores.with_streaming_response.update(
+ "string",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ vector_store = await response.parse()
+ assert_matches_type(VectorStore, vector_store, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_update(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
+ await async_client.beta.vector_stores.with_raw_response.update(
+ "",
+ )
+
+ @parametrize
+ async def test_method_list(self, async_client: AsyncOpenAI) -> None:
+ vector_store = await async_client.beta.vector_stores.list()
+ assert_matches_type(AsyncCursorPage[VectorStore], vector_store, path=["response"])
+
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ vector_store = await async_client.beta.vector_stores.list(
+ after="string",
+ before="string",
+ limit=0,
+ order="asc",
+ )
+ assert_matches_type(AsyncCursorPage[VectorStore], vector_store, path=["response"])
+
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.beta.vector_stores.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ vector_store = response.parse()
+ assert_matches_type(AsyncCursorPage[VectorStore], vector_store, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.beta.vector_stores.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ vector_store = await response.parse()
+ assert_matches_type(AsyncCursorPage[VectorStore], vector_store, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
+ vector_store = await async_client.beta.vector_stores.delete(
+ "string",
+ )
+ assert_matches_type(VectorStoreDeleted, vector_store, path=["response"])
+
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.beta.vector_stores.with_raw_response.delete(
+ "string",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ vector_store = response.parse()
+ assert_matches_type(VectorStoreDeleted, vector_store, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.beta.vector_stores.with_streaming_response.delete(
+ "string",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ vector_store = await response.parse()
+ assert_matches_type(VectorStoreDeleted, vector_store, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
+ await async_client.beta.vector_stores.with_raw_response.delete(
+ "",
+ )
diff --git a/tests/api_resources/beta/threads/messages/__init__.py b/tests/api_resources/beta/threads/messages/__init__.py
deleted file mode 100644
index fd8019a9a1..0000000000
--- a/tests/api_resources/beta/threads/messages/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/beta/threads/messages/test_files.py b/tests/api_resources/beta/threads/messages/test_files.py
deleted file mode 100644
index af4eea9377..0000000000
--- a/tests/api_resources/beta/threads/messages/test_files.py
+++ /dev/null
@@ -1,263 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from openai import OpenAI, AsyncOpenAI
-from tests.utils import assert_matches_type
-from openai.pagination import SyncCursorPage, AsyncCursorPage
-from openai.types.beta.threads.messages import MessageFile
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestFiles:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @parametrize
- def test_method_retrieve(self, client: OpenAI) -> None:
- file = client.beta.threads.messages.files.retrieve(
- "file-abc123",
- thread_id="thread_abc123",
- message_id="msg_abc123",
- )
- assert_matches_type(MessageFile, file, path=["response"])
-
- @parametrize
- def test_raw_response_retrieve(self, client: OpenAI) -> None:
- response = client.beta.threads.messages.files.with_raw_response.retrieve(
- "file-abc123",
- thread_id="thread_abc123",
- message_id="msg_abc123",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file = response.parse()
- assert_matches_type(MessageFile, file, path=["response"])
-
- @parametrize
- def test_streaming_response_retrieve(self, client: OpenAI) -> None:
- with client.beta.threads.messages.files.with_streaming_response.retrieve(
- "file-abc123",
- thread_id="thread_abc123",
- message_id="msg_abc123",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file = response.parse()
- assert_matches_type(MessageFile, file, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_path_params_retrieve(self, client: OpenAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.beta.threads.messages.files.with_raw_response.retrieve(
- "file-abc123",
- thread_id="",
- message_id="msg_abc123",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
- client.beta.threads.messages.files.with_raw_response.retrieve(
- "file-abc123",
- thread_id="thread_abc123",
- message_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
- client.beta.threads.messages.files.with_raw_response.retrieve(
- "",
- thread_id="thread_abc123",
- message_id="msg_abc123",
- )
-
- @parametrize
- def test_method_list(self, client: OpenAI) -> None:
- file = client.beta.threads.messages.files.list(
- "string",
- thread_id="string",
- )
- assert_matches_type(SyncCursorPage[MessageFile], file, path=["response"])
-
- @parametrize
- def test_method_list_with_all_params(self, client: OpenAI) -> None:
- file = client.beta.threads.messages.files.list(
- "string",
- thread_id="string",
- after="string",
- before="string",
- limit=0,
- order="asc",
- )
- assert_matches_type(SyncCursorPage[MessageFile], file, path=["response"])
-
- @parametrize
- def test_raw_response_list(self, client: OpenAI) -> None:
- response = client.beta.threads.messages.files.with_raw_response.list(
- "string",
- thread_id="string",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file = response.parse()
- assert_matches_type(SyncCursorPage[MessageFile], file, path=["response"])
-
- @parametrize
- def test_streaming_response_list(self, client: OpenAI) -> None:
- with client.beta.threads.messages.files.with_streaming_response.list(
- "string",
- thread_id="string",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file = response.parse()
- assert_matches_type(SyncCursorPage[MessageFile], file, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_path_params_list(self, client: OpenAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.beta.threads.messages.files.with_raw_response.list(
- "string",
- thread_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
- client.beta.threads.messages.files.with_raw_response.list(
- "",
- thread_id="string",
- )
-
-
-class TestAsyncFiles:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
- file = await async_client.beta.threads.messages.files.retrieve(
- "file-abc123",
- thread_id="thread_abc123",
- message_id="msg_abc123",
- )
- assert_matches_type(MessageFile, file, path=["response"])
-
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
- response = await async_client.beta.threads.messages.files.with_raw_response.retrieve(
- "file-abc123",
- thread_id="thread_abc123",
- message_id="msg_abc123",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file = response.parse()
- assert_matches_type(MessageFile, file, path=["response"])
-
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
- async with async_client.beta.threads.messages.files.with_streaming_response.retrieve(
- "file-abc123",
- thread_id="thread_abc123",
- message_id="msg_abc123",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file = await response.parse()
- assert_matches_type(MessageFile, file, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.beta.threads.messages.files.with_raw_response.retrieve(
- "file-abc123",
- thread_id="",
- message_id="msg_abc123",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
- await async_client.beta.threads.messages.files.with_raw_response.retrieve(
- "file-abc123",
- thread_id="thread_abc123",
- message_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
- await async_client.beta.threads.messages.files.with_raw_response.retrieve(
- "",
- thread_id="thread_abc123",
- message_id="msg_abc123",
- )
-
- @parametrize
- async def test_method_list(self, async_client: AsyncOpenAI) -> None:
- file = await async_client.beta.threads.messages.files.list(
- "string",
- thread_id="string",
- )
- assert_matches_type(AsyncCursorPage[MessageFile], file, path=["response"])
-
- @parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
- file = await async_client.beta.threads.messages.files.list(
- "string",
- thread_id="string",
- after="string",
- before="string",
- limit=0,
- order="asc",
- )
- assert_matches_type(AsyncCursorPage[MessageFile], file, path=["response"])
-
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
- response = await async_client.beta.threads.messages.files.with_raw_response.list(
- "string",
- thread_id="string",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file = response.parse()
- assert_matches_type(AsyncCursorPage[MessageFile], file, path=["response"])
-
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
- async with async_client.beta.threads.messages.files.with_streaming_response.list(
- "string",
- thread_id="string",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file = await response.parse()
- assert_matches_type(AsyncCursorPage[MessageFile], file, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_path_params_list(self, async_client: AsyncOpenAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.beta.threads.messages.files.with_raw_response.list(
- "string",
- thread_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
- await async_client.beta.threads.messages.files.with_raw_response.list(
- "",
- thread_id="string",
- )
diff --git a/tests/api_resources/beta/threads/test_messages.py b/tests/api_resources/beta/threads/test_messages.py
index 22198ccbc5..5ea5ac3bd5 100644
--- a/tests/api_resources/beta/threads/test_messages.py
+++ b/tests/api_resources/beta/threads/test_messages.py
@@ -33,7 +33,20 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None:
"string",
content="x",
role="user",
- file_ids=["string"],
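+            # `attachments` replaces `file_ids`: each entry names a file and
+            # the tools it should be added to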
+ attachments=[
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ ],
metadata={},
)
assert_matches_type(Message, message, path=["response"])
@@ -249,7 +262,20 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) ->
"string",
content="x",
role="user",
- file_ids=["string"],
+ attachments=[
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ ],
metadata={},
)
assert_matches_type(Message, message, path=["response"])
diff --git a/tests/api_resources/beta/threads/test_runs.py b/tests/api_resources/beta/threads/test_runs.py
index cf5b2998b9..3d8a6ce058 100644
--- a/tests/api_resources/beta/threads/test_runs.py
+++ b/tests/api_resources/beta/threads/test_runs.py
@@ -40,19 +40,58 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
{
"role": "user",
"content": "x",
- "file_ids": ["string"],
+ "attachments": [
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ ],
"metadata": {},
},
{
"role": "user",
"content": "x",
- "file_ids": ["string"],
+ "attachments": [
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ ],
"metadata": {},
},
{
"role": "user",
"content": "x",
- "file_ids": ["string"],
+ "attachments": [
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ ],
"metadata": {},
},
],
@@ -66,6 +105,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
temperature=1,
tool_choice="none",
tools=[{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}],
+            top_p=1,  # newly supported nucleus-sampling parameter
truncation_strategy={
"type": "auto",
"last_messages": 1,
@@ -127,19 +167,58 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
{
"role": "user",
"content": "x",
- "file_ids": ["string"],
+ "attachments": [
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ ],
"metadata": {},
},
{
"role": "user",
"content": "x",
- "file_ids": ["string"],
+ "attachments": [
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ ],
"metadata": {},
},
{
"role": "user",
"content": "x",
- "file_ids": ["string"],
+ "attachments": [
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ ],
"metadata": {},
},
],
@@ -152,6 +231,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
temperature=1,
tool_choice="none",
tools=[{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}],
+ top_p=1,
truncation_strategy={
"type": "auto",
"last_messages": 1,
@@ -552,19 +632,58 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
{
"role": "user",
"content": "x",
- "file_ids": ["string"],
+ "attachments": [
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ ],
"metadata": {},
},
{
"role": "user",
"content": "x",
- "file_ids": ["string"],
+ "attachments": [
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ ],
"metadata": {},
},
{
"role": "user",
"content": "x",
- "file_ids": ["string"],
+ "attachments": [
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ ],
"metadata": {},
},
],
@@ -578,6 +697,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
temperature=1,
tool_choice="none",
tools=[{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}],
+ top_p=1,
truncation_strategy={
"type": "auto",
"last_messages": 1,
@@ -639,19 +759,58 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
{
"role": "user",
"content": "x",
- "file_ids": ["string"],
+ "attachments": [
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ ],
"metadata": {},
},
{
"role": "user",
"content": "x",
- "file_ids": ["string"],
+ "attachments": [
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ ],
"metadata": {},
},
{
"role": "user",
"content": "x",
- "file_ids": ["string"],
+ "attachments": [
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ {
+ "file_id": "string",
+ "add_to": ["file_search", "code_interpreter"],
+ },
+ ],
"metadata": {},
},
],
@@ -664,6 +823,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
temperature=1,
tool_choice="none",
tools=[{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}],
+ top_p=1,
truncation_strategy={
"type": "auto",
"last_messages": 1,
diff --git a/tests/api_resources/beta/assistants/__init__.py b/tests/api_resources/beta/vector_stores/__init__.py
similarity index 100%
rename from tests/api_resources/beta/assistants/__init__.py
rename to tests/api_resources/beta/vector_stores/__init__.py
diff --git a/tests/api_resources/beta/vector_stores/test_file_batches.py b/tests/api_resources/beta/vector_stores/test_file_batches.py
new file mode 100644
index 0000000000..9854d1a138
--- /dev/null
+++ b/tests/api_resources/beta/vector_stores/test_file_batches.py
@@ -0,0 +1,424 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from openai import OpenAI, AsyncOpenAI
+from tests.utils import assert_matches_type
+from openai.pagination import SyncCursorPage, AsyncCursorPage
+from openai.types.beta.vector_stores import (
+ VectorStoreFile,
+ VectorStoreFileBatch,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
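+# These generated tests run against a local mock server (TEST_API_BASE_URL,
+# defaulting to http://127.0.0.1:4010) rather than the live API.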
+class TestFileBatches:
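+    # Each test runs twice via the indirect `client` fixture: once against a
+    # "loose" client and once against a "strict" one.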
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ def test_method_create(self, client: OpenAI) -> None:
+ file_batch = client.beta.vector_stores.file_batches.create(
+ "vs_abc123",
+ file_ids=["string"],
+ )
+ assert_matches_type(VectorStoreFileBatch, file_batch, path=["response"])
+
+ @parametrize
+ def test_raw_response_create(self, client: OpenAI) -> None:
+ response = client.beta.vector_stores.file_batches.with_raw_response.create(
+ "vs_abc123",
+ file_ids=["string"],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ file_batch = response.parse()
+ assert_matches_type(VectorStoreFileBatch, file_batch, path=["response"])
+
+ @parametrize
+ def test_streaming_response_create(self, client: OpenAI) -> None:
+ with client.beta.vector_stores.file_batches.with_streaming_response.create(
+ "vs_abc123",
+ file_ids=["string"],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ file_batch = response.parse()
+ assert_matches_type(VectorStoreFileBatch, file_batch, path=["response"])
+
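+        # once the with-block exits the response must be closed; cast(Any, ...)
+        # sidesteps static narrowing of `is_closed` from the assert above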
+ assert cast(Any, response.is_closed) is True
+
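+    # Empty path parameters are rejected client-side with a ValueError before
+    # any request is sent.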
+ @parametrize
+ def test_path_params_create(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
+ client.beta.vector_stores.file_batches.with_raw_response.create(
+ "",
+ file_ids=["string"],
+ )
+
+ @parametrize
+ def test_method_retrieve(self, client: OpenAI) -> None:
+ file_batch = client.beta.vector_stores.file_batches.retrieve(
+ "vsfb_abc123",
+ vector_store_id="vs_abc123",
+ )
+ assert_matches_type(VectorStoreFileBatch, file_batch, path=["response"])
+
+ @parametrize
+ def test_raw_response_retrieve(self, client: OpenAI) -> None:
+ response = client.beta.vector_stores.file_batches.with_raw_response.retrieve(
+ "vsfb_abc123",
+ vector_store_id="vs_abc123",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ file_batch = response.parse()
+ assert_matches_type(VectorStoreFileBatch, file_batch, path=["response"])
+
+ @parametrize
+ def test_streaming_response_retrieve(self, client: OpenAI) -> None:
+ with client.beta.vector_stores.file_batches.with_streaming_response.retrieve(
+ "vsfb_abc123",
+ vector_store_id="vs_abc123",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ file_batch = response.parse()
+ assert_matches_type(VectorStoreFileBatch, file_batch, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_retrieve(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
+ client.beta.vector_stores.file_batches.with_raw_response.retrieve(
+ "vsfb_abc123",
+ vector_store_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"):
+ client.beta.vector_stores.file_batches.with_raw_response.retrieve(
+ "",
+ vector_store_id="vs_abc123",
+ )
+
+ @parametrize
+ def test_method_cancel(self, client: OpenAI) -> None:
+ file_batch = client.beta.vector_stores.file_batches.cancel(
+ "string",
+ vector_store_id="string",
+ )
+ assert_matches_type(VectorStoreFileBatch, file_batch, path=["response"])
+
+ @parametrize
+ def test_raw_response_cancel(self, client: OpenAI) -> None:
+ response = client.beta.vector_stores.file_batches.with_raw_response.cancel(
+ "string",
+ vector_store_id="string",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ file_batch = response.parse()
+ assert_matches_type(VectorStoreFileBatch, file_batch, path=["response"])
+
+ @parametrize
+ def test_streaming_response_cancel(self, client: OpenAI) -> None:
+ with client.beta.vector_stores.file_batches.with_streaming_response.cancel(
+ "string",
+ vector_store_id="string",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ file_batch = response.parse()
+ assert_matches_type(VectorStoreFileBatch, file_batch, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_cancel(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
+ client.beta.vector_stores.file_batches.with_raw_response.cancel(
+ "string",
+ vector_store_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"):
+ client.beta.vector_stores.file_batches.with_raw_response.cancel(
+ "",
+ vector_store_id="string",
+ )
+
+ @parametrize
+ def test_method_list_files(self, client: OpenAI) -> None:
+ file_batch = client.beta.vector_stores.file_batches.list_files(
+ "string",
+ vector_store_id="string",
+ )
+ assert_matches_type(SyncCursorPage[VectorStoreFile], file_batch, path=["response"])
+
+ @parametrize
+ def test_method_list_files_with_all_params(self, client: OpenAI) -> None:
+ file_batch = client.beta.vector_stores.file_batches.list_files(
+ "string",
+ vector_store_id="string",
+ after="string",
+ before="string",
+ filter="in_progress",
+ limit=0,
+ order="asc",
+ )
+ assert_matches_type(SyncCursorPage[VectorStoreFile], file_batch, path=["response"])
+
+ @parametrize
+ def test_raw_response_list_files(self, client: OpenAI) -> None:
+ response = client.beta.vector_stores.file_batches.with_raw_response.list_files(
+ "string",
+ vector_store_id="string",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ file_batch = response.parse()
+ assert_matches_type(SyncCursorPage[VectorStoreFile], file_batch, path=["response"])
+
+ @parametrize
+ def test_streaming_response_list_files(self, client: OpenAI) -> None:
+ with client.beta.vector_stores.file_batches.with_streaming_response.list_files(
+ "string",
+ vector_store_id="string",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ file_batch = response.parse()
+ assert_matches_type(SyncCursorPage[VectorStoreFile], file_batch, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_list_files(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
+ client.beta.vector_stores.file_batches.with_raw_response.list_files(
+ "string",
+ vector_store_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"):
+ client.beta.vector_stores.file_batches.with_raw_response.list_files(
+ "",
+ vector_store_id="string",
+ )
+
+
+class TestAsyncFileBatches:
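+    # Async mirror of TestFileBatches, exercising the same endpoints through
+    # AsyncOpenAI.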
+ parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ async def test_method_create(self, async_client: AsyncOpenAI) -> None:
+ file_batch = await async_client.beta.vector_stores.file_batches.create(
+ "vs_abc123",
+ file_ids=["string"],
+ )
+ assert_matches_type(VectorStoreFileBatch, file_batch, path=["response"])
+
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.beta.vector_stores.file_batches.with_raw_response.create(
+ "vs_abc123",
+ file_ids=["string"],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ file_batch = response.parse()
+ assert_matches_type(VectorStoreFileBatch, file_batch, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.beta.vector_stores.file_batches.with_streaming_response.create(
+ "vs_abc123",
+ file_ids=["string"],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ file_batch = await response.parse()
+ assert_matches_type(VectorStoreFileBatch, file_batch, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_create(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
+ await async_client.beta.vector_stores.file_batches.with_raw_response.create(
+ "",
+ file_ids=["string"],
+ )
+
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
+ file_batch = await async_client.beta.vector_stores.file_batches.retrieve(
+ "vsfb_abc123",
+ vector_store_id="vs_abc123",
+ )
+ assert_matches_type(VectorStoreFileBatch, file_batch, path=["response"])
+
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.beta.vector_stores.file_batches.with_raw_response.retrieve(
+ "vsfb_abc123",
+ vector_store_id="vs_abc123",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ file_batch = response.parse()
+ assert_matches_type(VectorStoreFileBatch, file_batch, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.beta.vector_stores.file_batches.with_streaming_response.retrieve(
+ "vsfb_abc123",
+ vector_store_id="vs_abc123",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ file_batch = await response.parse()
+ assert_matches_type(VectorStoreFileBatch, file_batch, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
+ await async_client.beta.vector_stores.file_batches.with_raw_response.retrieve(
+ "vsfb_abc123",
+ vector_store_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"):
+ await async_client.beta.vector_stores.file_batches.with_raw_response.retrieve(
+ "",
+ vector_store_id="vs_abc123",
+ )
+
+ @parametrize
+ async def test_method_cancel(self, async_client: AsyncOpenAI) -> None:
+ file_batch = await async_client.beta.vector_stores.file_batches.cancel(
+ "string",
+ vector_store_id="string",
+ )
+ assert_matches_type(VectorStoreFileBatch, file_batch, path=["response"])
+
+ @parametrize
+ async def test_raw_response_cancel(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.beta.vector_stores.file_batches.with_raw_response.cancel(
+ "string",
+ vector_store_id="string",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ file_batch = response.parse()
+ assert_matches_type(VectorStoreFileBatch, file_batch, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_cancel(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.beta.vector_stores.file_batches.with_streaming_response.cancel(
+ "string",
+ vector_store_id="string",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ file_batch = await response.parse()
+ assert_matches_type(VectorStoreFileBatch, file_batch, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_cancel(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
+ await async_client.beta.vector_stores.file_batches.with_raw_response.cancel(
+ "string",
+ vector_store_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"):
+ await async_client.beta.vector_stores.file_batches.with_raw_response.cancel(
+ "",
+ vector_store_id="string",
+ )
+
+ @parametrize
+ async def test_method_list_files(self, async_client: AsyncOpenAI) -> None:
+ file_batch = await async_client.beta.vector_stores.file_batches.list_files(
+ "string",
+ vector_store_id="string",
+ )
+ assert_matches_type(AsyncCursorPage[VectorStoreFile], file_batch, path=["response"])
+
+ @parametrize
+ async def test_method_list_files_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ file_batch = await async_client.beta.vector_stores.file_batches.list_files(
+ "string",
+ vector_store_id="string",
+ after="string",
+ before="string",
+ filter="in_progress",
+ limit=0,
+ order="asc",
+ )
+ assert_matches_type(AsyncCursorPage[VectorStoreFile], file_batch, path=["response"])
+
+ @parametrize
+ async def test_raw_response_list_files(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.beta.vector_stores.file_batches.with_raw_response.list_files(
+ "string",
+ vector_store_id="string",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ file_batch = response.parse()
+ assert_matches_type(AsyncCursorPage[VectorStoreFile], file_batch, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_list_files(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.beta.vector_stores.file_batches.with_streaming_response.list_files(
+ "string",
+ vector_store_id="string",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ file_batch = await response.parse()
+ assert_matches_type(AsyncCursorPage[VectorStoreFile], file_batch, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_list_files(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
+ await async_client.beta.vector_stores.file_batches.with_raw_response.list_files(
+ "string",
+ vector_store_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"):
+ await async_client.beta.vector_stores.file_batches.with_raw_response.list_files(
+ "",
+ vector_store_id="string",
+ )
diff --git a/tests/api_resources/beta/assistants/test_files.py b/tests/api_resources/beta/vector_stores/test_files.py
similarity index 59%
rename from tests/api_resources/beta/assistants/test_files.py
rename to tests/api_resources/beta/vector_stores/test_files.py
index 50106234aa..58301e2d37 100644
--- a/tests/api_resources/beta/assistants/test_files.py
+++ b/tests/api_resources/beta/vector_stores/test_files.py
@@ -10,7 +10,10 @@
from openai import OpenAI, AsyncOpenAI
from tests.utils import assert_matches_type
from openai.pagination import SyncCursorPage, AsyncCursorPage
-from openai.types.beta.assistants import AssistantFile, FileDeleteResponse
+from openai.types.beta.vector_stores import (
+ VectorStoreFile,
+ VectorStoreFileDeleted,
+)
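+
+# vector-store file types replace the assistant-scoped AssistantFile and
+# FileDeleteResponse models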
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -20,189 +23,190 @@ class TestFiles:
@parametrize
def test_method_create(self, client: OpenAI) -> None:
- file = client.beta.assistants.files.create(
- "file-abc123",
+ file = client.beta.vector_stores.files.create(
+ "vs_abc123",
file_id="string",
)
- assert_matches_type(AssistantFile, file, path=["response"])
+ assert_matches_type(VectorStoreFile, file, path=["response"])
@parametrize
def test_raw_response_create(self, client: OpenAI) -> None:
- response = client.beta.assistants.files.with_raw_response.create(
- "file-abc123",
+ response = client.beta.vector_stores.files.with_raw_response.create(
+ "vs_abc123",
file_id="string",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = response.parse()
- assert_matches_type(AssistantFile, file, path=["response"])
+ assert_matches_type(VectorStoreFile, file, path=["response"])
@parametrize
def test_streaming_response_create(self, client: OpenAI) -> None:
- with client.beta.assistants.files.with_streaming_response.create(
- "file-abc123",
+ with client.beta.vector_stores.files.with_streaming_response.create(
+ "vs_abc123",
file_id="string",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = response.parse()
- assert_matches_type(AssistantFile, file, path=["response"])
+ assert_matches_type(VectorStoreFile, file, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_create(self, client: OpenAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"):
- client.beta.assistants.files.with_raw_response.create(
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
+ client.beta.vector_stores.files.with_raw_response.create(
"",
file_id="string",
)
@parametrize
def test_method_retrieve(self, client: OpenAI) -> None:
- file = client.beta.assistants.files.retrieve(
- "string",
- assistant_id="string",
+ file = client.beta.vector_stores.files.retrieve(
+ "file-abc123",
+ vector_store_id="vs_abc123",
)
- assert_matches_type(AssistantFile, file, path=["response"])
+ assert_matches_type(VectorStoreFile, file, path=["response"])
@parametrize
def test_raw_response_retrieve(self, client: OpenAI) -> None:
- response = client.beta.assistants.files.with_raw_response.retrieve(
- "string",
- assistant_id="string",
+ response = client.beta.vector_stores.files.with_raw_response.retrieve(
+ "file-abc123",
+ vector_store_id="vs_abc123",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = response.parse()
- assert_matches_type(AssistantFile, file, path=["response"])
+ assert_matches_type(VectorStoreFile, file, path=["response"])
@parametrize
def test_streaming_response_retrieve(self, client: OpenAI) -> None:
- with client.beta.assistants.files.with_streaming_response.retrieve(
- "string",
- assistant_id="string",
+ with client.beta.vector_stores.files.with_streaming_response.retrieve(
+ "file-abc123",
+ vector_store_id="vs_abc123",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = response.parse()
- assert_matches_type(AssistantFile, file, path=["response"])
+ assert_matches_type(VectorStoreFile, file, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_retrieve(self, client: OpenAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"):
- client.beta.assistants.files.with_raw_response.retrieve(
- "string",
- assistant_id="",
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
+ client.beta.vector_stores.files.with_raw_response.retrieve(
+ "file-abc123",
+ vector_store_id="",
)
with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
- client.beta.assistants.files.with_raw_response.retrieve(
+ client.beta.vector_stores.files.with_raw_response.retrieve(
"",
- assistant_id="string",
+ vector_store_id="vs_abc123",
)
@parametrize
def test_method_list(self, client: OpenAI) -> None:
- file = client.beta.assistants.files.list(
+ file = client.beta.vector_stores.files.list(
"string",
)
- assert_matches_type(SyncCursorPage[AssistantFile], file, path=["response"])
+ assert_matches_type(SyncCursorPage[VectorStoreFile], file, path=["response"])
@parametrize
def test_method_list_with_all_params(self, client: OpenAI) -> None:
- file = client.beta.assistants.files.list(
+ file = client.beta.vector_stores.files.list(
"string",
after="string",
before="string",
+ filter="in_progress",
limit=0,
order="asc",
)
- assert_matches_type(SyncCursorPage[AssistantFile], file, path=["response"])
+ assert_matches_type(SyncCursorPage[VectorStoreFile], file, path=["response"])
@parametrize
def test_raw_response_list(self, client: OpenAI) -> None:
- response = client.beta.assistants.files.with_raw_response.list(
+ response = client.beta.vector_stores.files.with_raw_response.list(
"string",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = response.parse()
- assert_matches_type(SyncCursorPage[AssistantFile], file, path=["response"])
+ assert_matches_type(SyncCursorPage[VectorStoreFile], file, path=["response"])
@parametrize
def test_streaming_response_list(self, client: OpenAI) -> None:
- with client.beta.assistants.files.with_streaming_response.list(
+ with client.beta.vector_stores.files.with_streaming_response.list(
"string",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = response.parse()
- assert_matches_type(SyncCursorPage[AssistantFile], file, path=["response"])
+ assert_matches_type(SyncCursorPage[VectorStoreFile], file, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_list(self, client: OpenAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"):
- client.beta.assistants.files.with_raw_response.list(
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
+ client.beta.vector_stores.files.with_raw_response.list(
"",
)
@parametrize
def test_method_delete(self, client: OpenAI) -> None:
- file = client.beta.assistants.files.delete(
+ file = client.beta.vector_stores.files.delete(
"string",
- assistant_id="string",
+ vector_store_id="string",
)
- assert_matches_type(FileDeleteResponse, file, path=["response"])
+ assert_matches_type(VectorStoreFileDeleted, file, path=["response"])
@parametrize
def test_raw_response_delete(self, client: OpenAI) -> None:
- response = client.beta.assistants.files.with_raw_response.delete(
+ response = client.beta.vector_stores.files.with_raw_response.delete(
"string",
- assistant_id="string",
+ vector_store_id="string",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = response.parse()
- assert_matches_type(FileDeleteResponse, file, path=["response"])
+ assert_matches_type(VectorStoreFileDeleted, file, path=["response"])
@parametrize
def test_streaming_response_delete(self, client: OpenAI) -> None:
- with client.beta.assistants.files.with_streaming_response.delete(
+ with client.beta.vector_stores.files.with_streaming_response.delete(
"string",
- assistant_id="string",
+ vector_store_id="string",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = response.parse()
- assert_matches_type(FileDeleteResponse, file, path=["response"])
+ assert_matches_type(VectorStoreFileDeleted, file, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_delete(self, client: OpenAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"):
- client.beta.assistants.files.with_raw_response.delete(
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
+ client.beta.vector_stores.files.with_raw_response.delete(
"string",
- assistant_id="",
+ vector_store_id="",
)
with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
- client.beta.assistants.files.with_raw_response.delete(
+ client.beta.vector_stores.files.with_raw_response.delete(
"",
- assistant_id="string",
+ vector_store_id="string",
)
@@ -211,187 +215,188 @@ class TestAsyncFiles:
@parametrize
async def test_method_create(self, async_client: AsyncOpenAI) -> None:
- file = await async_client.beta.assistants.files.create(
- "file-abc123",
+ file = await async_client.beta.vector_stores.files.create(
+ "vs_abc123",
file_id="string",
)
- assert_matches_type(AssistantFile, file, path=["response"])
+ assert_matches_type(VectorStoreFile, file, path=["response"])
@parametrize
async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
- response = await async_client.beta.assistants.files.with_raw_response.create(
- "file-abc123",
+ response = await async_client.beta.vector_stores.files.with_raw_response.create(
+ "vs_abc123",
file_id="string",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = response.parse()
- assert_matches_type(AssistantFile, file, path=["response"])
+ assert_matches_type(VectorStoreFile, file, path=["response"])
@parametrize
async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
- async with async_client.beta.assistants.files.with_streaming_response.create(
- "file-abc123",
+ async with async_client.beta.vector_stores.files.with_streaming_response.create(
+ "vs_abc123",
file_id="string",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = await response.parse()
- assert_matches_type(AssistantFile, file, path=["response"])
+ assert_matches_type(VectorStoreFile, file, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_create(self, async_client: AsyncOpenAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"):
- await async_client.beta.assistants.files.with_raw_response.create(
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
+ await async_client.beta.vector_stores.files.with_raw_response.create(
"",
file_id="string",
)
@parametrize
async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
- file = await async_client.beta.assistants.files.retrieve(
- "string",
- assistant_id="string",
+ file = await async_client.beta.vector_stores.files.retrieve(
+ "file-abc123",
+ vector_store_id="vs_abc123",
)
- assert_matches_type(AssistantFile, file, path=["response"])
+ assert_matches_type(VectorStoreFile, file, path=["response"])
@parametrize
async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
- response = await async_client.beta.assistants.files.with_raw_response.retrieve(
- "string",
- assistant_id="string",
+ response = await async_client.beta.vector_stores.files.with_raw_response.retrieve(
+ "file-abc123",
+ vector_store_id="vs_abc123",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = response.parse()
- assert_matches_type(AssistantFile, file, path=["response"])
+ assert_matches_type(VectorStoreFile, file, path=["response"])
@parametrize
async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
- async with async_client.beta.assistants.files.with_streaming_response.retrieve(
- "string",
- assistant_id="string",
+ async with async_client.beta.vector_stores.files.with_streaming_response.retrieve(
+ "file-abc123",
+ vector_store_id="vs_abc123",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = await response.parse()
- assert_matches_type(AssistantFile, file, path=["response"])
+ assert_matches_type(VectorStoreFile, file, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"):
- await async_client.beta.assistants.files.with_raw_response.retrieve(
- "string",
- assistant_id="",
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
+ await async_client.beta.vector_stores.files.with_raw_response.retrieve(
+ "file-abc123",
+ vector_store_id="",
)
with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
- await async_client.beta.assistants.files.with_raw_response.retrieve(
+ await async_client.beta.vector_stores.files.with_raw_response.retrieve(
"",
- assistant_id="string",
+ vector_store_id="vs_abc123",
)
@parametrize
async def test_method_list(self, async_client: AsyncOpenAI) -> None:
- file = await async_client.beta.assistants.files.list(
+ file = await async_client.beta.vector_stores.files.list(
"string",
)
- assert_matches_type(AsyncCursorPage[AssistantFile], file, path=["response"])
+ assert_matches_type(AsyncCursorPage[VectorStoreFile], file, path=["response"])
@parametrize
async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
- file = await async_client.beta.assistants.files.list(
+ file = await async_client.beta.vector_stores.files.list(
"string",
after="string",
before="string",
+ filter="in_progress",
limit=0,
order="asc",
)
- assert_matches_type(AsyncCursorPage[AssistantFile], file, path=["response"])
+ assert_matches_type(AsyncCursorPage[VectorStoreFile], file, path=["response"])
@parametrize
async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
- response = await async_client.beta.assistants.files.with_raw_response.list(
+ response = await async_client.beta.vector_stores.files.with_raw_response.list(
"string",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = response.parse()
- assert_matches_type(AsyncCursorPage[AssistantFile], file, path=["response"])
+ assert_matches_type(AsyncCursorPage[VectorStoreFile], file, path=["response"])
@parametrize
async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
- async with async_client.beta.assistants.files.with_streaming_response.list(
+ async with async_client.beta.vector_stores.files.with_streaming_response.list(
"string",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = await response.parse()
- assert_matches_type(AsyncCursorPage[AssistantFile], file, path=["response"])
+ assert_matches_type(AsyncCursorPage[VectorStoreFile], file, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_list(self, async_client: AsyncOpenAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"):
- await async_client.beta.assistants.files.with_raw_response.list(
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
+ await async_client.beta.vector_stores.files.with_raw_response.list(
"",
)
@parametrize
async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
- file = await async_client.beta.assistants.files.delete(
+ file = await async_client.beta.vector_stores.files.delete(
"string",
- assistant_id="string",
+ vector_store_id="string",
)
- assert_matches_type(FileDeleteResponse, file, path=["response"])
+ assert_matches_type(VectorStoreFileDeleted, file, path=["response"])
@parametrize
async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
- response = await async_client.beta.assistants.files.with_raw_response.delete(
+ response = await async_client.beta.vector_stores.files.with_raw_response.delete(
"string",
- assistant_id="string",
+ vector_store_id="string",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = response.parse()
- assert_matches_type(FileDeleteResponse, file, path=["response"])
+ assert_matches_type(VectorStoreFileDeleted, file, path=["response"])
@parametrize
async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
- async with async_client.beta.assistants.files.with_streaming_response.delete(
+ async with async_client.beta.vector_stores.files.with_streaming_response.delete(
"string",
- assistant_id="string",
+ vector_store_id="string",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = await response.parse()
- assert_matches_type(FileDeleteResponse, file, path=["response"])
+ assert_matches_type(VectorStoreFileDeleted, file, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"):
- await async_client.beta.assistants.files.with_raw_response.delete(
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
+ await async_client.beta.vector_stores.files.with_raw_response.delete(
"string",
- assistant_id="",
+ vector_store_id="",
)
with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
- await async_client.beta.assistants.files.with_raw_response.delete(
+ await async_client.beta.vector_stores.files.with_raw_response.delete(
"",
- assistant_id="string",
+ vector_store_id="string",
)