From 7fe34f2d0bda9c1cb116a593f02bd0cc15a52e12 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Mon, 15 Apr 2024 16:09:44 -0400
Subject: [PATCH] feat(api): add batch API (#768)

https://platform.openai.com/docs/api-reference/batch/create

---
 .stats.yml                          |   2 +-
 api.md                              |  14 ++
 src/index.ts                        |   7 +
 src/resources/batches.ts            | 225 ++++++++++++++++++++++++++++
 src/resources/index.ts              |   1 +
 tests/api-resources/batches.test.ts |  71 +++++++++
 6 files changed, 319 insertions(+), 1 deletion(-)
 create mode 100644 src/resources/batches.ts
 create mode 100644 tests/api-resources/batches.test.ts

diff --git a/.stats.yml b/.stats.yml
index 284caebf4..47c2bce1c 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1 +1 @@
-configured_endpoints: 52
+configured_endpoints: 55
diff --git a/api.md b/api.md
index c6a2bf273..02030dc07 100644
--- a/api.md
+++ b/api.md
@@ -337,3 +337,17 @@ Methods:
 
 - client.beta.threads.messages.files.retrieve(threadId, messageId, fileId) -> MessageFile
 - client.beta.threads.messages.files.list(threadId, messageId, { ...params }) -> MessageFilesPage
+
+# Batches
+
+Types:
+
+- Batch
+- BatchError
+- BatchRequestCounts
+
+Methods:
+
+- client.batches.create({ ...params }) -> Batch
+- client.batches.retrieve(batchId) -> Batch
+- client.batches.cancel(batchId) -> Batch
diff --git a/src/index.ts b/src/index.ts
index 9a2b2eaad..84fdd3979 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -150,6 +150,7 @@ export class OpenAI extends Core.APIClient {
   models: API.Models = new API.Models(this);
   fineTuning: API.FineTuning = new API.FineTuning(this);
   beta: API.Beta = new API.Beta(this);
+  batches: API.Batches = new API.Batches(this);
 
   protected override defaultQuery(): Core.DefaultQuery | undefined {
     return this._options.defaultQuery;
@@ -285,6 +286,12 @@ export namespace OpenAI {
 
   export import Beta = API.Beta;
 
+  export import Batches = API.Batches;
+  export import Batch = API.Batch;
+  export import BatchError = API.BatchError;
+  export import BatchRequestCounts = API.BatchRequestCounts;
+  export import BatchCreateParams = API.BatchCreateParams;
+
   export import ErrorObject = API.ErrorObject;
   export import FunctionDefinition = API.FunctionDefinition;
   export import FunctionParameters = API.FunctionParameters;
diff --git a/src/resources/batches.ts b/src/resources/batches.ts
new file mode 100644
index 000000000..75b491a16
--- /dev/null
+++ b/src/resources/batches.ts
@@ -0,0 +1,225 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import * as Core from 'openai/core';
+import { APIResource } from 'openai/resource';
+import * as BatchesAPI from 'openai/resources/batches';
+
+export class Batches extends APIResource {
+  /**
+   * Creates and executes a batch from an uploaded file of requests.
+   */
+  create(body: BatchCreateParams, options?: Core.RequestOptions): Core.APIPromise<Batch> {
+    return this._client.post('/batches', { body, ...options });
+  }
+
+  /**
+   * Retrieves a batch.
+   */
+  retrieve(batchId: string, options?: Core.RequestOptions): Core.APIPromise<Batch> {
+    return this._client.get(`/batches/${batchId}`, options);
+  }
+
+  /**
+   * Cancels an in-progress batch.
+   */
+  cancel(batchId: string, options?: Core.RequestOptions): Core.APIPromise<Batch> {
+    return this._client.post(`/batches/${batchId}/cancel`, options);
+  }
+}
+
+export interface Batch {
+  id: string;
+
+  /**
+   * The time frame within which the batch should be processed.
+   */
+  completion_window: string;
+
+  /**
+   * The Unix timestamp (in seconds) for when the batch was created.
+   */
+  created_at: number;
+
+  /**
+   * The OpenAI API endpoint used by the batch.
+   */
+  endpoint: string;
+
+  /**
+   * The ID of the input file for the batch.
+   */
+  input_file_id: string;
+
+  /**
+   * The object type, which is always `batch`.
+   */
+  object: 'batch';
+
+  /**
+   * The current status of the batch.
+   */
+  status:
+    | 'validating'
+    | 'failed'
+    | 'in_progress'
+    | 'finalizing'
+    | 'completed'
+    | 'expired'
+    | 'cancelling'
+    | 'cancelled';
+
+  /**
+   * The Unix timestamp (in seconds) for when the batch was cancelled.
+   */
+  cancelled_at?: number;
+
+  /**
+   * The Unix timestamp (in seconds) for when the batch started cancelling.
+   */
+  cancelling_at?: number;
+
+  /**
+   * The Unix timestamp (in seconds) for when the batch was completed.
+   */
+  completed_at?: number;
+
+  /**
+   * The ID of the file containing the outputs of requests with errors.
+   */
+  error_file_id?: string;
+
+  errors?: Batch.Errors;
+
+  /**
+   * The Unix timestamp (in seconds) for when the batch expired.
+   */
+  expired_at?: number;
+
+  /**
+   * The Unix timestamp (in seconds) for when the batch will expire.
+   */
+  expires_at?: number;
+
+  /**
+   * The Unix timestamp (in seconds) for when the batch failed.
+   */
+  failed_at?: number;
+
+  /**
+   * The Unix timestamp (in seconds) for when the batch started finalizing.
+   */
+  finalizing_at?: number;
+
+  /**
+   * The Unix timestamp (in seconds) for when the batch started processing.
+   */
+  in_progress_at?: number;
+
+  /**
+   * Set of 16 key-value pairs that can be attached to an object. This can be useful
+   * for storing additional information about the object in a structured format. Keys
+   * can be a maximum of 64 characters long and values can be a maximum of 512
+   * characters long.
+   */
+  metadata?: unknown | null;
+
+  /**
+   * The ID of the file containing the outputs of successfully executed requests.
+   */
+  output_file_id?: string;
+
+  /**
+   * The request counts for different statuses within the batch.
+   */
+  request_counts?: BatchRequestCounts;
+}
+
+export namespace Batch {
+  export interface Errors {
+    data?: Array<BatchesAPI.BatchError>;
+
+    /**
+     * The object type, which is always `list`.
+     */
+    object?: string;
+  }
+}
+
+export interface BatchError {
+  /**
+   * An error code identifying the error type.
+   */
+  code?: string;
+
+  /**
+   * The line number of the input file where the error occurred, if applicable.
+   */
+  line?: number | null;
+
+  /**
+   * A human-readable message providing more details about the error.
+   */
+  message?: string;
+
+  /**
+   * The name of the parameter that caused the error, if applicable.
+   */
+  param?: string | null;
+}
+
+/**
+ * The request counts for different statuses within the batch.
+ */
+export interface BatchRequestCounts {
+  /**
+   * Number of requests that have been completed successfully.
+   */
+  completed: number;
+
+  /**
+   * Number of requests that have failed.
+   */
+  failed: number;
+
+  /**
+   * Total number of requests in the batch.
+   */
+  total: number;
+}
+
+export interface BatchCreateParams {
+  /**
+   * The time frame within which the batch should be processed. Currently only `24h`
+   * is supported.
+   */
+  completion_window: '24h';
+
+  /**
+   * The endpoint to be used for all requests in the batch. Currently only
+   * `/v1/chat/completions` is supported.
+   */
+  endpoint: '/v1/chat/completions';
+
+  /**
+   * The ID of an uploaded file that contains requests for the new batch.
+   *
+   * See [upload file](https://platform.openai.com/docs/api-reference/files/create)
+   * for how to upload a file.
+   *
+   * Your input file must be formatted as a JSONL file, and must be uploaded with the
+   * purpose `batch`.
+   */
+  input_file_id: string;
+
+  /**
+   * Optional custom metadata for the batch.
+   */
+  metadata?: Record<string, string> | null;
+}
+
+export namespace Batches {
+  export import Batch = BatchesAPI.Batch;
+  export import BatchError = BatchesAPI.BatchError;
+  export import BatchRequestCounts = BatchesAPI.BatchRequestCounts;
+  export import BatchCreateParams = BatchesAPI.BatchCreateParams;
+}
diff --git a/src/resources/index.ts b/src/resources/index.ts
index a9741f5fd..282e57ea1 100644
--- a/src/resources/index.ts
+++ b/src/resources/index.ts
@@ -3,6 +3,7 @@
 export * from './chat/index';
 export * from './shared';
 export { Audio } from './audio/audio';
+export { Batch, BatchError, BatchRequestCounts, BatchCreateParams, Batches } from './batches';
 export { Beta } from './beta/beta';
 export {
   Completion,
diff --git a/tests/api-resources/batches.test.ts b/tests/api-resources/batches.test.ts
new file mode 100644
index 000000000..e4a9015d1
--- /dev/null
+++ b/tests/api-resources/batches.test.ts
@@ -0,0 +1,71 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import OpenAI from 'openai';
+import { Response } from 'node-fetch';
+
+const openai = new OpenAI({
+  apiKey: 'My API Key',
+  baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
+});
+
+describe('resource batches', () => {
+  test('create: only required params', async () => {
+    const responsePromise = openai.batches.create({
+      completion_window: '24h',
+      endpoint: '/v1/chat/completions',
+      input_file_id: 'string',
+    });
+    const rawResponse = await responsePromise.asResponse();
+    expect(rawResponse).toBeInstanceOf(Response);
+    const response = await responsePromise;
+    expect(response).not.toBeInstanceOf(Response);
+    const dataAndResponse = await responsePromise.withResponse();
+    expect(dataAndResponse.data).toBe(response);
+    expect(dataAndResponse.response).toBe(rawResponse);
+  });
+
+  test('create: required and optional params', async () => {
+    const response = await openai.batches.create({
+      completion_window: '24h',
+      endpoint: '/v1/chat/completions',
+      input_file_id: 'string',
+      metadata: { foo: 'string' },
+    });
+  });
+
+  test('retrieve', async () => {
+    const responsePromise = openai.batches.retrieve('string');
+    const rawResponse = await responsePromise.asResponse();
+    expect(rawResponse).toBeInstanceOf(Response);
+    const response = await responsePromise;
+    expect(response).not.toBeInstanceOf(Response);
+    const dataAndResponse = await responsePromise.withResponse();
+    expect(dataAndResponse.data).toBe(response);
+    expect(dataAndResponse.response).toBe(rawResponse);
+  });
+
+  test('retrieve: request options instead of params are passed correctly', async () => {
+    // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+    await expect(openai.batches.retrieve('string', { path: '/_stainless_unknown_path' })).rejects.toThrow(
+      OpenAI.NotFoundError,
+    );
+  });
+
+  test('cancel', async () => {
+    const responsePromise = openai.batches.cancel('string');
+    const rawResponse = await responsePromise.asResponse();
+    expect(rawResponse).toBeInstanceOf(Response);
+    const response = await responsePromise;
+    expect(response).not.toBeInstanceOf(Response);
+    const dataAndResponse = await responsePromise.withResponse();
+    expect(dataAndResponse.data).toBe(response);
+    expect(dataAndResponse.response).toBe(rawResponse);
+  });
+
+  test('cancel: request options instead of params are passed correctly', async () => {
+    // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+    await expect(openai.batches.cancel('string', { path: '/_stainless_unknown_path' })).rejects.toThrow(
+      OpenAI.NotFoundError,
+    );
+  });
+});
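
Reviewer note (not part of the patch): a minimal usage sketch of the new resource, built only from the API surface added above. It assumes the input JSONL file was already uploaded with purpose `batch` (see the `input_file_id` docs in this patch); the file ID 'file-abc123' and the one-minute polling interval are illustrative placeholders.

    import OpenAI from 'openai';

    const client = new OpenAI(); // reads OPENAI_API_KEY from the environment

    async function main() {
      // Create a batch from a previously uploaded JSONL file of
      // /v1/chat/completions requests ('file-abc123' is a placeholder ID).
      let batch = await client.batches.create({
        completion_window: '24h',
        endpoint: '/v1/chat/completions',
        input_file_id: 'file-abc123',
      });

      // Poll until the batch reaches a terminal status.
      while (['validating', 'in_progress', 'finalizing', 'cancelling'].includes(batch.status)) {
        await new Promise((resolve) => setTimeout(resolve, 60_000));
        batch = await client.batches.retrieve(batch.id);
      }

      if (batch.status === 'completed') {
        // Outputs of successful requests land in output_file_id;
        // requests that errored are collected in error_file_id.
        console.log('output file:', batch.output_file_id);
      } else {
        console.log('batch ended with status:', batch.status, batch.errors);
      }
    }

    main().catch(console.error);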