diff --git a/clients/algoliasearch-client-csharp/algoliasearch/Utils/SearchClientExtensions.cs b/clients/algoliasearch-client-csharp/algoliasearch/Utils/SearchClientExtensions.cs index 04854c3db9..038c7d60a5 100644 --- a/clients/algoliasearch-client-csharp/algoliasearch/Utils/SearchClientExtensions.cs +++ b/clients/algoliasearch-client-csharp/algoliasearch/Utils/SearchClientExtensions.cs @@ -167,12 +167,13 @@ public partial interface ISearchClient /// The index in which to perform the request. /// The list of `objects` to store in the given Algolia `indexName`. /// Whether or not we should wait until every `batch` tasks has been processed, this operation may slow the total execution time of this method but is more reliable.. + /// The size of the chunk of `objects`. The number of `batch` calls will be equal to `length(objects) / batchSize`. Defaults to 1000. /// Add extra http header or query parameters to Algolia. /// Cancellation Token to cancel the request. /// - Task> SaveObjectsAsync(string indexName, IEnumerable objects, bool waitForTasks = false, RequestOptions options = null, CancellationToken cancellationToken = default) where T : class; + Task> SaveObjectsAsync(string indexName, IEnumerable objects, bool waitForTasks = false, int batchSize = 1000, RequestOptions options = null, CancellationToken cancellationToken = default) where T : class; /// - List SaveObjects(string indexName, IEnumerable objects, bool waitForTasks = false, RequestOptions options = null, CancellationToken cancellationToken = default) where T : class; + List SaveObjects(string indexName, IEnumerable objects, bool waitForTasks = false, int batchSize = 1000, RequestOptions options = null, CancellationToken cancellationToken = default) where T : class; /// /// Helper: Deletes every records for the given objectIDs. The `chunkedBatch` helper is used under the hood, which creates a `batch` requests with at most 1000 objectIDs in it. @@ -180,11 +181,12 @@ public partial interface ISearchClient /// The index in which to perform the request. /// The list of `objectIDs` to remove from the given Algolia `indexName`. /// Whether or not we should wait until every `batch` tasks has been processed, this operation may slow the total execution time of this method but is more reliable.. + /// The size of the chunk of `objects`. The number of `batch` calls will be equal to `length(objects) / batchSize`. Defaults to 1000. /// Add extra http header or query parameters to Algolia. /// Cancellation Token to cancel the request. - Task> DeleteObjectsAsync(string indexName, IEnumerable objectIDs, bool waitForTasks = false, RequestOptions options = null, CancellationToken cancellationToken = default); + Task> DeleteObjectsAsync(string indexName, IEnumerable objectIDs, bool waitForTasks = false, int batchSize = 1000, RequestOptions options = null, CancellationToken cancellationToken = default); /// - List DeleteObjects(string indexName, IEnumerable objectIDs, bool waitForTasks = false, RequestOptions options = null, CancellationToken cancellationToken = default); + List DeleteObjects(string indexName, IEnumerable objectIDs, bool waitForTasks = false, int batchSize = 1000, RequestOptions options = null, CancellationToken cancellationToken = default); /// /// Helper: Replaces object content of all the given objects according to their respective `objectID` field. The `chunkedBatch` helper is used under the hood, which creates a `batch` requests with at most 1000 objects in it. 
@@ -193,11 +195,12 @@ public partial interface ISearchClient /// The list of `objects` to update in the given Algolia `indexName`. /// To be provided if non-existing objects are passed, otherwise, the call will fail. /// Whether or not we should wait until every `batch` tasks has been processed, this operation may slow the total execution time of this method but is more reliable.. + /// The size of the chunk of `objects`. The number of `batch` calls will be equal to `length(objects) / batchSize`. Defaults to 1000. /// Add extra http header or query parameters to Algolia. /// Cancellation Token to cancel the request. - Task> PartialUpdateObjectsAsync(string indexName, IEnumerable objects, bool createIfNotExists, bool waitForTasks = false, RequestOptions options = null, CancellationToken cancellationToken = default) where T : class; + Task> PartialUpdateObjectsAsync(string indexName, IEnumerable objects, bool createIfNotExists, bool waitForTasks = false, int batchSize = 1000, RequestOptions options = null, CancellationToken cancellationToken = default) where T : class; /// - List PartialUpdateObjects(string indexName, IEnumerable objects, bool createIfNotExists, bool waitForTasks = false, RequestOptions options = null, CancellationToken cancellationToken = default) where T : class; + List PartialUpdateObjects(string indexName, IEnumerable objects, bool createIfNotExists, bool waitForTasks = false, int batchSize = 1000, RequestOptions options = null, CancellationToken cancellationToken = default) where T : class; /// /// Helper: Check if an index exists. @@ -568,43 +571,45 @@ public List ChunkedBatch(string indexName, IEnumerable obje /// public async Task> SaveObjectsAsync(string indexName, IEnumerable objects, bool waitForTasks = false, + int batchSize = 1000, RequestOptions options = null, CancellationToken cancellationToken = default) where T : class { - return await ChunkedBatchAsync(indexName, objects, Action.AddObject, waitForTasks, 1000, options, cancellationToken).ConfigureAwait(false); + return await ChunkedBatchAsync(indexName, objects, Action.AddObject, waitForTasks, batchSize, options, cancellationToken).ConfigureAwait(false); } /// - public List SaveObjects(string indexName, IEnumerable objects, bool waitForTasks = false, RequestOptions options = null, + public List SaveObjects(string indexName, IEnumerable objects, bool waitForTasks = false, int batchSize = 1000, RequestOptions options = null, CancellationToken cancellationToken = default) where T : class => - AsyncHelper.RunSync(() => SaveObjectsAsync(indexName, objects, waitForTasks, options, cancellationToken)); + AsyncHelper.RunSync(() => SaveObjectsAsync(indexName, objects, waitForTasks, batchSize, options, cancellationToken)); /// public async Task> DeleteObjectsAsync(string indexName, IEnumerable objectIDs, bool waitForTasks = false, + int batchSize = 1000, RequestOptions options = null, CancellationToken cancellationToken = default) { - return await ChunkedBatchAsync(indexName, objectIDs.Select(id => new { objectID = id }), Action.DeleteObject, waitForTasks, 1000, options, cancellationToken).ConfigureAwait(false); + return await ChunkedBatchAsync(indexName, objectIDs.Select(id => new { objectID = id }), Action.DeleteObject, waitForTasks, batchSize, options, cancellationToken).ConfigureAwait(false); } /// - public List DeleteObjects(string indexName, IEnumerable objectIDs, bool waitForTasks = false, RequestOptions options = null, + public List DeleteObjects(string indexName, IEnumerable objectIDs, bool waitForTasks = 
false, int batchSize = 1000, RequestOptions options = null, CancellationToken cancellationToken = default) => - AsyncHelper.RunSync(() => DeleteObjectsAsync(indexName, objectIDs, waitForTasks, options, cancellationToken)); + AsyncHelper.RunSync(() => DeleteObjectsAsync(indexName, objectIDs, waitForTasks, batchSize, options, cancellationToken)); /// - public async Task> PartialUpdateObjectsAsync(string indexName, IEnumerable objects, bool createIfNotExists, bool waitForTasks = false, + public async Task> PartialUpdateObjectsAsync(string indexName, IEnumerable objects, bool createIfNotExists, bool waitForTasks = false, int batchSize = 1000, RequestOptions options = null, CancellationToken cancellationToken = default) where T : class { - return await ChunkedBatchAsync(indexName, objects, createIfNotExists ? Action.PartialUpdateObject : Action.PartialUpdateObjectNoCreate, waitForTasks, 1000, options, cancellationToken).ConfigureAwait(false); + return await ChunkedBatchAsync(indexName, objects, createIfNotExists ? Action.PartialUpdateObject : Action.PartialUpdateObjectNoCreate, waitForTasks, batchSize, options, cancellationToken).ConfigureAwait(false); } /// - public List PartialUpdateObjects(string indexName, IEnumerable objects, bool createIfNotExists, bool waitForTasks = false, + public List PartialUpdateObjects(string indexName, IEnumerable objects, bool createIfNotExists, bool waitForTasks = false, int batchSize = 1000, RequestOptions options = null, CancellationToken cancellationToken = default) where T : class => - AsyncHelper.RunSync(() => PartialUpdateObjectsAsync(indexName, objects, createIfNotExists, waitForTasks, options, cancellationToken)); + AsyncHelper.RunSync(() => PartialUpdateObjectsAsync(indexName, objects, createIfNotExists, waitForTasks, batchSize, options, cancellationToken)); private static async Task> CreateIterable(Func> executeQuery, Func stopCondition) diff --git a/clients/algoliasearch-client-kotlin/client/src/commonMain/kotlin/com/algolia/client/extensions/SearchClient.kt b/clients/algoliasearch-client-kotlin/client/src/commonMain/kotlin/com/algolia/client/extensions/SearchClient.kt index d97875a5be..03fd1ee3ef 100644 --- a/clients/algoliasearch-client-kotlin/client/src/commonMain/kotlin/com/algolia/client/extensions/SearchClient.kt +++ b/clients/algoliasearch-client-kotlin/client/src/commonMain/kotlin/com/algolia/client/extensions/SearchClient.kt @@ -371,6 +371,7 @@ public suspend fun SearchClient.chunkedBatch( * @param indexName The index in which to perform the request. * @param objects The list of objects to index. * @param waitForTask If true, wait for the task to complete. + * @param batchSize The size of the batch. Default is 1000. * @param requestOptions The requestOptions to send along with the query, they will be merged with the transporter requestOptions. * @return The list of responses from the batch requests. * @@ -379,6 +380,7 @@ public suspend fun SearchClient.saveObjects( indexName: String, objects: List, waitForTask: Boolean = false, + batchSize: Int = 1000, requestOptions: RequestOptions? = null, ): List { return this.chunkedBatch( @@ -386,7 +388,7 @@ public suspend fun SearchClient.saveObjects( objects = objects, action = Action.AddObject, waitForTask = waitForTask, - batchSize = 1000, + batchSize = batchSize, requestOptions = requestOptions, ) } @@ -397,6 +399,7 @@ public suspend fun SearchClient.saveObjects( * @param indexName The index in which to perform the request. * @param objectIDs The list of objectIDs to delete from the index. 
* @param waitForTask If true, wait for the task to complete. + * @param batchSize The size of the batch. Default is 1000. * @param requestOptions The requestOptions to send along with the query, they will be merged with the transporter requestOptions. * @return The list of responses from the batch requests. * @@ -405,6 +408,7 @@ public suspend fun SearchClient.deleteObjects( indexName: String, objectIDs: List, waitForTask: Boolean = false, + batchSize: Int = 1000, requestOptions: RequestOptions? = null, ): List { return this.chunkedBatch( @@ -412,7 +416,7 @@ public suspend fun SearchClient.deleteObjects( objects = objectIDs.map { id -> JsonObject(mapOf("objectID" to Json.encodeToJsonElement(id))) }, action = Action.DeleteObject, waitForTask = waitForTask, - batchSize = 1000, + batchSize = batchSize, requestOptions = requestOptions, ) } @@ -424,6 +428,7 @@ public suspend fun SearchClient.deleteObjects( * @param objects The list of objects to update in the index. * @param createIfNotExists To be provided if non-existing objects are passed, otherwise, the call will fail.. * @param waitForTask If true, wait for the task to complete. + * @param batchSize The size of the batch. Default is 1000. * @param requestOptions The requestOptions to send along with the query, they will be merged with the transporter requestOptions. * @return The list of responses from the batch requests. * @@ -433,6 +438,7 @@ public suspend fun SearchClient.partialUpdateObjects( objects: List, createIfNotExists: Boolean, waitForTask: Boolean = false, + batchSize: Int = 1000, requestOptions: RequestOptions? = null, ): List { return this.chunkedBatch( @@ -440,7 +446,7 @@ public suspend fun SearchClient.partialUpdateObjects( objects = objects, action = if (createIfNotExists) Action.PartialUpdateObject else Action.PartialUpdateObjectNoCreate, waitForTask = waitForTask, - batchSize = 1000, + batchSize = batchSize, requestOptions = requestOptions, ) } diff --git a/clients/algoliasearch-client-scala/src/main/scala/algoliasearch/extension/package.scala b/clients/algoliasearch-client-scala/src/main/scala/algoliasearch/extension/package.scala index f8c6e04a42..e72cb1621e 100644 --- a/clients/algoliasearch-client-scala/src/main/scala/algoliasearch/extension/package.scala +++ b/clients/algoliasearch-client-scala/src/main/scala/algoliasearch/extension/package.scala @@ -254,6 +254,8 @@ package object extension { * The list of objects to save. * @param waitForTasks * Whether to wait for the tasks to complete. + * @param batchSize + * The size of the batch. Default is 1000. * @param requestOptions * Additional request configuration. * @return @@ -263,9 +265,10 @@ package object extension { indexName: String, objects: Seq[Any], waitForTasks: Boolean = false, + batchSize: Int = 1000, requestOptions: Option[RequestOptions] = None )(implicit ec: ExecutionContext): Future[Seq[BatchResponse]] = { - chunkedBatch(indexName, objects, Action.AddObject, waitForTasks, 1000, requestOptions) + chunkedBatch(indexName, objects, Action.AddObject, waitForTasks, batchSize, requestOptions) } /** Helper: Deletes every objects for the given objectIDs. The `chunkedBatch` helper is used under the hood, which @@ -277,6 +280,8 @@ package object extension { * The list of objectIDs to delete. * @param waitForTasks * Whether to wait for the tasks to complete. + * @param batchSize + * The size of the batch. Default is 1000. * @param requestOptions * Additional request configuration. 
* @return @@ -286,6 +291,7 @@ package object extension { indexName: String, objectIDs: Seq[String], waitForTasks: Boolean = false, + batchSize: Int = 1000, requestOptions: Option[RequestOptions] = None )(implicit ec: ExecutionContext): Future[Seq[BatchResponse]] = { chunkedBatch( @@ -293,7 +299,7 @@ package object extension { objectIDs.map(id => new { val objectID: String = id }), Action.DeleteObject, waitForTasks, - 1000, + batchSize, requestOptions ) } @@ -309,6 +315,8 @@ package object extension { * To be provided if non-existing objects are passed, otherwise, the call will fail. * @param waitForTasks * Whether to wait for the tasks to complete. + * @param batchSize + * The size of the batch. Default is 1000. * @param requestOptions * Additional request configuration. * @return @@ -319,6 +327,7 @@ package object extension { objects: Seq[Any], createIfNotExists: Boolean = false, waitForTasks: Boolean = false, + batchSize: Int = 1000, requestOptions: Option[RequestOptions] = None )(implicit ec: ExecutionContext): Future[Seq[BatchResponse]] = { chunkedBatch( @@ -326,7 +335,7 @@ package object extension { objects, if (createIfNotExists) Action.PartialUpdateObject else Action.PartialUpdateObjectNoCreate, waitForTasks, - 1000, + batchSize, requestOptions ) } diff --git a/clients/algoliasearch-client-swift/Sources/Search/Extra/SearchClientExtension.swift b/clients/algoliasearch-client-swift/Sources/Search/Extra/SearchClientExtension.swift index bb2fe4b21b..ee7abd7a06 100644 --- a/clients/algoliasearch-client-swift/Sources/Search/Extra/SearchClientExtension.swift +++ b/clients/algoliasearch-client-swift/Sources/Search/Extra/SearchClientExtension.swift @@ -468,12 +468,14 @@ public extension SearchClient { /// - parameter indexName: The name of the index where to save the objects /// - parameter objects: The new objects /// - parameter waitForTasks: If we should wait for the batch task to be finished before processing the next one + /// - parameter batchSize: The maximum number of objects to include in a batch /// - parameter requestOptions: The request options /// - returns: [BatchResponse] func saveObjects( indexName: String, objects: [some Encodable], waitForTasks: Bool = false, + batchSize: Int = 1000, requestOptions: RequestOptions? = nil ) async throws -> [BatchResponse] { try await self.chunkedBatch( @@ -481,7 +483,7 @@ public extension SearchClient { objects: objects, action: .addObject, waitForTasks: waitForTasks, - batchSize: 1000, + batchSize: batchSize, requestOptions: requestOptions ) } @@ -491,12 +493,14 @@ public extension SearchClient { /// - parameter indexName: The name of the index to delete objectIDs from /// - parameter objectIDs: The objectIDs to delete /// - parameter waitForTasks: If we should wait for the batch task to be finished before processing the next one + /// - parameter batchSize: The maximum number of objects to include in a batch /// - parameter requestOptions: The request options /// - returns: [BatchResponse] func deleteObjects( indexName: String, objectIDs: [String], waitForTasks: Bool = false, + batchSize: Int = 1000, requestOptions: RequestOptions? 
= nil ) async throws -> [BatchResponse] { try await self.chunkedBatch( @@ -504,7 +508,7 @@ public extension SearchClient { objects: objectIDs.map { AnyCodable(["objectID": $0]) }, action: .deleteObject, waitForTasks: waitForTasks, - batchSize: 1000, + batchSize: batchSize, requestOptions: requestOptions ) } @@ -516,6 +520,7 @@ public extension SearchClient { /// - parameter createIfNotExists: To be provided if non-existing objects are passed, otherwise, the call will /// fail.. /// - parameter waitForTasks: If we should wait for the batch task to be finished before processing the next one + /// - parameter batchSize: The maximum number of objects to include in a batch /// - parameter requestOptions: The request options /// - returns: [BatchResponse] func partialUpdateObjects( @@ -523,6 +528,7 @@ public extension SearchClient { objects: [some Encodable], createIfNotExists: Bool = false, waitForTasks: Bool = false, + batchSize: Int = 1000, requestOptions: RequestOptions? = nil ) async throws -> [BatchResponse] { try await self.chunkedBatch( @@ -530,7 +536,7 @@ public extension SearchClient { objects: objects, action: createIfNotExists ? .partialUpdateObject : .partialUpdateObjectNoCreate, waitForTasks: waitForTasks, - batchSize: 1000, + batchSize: batchSize, requestOptions: requestOptions ) } diff --git a/specs/search/helpers/deleteObjects.yml b/specs/search/helpers/deleteObjects.yml index 2d1a2d032d..bbdb376b66 100644 --- a/specs/search/helpers/deleteObjects.yml +++ b/specs/search/helpers/deleteObjects.yml @@ -28,6 +28,12 @@ method: required: false schema: type: boolean + - in: query + name: batchSize + description: The size of the chunk of `objects`. The number of `batch` calls will be equal to `length(objects) / batchSize`. Defaults to 1000. + required: false + schema: + type: integer - in: query name: requestOptions description: The request options to pass to the `batch` method. diff --git a/specs/search/helpers/partialUpdateObjects.yml b/specs/search/helpers/partialUpdateObjects.yml index 2dcae02b8f..0b80f88df1 100644 --- a/specs/search/helpers/partialUpdateObjects.yml +++ b/specs/search/helpers/partialUpdateObjects.yml @@ -34,6 +34,12 @@ method: required: false schema: type: boolean + - in: query + name: batchSize + description: The size of the chunk of `objects`. The number of `batch` calls will be equal to `length(objects) / batchSize`. Defaults to 1000. + required: false + schema: + type: integer - in: query name: requestOptions description: The request options to pass to the `batch` method. diff --git a/specs/search/helpers/saveObjects.yml b/specs/search/helpers/saveObjects.yml index bf7cee3fe8..71b412c7aa 100644 --- a/specs/search/helpers/saveObjects.yml +++ b/specs/search/helpers/saveObjects.yml @@ -28,6 +28,12 @@ method: required: false schema: type: boolean + - in: query + name: batchSize + description: The size of the chunk of `objects`. The number of `batch` calls will be equal to `length(objects) / batchSize`. Defaults to 1000. + required: false + schema: + type: integer - in: query name: requestOptions description: The request options to pass to the `batch` method. diff --git a/templates/java/api_helpers.mustache b/templates/java/api_helpers.mustache index 439b192422..ac7848b370 100644 --- a/templates/java/api_helpers.mustache +++ b/templates/java/api_helpers.mustache @@ -708,7 +708,25 @@ public List saveObjects(String indexName, Iterable objects * the transporter requestOptions. 
(optional) */ public List saveObjects(String indexName, Iterable objects, boolean waitForTasks, RequestOptions requestOptions) { - return chunkedBatch(indexName, objects, Action.ADD_OBJECT, waitForTasks, 1000, requestOptions); + return saveObjects(indexName, objects, waitForTasks, 1000, requestOptions); +} + +/** + * Helper: Saves the given array of objects in the given index. The `chunkedBatch` helper is used + * under the hood, which creates a `batch` requests with at most 1000 objects in it. + * + * @param indexName The `indexName` to replace `objects` in. + * @param objects The array of `objects` to store in the given Algolia `indexName`. + * @param waitForTasks - Whether or not we should wait until every `batch` tasks has been + * processed, this operation may slow the total execution time of this method but is more + * reliable. + * @param batchSize The size of the chunk of `objects`. The number of `batch` calls will be equal + * to `length(objects) / batchSize`. + * @param requestOptions The requestOptions to send along with the query, they will be merged with + * the transporter requestOptions. (optional) + */ +public List saveObjects(String indexName, Iterable objects, boolean waitForTasks, int batchSize, RequestOptions requestOptions) { + return chunkedBatch(indexName, objects, Action.ADD_OBJECT, waitForTasks, batchSize, requestOptions); } /** @@ -747,7 +765,25 @@ public List deleteObjects(String indexName, List objectID * @param requestOptions The requestOptions to send along with the query, they will be merged with * the transporter requestOptions. (optional) */ -public List deleteObjects(String indexName, List objectIDs, boolean waitForTasks, RequestOptions requestOptions) { +public List deleteObjects(String indexName, List objectIDs, boolean waitForTasks,RequestOptions requestOptions) { + return deleteObjects(indexName, objectIDs, waitForTasks, 1000, requestOptions); +} + +/** + * Helper: Deletes every records for the given objectIDs. The `chunkedBatch` helper is used under + * the hood, which creates a `batch` requests with at most 1000 objectIDs in it. + * + * @param indexName The `indexName` to delete `objectIDs` from. + * @param objectIDs The array of `objectIDs` to delete from the `indexName`. + * @param waitForTasks - Whether or not we should wait until every `batch` tasks has been + * processed, this operation may slow the total execution time of this method but is more + * reliable. + * @param batchSize The size of the chunk of `objects`. The number of `batch` calls will be equal + * to `length(objects) / batchSize`. + * @param requestOptions The requestOptions to send along with the query, they will be merged with + * the transporter requestOptions.
(optional) + */ +public List deleteObjects(String indexName, List objectIDs, boolean waitForTasks, int batchSize, RequestOptions requestOptions) { List> objects = new ArrayList<>(); for (String id : objectIDs) { @@ -756,7 +792,7 @@ public List deleteObjects(String indexName, List objectID objects.add(obj); } - return chunkedBatch(indexName, objects, Action.DELETE_OBJECT, waitForTasks, 1000, requestOptions); + return chunkedBatch(indexName, objects, Action.DELETE_OBJECT, waitForTasks, batchSize, requestOptions); } /** @@ -816,13 +852,41 @@ public List partialUpdateObjects( boolean createIfNotExists, boolean waitForTasks, RequestOptions requestOptions +) { + return partialUpdateObjects(indexName, objects, createIfNotExists, waitForTasks, 1000, requestOptions); +} + +/** + * Helper: Replaces object content of all the given objects according to their respective + * `objectID` field. The `chunkedBatch` helper is used under the hood, which creates a `batch` + * requests with at most 1000 objects in it. + * + * @param indexName The `indexName` to update `objects` in. + * @param objects The array of `objects` to update in the given Algolia `indexName`. + * @param createIfNotExists To be provided if non-existing objects are passed, otherwise, the call + * will fail. + * @param waitForTasks - Whether or not we should wait until every `batch` tasks has been + * processed, this operation may slow the total execution time of this method but is more + * reliable. + * @param batchSize The size of the chunk of `objects`. The number of `batch` calls will be equal + * to `length(objects) / batchSize`. + * @param requestOptions The requestOptions to send along with the query, they will be merged with + * the transporter requestOptions. (optional) + */ +public List partialUpdateObjects( + String indexName, + Iterable objects, + boolean createIfNotExists, + boolean waitForTasks, + int batchSize, + RequestOptions requestOptions ) { return chunkedBatch( indexName, objects, createIfNotExists ? Action.PARTIAL_UPDATE_OBJECT : Action.PARTIAL_UPDATE_OBJECT_NO_CREATE, waitForTasks, - 1000, + batchSize, requestOptions ); } diff --git a/templates/javascript/clients/client/api/helpers.mustache b/templates/javascript/clients/client/api/helpers.mustache index 3feea7cf5e..436e903a3f 100644 --- a/templates/javascript/clients/client/api/helpers.mustache +++ b/templates/javascript/clients/client/api/helpers.mustache @@ -318,15 +318,16 @@ async chunkedBatch({ indexName, objects, action = 'addObject', waitForTasks, bat * @param saveObjects - The `saveObjects` object. * @param saveObjects.indexName - The `indexName` to save `objects` in. * @param saveObjects.objects - The array of `objects` to store in the given Algolia `indexName`. + * @param saveObjects.batchSize - The size of the chunk of `objects`. The number of `batch` calls will be equal to `length(objects) / batchSize`. Defaults to 1000. * @param saveObjects.waitForTasks - Whether or not we should wait until every `batch` tasks has been processed, this operation may slow the total execution time of this method but is more reliable. * @param requestOptions - The requestOptions to send along with the query, they will be forwarded to the `batch` method and merged with the transporter requestOptions.
*/ async saveObjects( - { indexName, objects, waitForTasks }: SaveObjectsOptions, + { indexName, objects, waitForTasks, batchSize }: SaveObjectsOptions, requestOptions?: RequestOptions ): Promise { return await this.chunkedBatch( - { indexName, objects, action: 'addObject', waitForTasks }, + { indexName, objects, action: 'addObject', waitForTasks, batchSize }, requestOptions ); }, @@ -338,11 +339,12 @@ async saveObjects( * @param deleteObjects - The `deleteObjects` object. * @param deleteObjects.indexName - The `indexName` to delete `objectIDs` from. * @param deleteObjects.objectIDs - The objectIDs to delete. + * @param deleteObjects.batchSize - The size of the chunk of `objects`. The number of `batch` calls will be equal to `length(objects) / batchSize`. Defaults to 1000. * @param deleteObjects.waitForTasks - Whether or not we should wait until every `batch` tasks has been processed, this operation may slow the total execution time of this method but is more reliable. * @param requestOptions - The requestOptions to send along with the query, they will be forwarded to the `batch` method and merged with the transporter requestOptions. */ async deleteObjects( - { indexName, objectIDs, waitForTasks }: DeleteObjectsOptions, + { indexName, objectIDs, waitForTasks, batchSize }: DeleteObjectsOptions, requestOptions?: RequestOptions ): Promise { return await this.chunkedBatch( @@ -351,6 +353,7 @@ async deleteObjects( objects: objectIDs.map((objectID) => ({ objectID })), action: 'deleteObject', waitForTasks, + batchSize, }, requestOptions ); @@ -364,11 +367,12 @@ async deleteObjects( * @param partialUpdateObjects.indexName - The `indexName` to update `objects` in. * @param partialUpdateObjects.objects - The array of `objects` to update in the given Algolia `indexName`. * @param partialUpdateObjects.createIfNotExists - To be provided if non-existing objects are passed, otherwise, the call will fail.. + * @param partialUpdateObjects.batchSize - The size of the chunk of `objects`. The number of `batch` calls will be equal to `length(objects) / batchSize`. Defaults to 1000. * @param partialUpdateObjects.waitForTasks - Whether or not we should wait until every `batch` tasks has been processed, this operation may slow the total execution time of this method but is more reliable. * @param requestOptions - The requestOptions to send along with the query, they will be forwarded to the `getTask` method and merged with the transporter requestOptions. */ async partialUpdateObjects( - { indexName, objects, createIfNotExists, waitForTasks }: PartialUpdateObjectsOptions, + { indexName, objects, createIfNotExists, waitForTasks, batchSize }: PartialUpdateObjectsOptions, requestOptions?: RequestOptions ): Promise { return await this.chunkedBatch( @@ -378,6 +382,7 @@ async partialUpdateObjects( action: createIfNotExists ? 'partialUpdateObject' : 'partialUpdateObjectNoCreate', + batchSize, waitForTasks }, requestOptions diff --git a/templates/javascript/clients/client/model/clientMethodProps.mustache b/templates/javascript/clients/client/model/clientMethodProps.mustache index c8e38583e7..ea2725500b 100644 --- a/templates/javascript/clients/client/model/clientMethodProps.mustache +++ b/templates/javascript/clients/client/model/clientMethodProps.mustache @@ -132,7 +132,7 @@ export type SearchClientNodeHelpers = { } {{/isSearchClient}} -export type DeleteObjectsOptions = Pick & { +export type DeleteObjectsOptions = Pick & { /** * The objectIDs to delete.
*/ @@ -141,7 +141,7 @@ export type DeleteObjectsOptions = Pick & { /** *To be provided if non-existing objects are passed, otherwise, the call will fail. @@ -151,7 +151,7 @@ export type PartialUpdateObjectsOptions = Pick< export type SaveObjectsOptions = Pick< ChunkedBatchOptions, - 'indexName' | 'objects' | 'waitForTasks' + 'indexName' | 'objects' | 'waitForTasks' | 'batchSize' >; export type ChunkedBatchOptions = ReplaceAllObjectsOptions & { diff --git a/templates/php/api.mustache b/templates/php/api.mustache index 12274200d4..29c703dce2 100644 --- a/templates/php/api.mustache +++ b/templates/php/api.mustache @@ -487,11 +487,12 @@ use Algolia\AlgoliaSearch\Exceptions\NotFoundException; * * @param string $indexName The `indexName` to replace `objects` in. * @param array $objects The array of `objects` to store in the given Algolia `indexName`. + * @param int $batchSize The size of the chunk of `objects`. The number of `batch` calls will be equal to `length(objects) / batchSize`. Defaults to 1000. * @param array $requestOptions Request options * @param bool $waitForTasks Whether or not we should wait until every `batch` tasks has been processed, this operation may slow the total execution time of this method but is more reliable */ - public function saveObjects($indexName, $objects, $requestOptions = [], $waitForTasks = false) { - return $this->chunkedBatch($indexName, $objects, 'addObject', $waitForTasks, 1000, $requestOptions); + public function saveObjects($indexName, $objects, $batchSize = 1000, $requestOptions = [], $waitForTasks = false) { + return $this->chunkedBatch($indexName, $objects, 'addObject', $waitForTasks, $batchSize, $requestOptions); } /** @@ -499,10 +500,11 @@ use Algolia\AlgoliaSearch\Exceptions\NotFoundException; * * @param string $indexName The `indexName` to delete `objectIDs` from. * @param array $objectIDs The `objectIDs` to delete. + * @param int $batchSize The size of the chunk of `objects`. The number of `batch` calls will be equal to `length(objects) / batchSize`. Defaults to 1000. * @param array $requestOptions Request options * @param bool $waitForTasks Whether or not we should wait until every `batch` tasks has been processed, this operation may slow the total execution time of this method but is more reliable */ - public function deleteObjects($indexName, $objectIDs, $requestOptions = [], $waitForTasks = false) + public function deleteObjects($indexName, $objectIDs, $batchSize = 1000, $requestOptions = [], $waitForTasks = false) { $objects = []; @@ -510,7 +512,7 @@ use Algolia\AlgoliaSearch\Exceptions\NotFoundException; $objects[] = ['objectID' => $id]; } - return $this->chunkedBatch($indexName, $objects, 'deleteObject', $waitForTasks, 1000, $requestOptions); + return $this->chunkedBatch($indexName, $objects, 'deleteObject', $waitForTasks, $batchSize, $requestOptions); } /** @@ -519,11 +521,12 @@ use Algolia\AlgoliaSearch\Exceptions\NotFoundException; * @param string $indexName The `indexName` to replace `objects` in. * @param array $objects The array of `objects` to store in the given Algolia `indexName`. * @param bool $createIfNotExists To be provided if non-existing objects are passed, otherwise, the call will fail.. + * @param int $batchSize The size of the chunk of `objects`. The number of `batch` calls will be equal to `length(objects) / batchSize`. Defaults to 1000.
* @param array $requestOptions Request options * @param bool $waitForTasks Whether or not we should wait until every `batch` tasks has been processed, this operation may slow the total execution time of this method but is more reliable */ - public function partialUpdateObjects($indexName, $objects, $createIfNotExists, $requestOptions = [], $waitForTasks = false) { - return $this->chunkedBatch($indexName, $objects, ($createIfNotExists == TRUE) ? 'partialUpdateObject' : 'partialUpdateObjectNoCreate', $waitForTasks, 1000, $requestOptions); + public function partialUpdateObjects($indexName, $objects, $createIfNotExists, $batchSize = 1000, $requestOptions = [], $waitForTasks = false) { + return $this->chunkedBatch($indexName, $objects, ($createIfNotExists == TRUE) ? 'partialUpdateObject' : 'partialUpdateObjectNoCreate', $waitForTasks, $batchSize, $requestOptions); } /** diff --git a/templates/python/search_helpers.mustache b/templates/python/search_helpers.mustache index c1cc8dad48..ea85983c42 100644 --- a/templates/python/search_helpers.mustache +++ b/templates/python/search_helpers.mustache @@ -274,24 +274,26 @@ index_name: str, objects: List[Dict[str, Any]], wait_for_tasks: bool = False, + batch_size: int = 1000, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> List[BatchResponse]: """ Helper: Saves the given array of objects in the given index. The `chunked_batch` helper is used under the hood, which creates a `batch` requests with at most 1000 objects in it. """ - return {{^isSyncClient}}await {{/isSyncClient}}self.chunked_batch(index_name=index_name, objects=objects, action=Action.ADDOBJECT, wait_for_tasks=wait_for_tasks, request_options=request_options) + return {{^isSyncClient}}await {{/isSyncClient}}self.chunked_batch(index_name=index_name, objects=objects, action=Action.ADDOBJECT, wait_for_tasks=wait_for_tasks, batch_size=batch_size, request_options=request_options) {{^isSyncClient}}async {{/isSyncClient}}def delete_objects( self, index_name: str, object_ids: List[str], wait_for_tasks: bool = False, + batch_size: int = 1000, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> List[BatchResponse]: """ Helper: Deletes every records for the given objectIDs. The `chunked_batch` helper is used under the hood, which creates a `batch` requests with at most 1000 objectIDs in it. """ - return {{^isSyncClient}}await {{/isSyncClient}}self.chunked_batch(index_name=index_name, objects=[{"objectID": id} for id in object_ids], action=Action.DELETEOBJECT, wait_for_tasks=wait_for_tasks, request_options=request_options) + return {{^isSyncClient}}await {{/isSyncClient}}self.chunked_batch(index_name=index_name, objects=[{"objectID": id} for id in object_ids], action=Action.DELETEOBJECT, wait_for_tasks=wait_for_tasks, batch_size=batch_size, request_options=request_options) {{^isSyncClient}}async {{/isSyncClient}}def partial_update_objects( self, @@ -299,12 +301,13 @@ objects: List[Dict[str, Any]], create_if_not_exists: bool = False, wait_for_tasks: bool = False, + batch_size: int = 1000, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> List[BatchResponse]: """ Helper: Replaces object content of all the given objects according to their respective `objectID` field. The `chunked_batch` helper is used under the hood, which creates a `batch` requests with at most 1000 objects in it. 
""" - return {{^isSyncClient}}await {{/isSyncClient}}self.chunked_batch(index_name=index_name, objects=objects, action=Action.PARTIALUPDATEOBJECT if create_if_not_exists else Action.PARTIALUPDATEOBJECTNOCREATE, wait_for_tasks=wait_for_tasks, request_options=request_options) + return {{^isSyncClient}}await {{/isSyncClient}}self.chunked_batch(index_name=index_name, objects=objects, action=Action.PARTIALUPDATEOBJECT if create_if_not_exists else Action.PARTIALUPDATEOBJECTNOCREATE, wait_for_tasks=wait_for_tasks, batch_size=batch_size, request_options=request_options) {{^isSyncClient}}async {{/isSyncClient}}def chunked_batch( self, diff --git a/templates/ruby/search_helpers.mustache b/templates/ruby/search_helpers.mustache index d602cf3855..d46643f4c7 100644 --- a/templates/ruby/search_helpers.mustache +++ b/templates/ruby/search_helpers.mustache @@ -212,17 +212,18 @@ end # @param index_name [String]: The `index_name` to save `objects` in. # @param objects [Array]: The array of `objects` to store in the given Algolia `indexName`. # @param wait_for_tasks [Boolean]: Whether or not we should wait until every `batch` tasks has been processed, this operation may slow the total execution time of this method but is more reliable. +# @param batch_size [int] The size of the chunk of `objects`. The number of `batch` calls will be equal to `length(objects) / batchSize`. Defaults to 1000. # @param request_options: The request options to send along with the query, they will be merged with the transporter base parameters (headers, query params, timeouts, etc.). (optional) # # @return [BatchResponse] # -def save_objects(index_name, objects, wait_for_tasks = false, request_options = {}) +def save_objects(index_name, objects, wait_for_tasks = false, batch_size = 1000, request_options = {}) chunked_batch( index_name, objects, Search::Action::ADD_OBJECT, wait_for_tasks, - 1000, + batch_size, request_options ) end @@ -232,17 +233,18 @@ end # @param index_name [String]: The `index_name` to delete `object_ids` from. # @param object_ids [Array]: The object_ids to delete. # @param wait_for_tasks [Boolean]: Whether or not we should wait until every `batch` tasks has been processed, this operation may slow the total execution time of this method but is more reliable. +# @param batch_size [int] The size of the chunk of `objects`. The number of `batch` calls will be equal to `length(objects) / batchSize`. Defaults to 1000. # @param request_options: The request options to send along with the query, they will be merged with the transporter base parameters (headers, query params, timeouts, etc.). (optional) # # @return [BatchResponse] # -def delete_objects(index_name, object_ids, wait_for_tasks = false, request_options = {}) +def delete_objects(index_name, object_ids, wait_for_tasks = false, batch_size = 1000, request_options = {}) chunked_batch( index_name, object_ids.map { |id| { "objectID" => id } }, Search::Action::DELETE_OBJECT, wait_for_tasks, - 1000, + batch_size, request_options ) end @@ -253,17 +255,18 @@ end # @param objects [Array]: The objects to partially update. # @param create_if_not_exists [Boolean]: To be provided if non-existing objects are passed, otherwise, the call will fail. # @param wait_for_tasks [Boolean] Whether or not we should wait until every `batch` tasks has been processed, this operation may slow the total execution time of this method but is more reliable. +# @param batch_size [int] The size of the chunk of `objects`. The number of `batch` calls will be equal to `length(objects) / batchSize`. 
Defaults to 1000. # @param request_options: The request options to send along with the query, they will be merged with the transporter base parameters (headers, query params, timeouts, etc.). (optional) # # @return [BatchResponse] # -def partial_update_objects(index_name, objects, create_if_not_exists, wait_for_tasks = false, request_options = {}) +def partial_update_objects(index_name, objects, create_if_not_exists, wait_for_tasks = false, batch_size = 1000, request_options = {}) chunked_batch( index_name, objects, create_if_not_exists ? Search::Action::PARTIAL_UPDATE_OBJECT : Search::Action::PARTIAL_UPDATE_OBJECT_NO_CREATE, wait_for_tasks, - 1000, + batch_size, request_options ) end
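
Usage note (not part of the diff above): once these templates are regenerated, callers can tune the chunk size instead of being pinned to 1000. Below is a minimal sketch using the JavaScript client, assuming a v5-style client exposed through the `algoliasearch` package; the credentials, index name, and record shape are placeholders.

```ts
import { algoliasearch } from 'algoliasearch';

// Placeholder credentials, for illustration only.
const client = algoliasearch('YOUR_APP_ID', 'YOUR_API_KEY');

const records = Array.from({ length: 10_000 }, (_, i) => ({
  objectID: String(i),
  name: `product-${i}`,
}));

// Send `batch` calls of 500 records instead of the default 1000,
// and wait for each underlying task to finish before continuing.
const saveResponses = await client.saveObjects({
  indexName: 'products',
  objects: records,
  waitForTasks: true,
  batchSize: 500,
});
console.log(`saveObjects issued ${saveResponses.length} batch calls`);

// The same knob is exposed on the other helpers touched by this change.
await client.partialUpdateObjects({
  indexName: 'products',
  objects: records.map(({ objectID }) => ({ objectID, inStock: true })),
  createIfNotExists: false,
  batchSize: 500,
});

await client.deleteObjects({
  indexName: 'products',
  objectIDs: records.map((r) => r.objectID),
  batchSize: 500,
});
```

A smaller `batchSize` trades fewer records per request for more requests overall; the default of 1000 and the underlying `chunkedBatch` behaviour are unchanged.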