diff --git a/clients/algoliasearch-client-csharp/algoliasearch/Utils/SearchClientExtensions.cs b/clients/algoliasearch-client-csharp/algoliasearch/Utils/SearchClientExtensions.cs
index 04854c3db9..038c7d60a5 100644
--- a/clients/algoliasearch-client-csharp/algoliasearch/Utils/SearchClientExtensions.cs
+++ b/clients/algoliasearch-client-csharp/algoliasearch/Utils/SearchClientExtensions.cs
@@ -167,12 +167,13 @@ public partial interface ISearchClient
/// The index in which to perform the request.
/// The list of `objects` to store in the given Algolia `indexName`.
/// Whether or not we should wait until every `batch` task has been processed; this operation may slow the total execution time of this method but is more reliable.
+ /// The size of the chunk of `objects`. The number of `batch` calls will be equal to `length(objects) / batchSize`. Defaults to 1000.
/// Add extra http header or query parameters to Algolia.
/// Cancellation Token to cancel the request.
///
- Task<List<BatchResponse>> SaveObjectsAsync<T>(string indexName, IEnumerable<T> objects, bool waitForTasks = false, RequestOptions options = null, CancellationToken cancellationToken = default) where T : class;
+ Task<List<BatchResponse>> SaveObjectsAsync<T>(string indexName, IEnumerable<T> objects, bool waitForTasks = false, int batchSize = 1000, RequestOptions options = null, CancellationToken cancellationToken = default) where T : class;
///
- List<BatchResponse> SaveObjects<T>(string indexName, IEnumerable<T> objects, bool waitForTasks = false, RequestOptions options = null, CancellationToken cancellationToken = default) where T : class;
+ List<BatchResponse> SaveObjects<T>(string indexName, IEnumerable<T> objects, bool waitForTasks = false, int batchSize = 1000, RequestOptions options = null, CancellationToken cancellationToken = default) where T : class;
///
/// Helper: Deletes every record for the given objectIDs. The `chunkedBatch` helper is used under the hood, which creates `batch` requests with at most 1000 objectIDs in each.
@@ -180,11 +181,12 @@ public partial interface ISearchClient
/// The index in which to perform the request.
/// The list of `objectIDs` to remove from the given Algolia `indexName`.
/// Whether or not we should wait until every `batch` task has been processed; this operation may slow the total execution time of this method but is more reliable.
+ /// The size of the chunk of `objects`. The number of `batch` calls will be equal to `length(objects) / batchSize`. Defaults to 1000.
/// Add extra http header or query parameters to Algolia.
/// Cancellation Token to cancel the request.
- Task<List<BatchResponse>> DeleteObjectsAsync(string indexName, IEnumerable<string> objectIDs, bool waitForTasks = false, RequestOptions options = null, CancellationToken cancellationToken = default);
+ Task<List<BatchResponse>> DeleteObjectsAsync(string indexName, IEnumerable<string> objectIDs, bool waitForTasks = false, int batchSize = 1000, RequestOptions options = null, CancellationToken cancellationToken = default);
///
- List<BatchResponse> DeleteObjects(string indexName, IEnumerable<string> objectIDs, bool waitForTasks = false, RequestOptions options = null, CancellationToken cancellationToken = default);
+ List<BatchResponse> DeleteObjects(string indexName, IEnumerable<string> objectIDs, bool waitForTasks = false, int batchSize = 1000, RequestOptions options = null, CancellationToken cancellationToken = default);
///
/// Helper: Replaces object content of all the given objects according to their respective `objectID` field. The `chunkedBatch` helper is used under the hood, which creates `batch` requests with at most 1000 objects in each.
@@ -193,11 +195,12 @@ public partial interface ISearchClient
/// The list of `objects` to update in the given Algolia `indexName`.
/// To be provided if non-existing objects are passed, otherwise, the call will fail.
/// Whether or not we should wait until every `batch` task has been processed; this operation may slow the total execution time of this method but is more reliable.
+ /// The size of the chunk of `objects`. The number of `batch` calls will be equal to `length(objects) / batchSize`. Defaults to 1000.
/// Add extra http header or query parameters to Algolia.
/// Cancellation Token to cancel the request.
- Task<List<BatchResponse>> PartialUpdateObjectsAsync<T>(string indexName, IEnumerable<T> objects, bool createIfNotExists, bool waitForTasks = false, RequestOptions options = null, CancellationToken cancellationToken = default) where T : class;
+ Task<List<BatchResponse>> PartialUpdateObjectsAsync<T>(string indexName, IEnumerable<T> objects, bool createIfNotExists, bool waitForTasks = false, int batchSize = 1000, RequestOptions options = null, CancellationToken cancellationToken = default) where T : class;
///
- List<BatchResponse> PartialUpdateObjects<T>(string indexName, IEnumerable<T> objects, bool createIfNotExists, bool waitForTasks = false, RequestOptions options = null, CancellationToken cancellationToken = default) where T : class;
+ List<BatchResponse> PartialUpdateObjects<T>(string indexName, IEnumerable<T> objects, bool createIfNotExists, bool waitForTasks = false, int batchSize = 1000, RequestOptions options = null, CancellationToken cancellationToken = default) where T : class;
///
/// Helper: Check if an index exists.
@@ -568,43 +571,45 @@ public List ChunkedBatch(string indexName, IEnumerable obje
///
public async Task<List<BatchResponse>> SaveObjectsAsync<T>(string indexName, IEnumerable<T> objects,
bool waitForTasks = false,
+ int batchSize = 1000,
RequestOptions options = null,
CancellationToken cancellationToken = default) where T : class
{
- return await ChunkedBatchAsync(indexName, objects, Action.AddObject, waitForTasks, 1000, options, cancellationToken).ConfigureAwait(false);
+ return await ChunkedBatchAsync(indexName, objects, Action.AddObject, waitForTasks, batchSize, options, cancellationToken).ConfigureAwait(false);
}
///
- public List<BatchResponse> SaveObjects<T>(string indexName, IEnumerable<T> objects, bool waitForTasks = false, RequestOptions options = null,
+ public List<BatchResponse> SaveObjects<T>(string indexName, IEnumerable<T> objects, bool waitForTasks = false, int batchSize = 1000, RequestOptions options = null,
CancellationToken cancellationToken = default) where T : class =>
- AsyncHelper.RunSync(() => SaveObjectsAsync(indexName, objects, waitForTasks, options, cancellationToken));
+ AsyncHelper.RunSync(() => SaveObjectsAsync(indexName, objects, waitForTasks, batchSize, options, cancellationToken));
///
public async Task<List<BatchResponse>> DeleteObjectsAsync(string indexName, IEnumerable<string> objectIDs,
bool waitForTasks = false,
+ int batchSize = 1000,
RequestOptions options = null,
CancellationToken cancellationToken = default)
{
- return await ChunkedBatchAsync(indexName, objectIDs.Select(id => new { objectID = id }), Action.DeleteObject, waitForTasks, 1000, options, cancellationToken).ConfigureAwait(false);
+ return await ChunkedBatchAsync(indexName, objectIDs.Select(id => new { objectID = id }), Action.DeleteObject, waitForTasks, batchSize, options, cancellationToken).ConfigureAwait(false);
}
///
- public List<BatchResponse> DeleteObjects(string indexName, IEnumerable<string> objectIDs, bool waitForTasks = false, RequestOptions options = null,
+ public List<BatchResponse> DeleteObjects(string indexName, IEnumerable<string> objectIDs, bool waitForTasks = false, int batchSize = 1000, RequestOptions options = null,
CancellationToken cancellationToken = default) =>
- AsyncHelper.RunSync(() => DeleteObjectsAsync(indexName, objectIDs, waitForTasks, options, cancellationToken));
+ AsyncHelper.RunSync(() => DeleteObjectsAsync(indexName, objectIDs, waitForTasks, batchSize, options, cancellationToken));
///
- public async Task<List<BatchResponse>> PartialUpdateObjectsAsync<T>(string indexName, IEnumerable<T> objects, bool createIfNotExists, bool waitForTasks = false,
+ public async Task<List<BatchResponse>> PartialUpdateObjectsAsync<T>(string indexName, IEnumerable<T> objects, bool createIfNotExists, bool waitForTasks = false, int batchSize = 1000,
RequestOptions options = null,
CancellationToken cancellationToken = default) where T : class
{
- return await ChunkedBatchAsync(indexName, objects, createIfNotExists ? Action.PartialUpdateObject : Action.PartialUpdateObjectNoCreate, waitForTasks, 1000, options, cancellationToken).ConfigureAwait(false);
+ return await ChunkedBatchAsync(indexName, objects, createIfNotExists ? Action.PartialUpdateObject : Action.PartialUpdateObjectNoCreate, waitForTasks, batchSize, options, cancellationToken).ConfigureAwait(false);
}
///
- public List<BatchResponse> PartialUpdateObjects<T>(string indexName, IEnumerable<T> objects, bool createIfNotExists, bool waitForTasks = false,
+ public List<BatchResponse> PartialUpdateObjects<T>(string indexName, IEnumerable<T> objects, bool createIfNotExists, bool waitForTasks = false, int batchSize = 1000,
RequestOptions options = null, CancellationToken cancellationToken = default) where T : class =>
- AsyncHelper.RunSync(() => PartialUpdateObjectsAsync(indexName, objects, createIfNotExists, waitForTasks, options, cancellationToken));
+ AsyncHelper.RunSync(() => PartialUpdateObjectsAsync(indexName, objects, createIfNotExists, waitForTasks, batchSize, options, cancellationToken));
private static async Task> CreateIterable(Func> executeQuery,
Func stopCondition)
diff --git a/clients/algoliasearch-client-kotlin/client/src/commonMain/kotlin/com/algolia/client/extensions/SearchClient.kt b/clients/algoliasearch-client-kotlin/client/src/commonMain/kotlin/com/algolia/client/extensions/SearchClient.kt
index d97875a5be..03fd1ee3ef 100644
--- a/clients/algoliasearch-client-kotlin/client/src/commonMain/kotlin/com/algolia/client/extensions/SearchClient.kt
+++ b/clients/algoliasearch-client-kotlin/client/src/commonMain/kotlin/com/algolia/client/extensions/SearchClient.kt
@@ -371,6 +371,7 @@ public suspend fun SearchClient.chunkedBatch(
* @param indexName The index in which to perform the request.
* @param objects The list of objects to index.
* @param waitForTask If true, wait for the task to complete.
+ * @param batchSize The size of the batch. Default is 1000.
* @param requestOptions The requestOptions to send along with the query, they will be merged with the transporter requestOptions.
* @return The list of responses from the batch requests.
*
@@ -379,6 +380,7 @@ public suspend fun SearchClient.saveObjects(
indexName: String,
objects: List<JsonObject>,
waitForTask: Boolean = false,
+ batchSize: Int = 1000,
requestOptions: RequestOptions? = null,
): List<BatchResponse> {
return this.chunkedBatch(
@@ -386,7 +388,7 @@ public suspend fun SearchClient.saveObjects(
objects = objects,
action = Action.AddObject,
waitForTask = waitForTask,
- batchSize = 1000,
+ batchSize = batchSize,
requestOptions = requestOptions,
)
}
@@ -397,6 +399,7 @@ public suspend fun SearchClient.saveObjects(
* @param indexName The index in which to perform the request.
* @param objectIDs The list of objectIDs to delete from the index.
* @param waitForTask If true, wait for the task to complete.
+ * @param batchSize The size of the batch. Default is 1000.
* @param requestOptions The requestOptions to send along with the query, they will be merged with the transporter requestOptions.
* @return The list of responses from the batch requests.
*
@@ -405,6 +408,7 @@ public suspend fun SearchClient.deleteObjects(
indexName: String,
objectIDs: List<String>,
waitForTask: Boolean = false,
+ batchSize: Int = 1000,
requestOptions: RequestOptions? = null,
): List<BatchResponse> {
return this.chunkedBatch(
@@ -412,7 +416,7 @@ public suspend fun SearchClient.deleteObjects(
objects = objectIDs.map { id -> JsonObject(mapOf("objectID" to Json.encodeToJsonElement(id))) },
action = Action.DeleteObject,
waitForTask = waitForTask,
- batchSize = 1000,
+ batchSize = batchSize,
requestOptions = requestOptions,
)
}
@@ -424,6 +428,7 @@ public suspend fun SearchClient.deleteObjects(
* @param objects The list of objects to update in the index.
* @param createIfNotExists To be provided if non-existing objects are passed; otherwise, the call will fail.
* @param waitForTask If true, wait for the task to complete.
+ * @param batchSize The size of the batch. Default is 1000.
* @param requestOptions The requestOptions to send along with the query, they will be merged with the transporter requestOptions.
* @return The list of responses from the batch requests.
*
@@ -433,6 +438,7 @@ public suspend fun SearchClient.partialUpdateObjects(
objects: List<JsonObject>,
createIfNotExists: Boolean,
waitForTask: Boolean = false,
+ batchSize: Int = 1000,
requestOptions: RequestOptions? = null,
): List<BatchResponse> {
return this.chunkedBatch(
@@ -440,7 +446,7 @@ public suspend fun SearchClient.partialUpdateObjects(
objects = objects,
action = if (createIfNotExists) Action.PartialUpdateObject else Action.PartialUpdateObjectNoCreate,
waitForTask = waitForTask,
- batchSize = 1000,
+ batchSize = batchSize,
requestOptions = requestOptions,
)
}
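For the Kotlin helpers above, here is a minimal usage sketch of the new parameter (the credentials, index name, record shape, and import paths are illustrative assumptions, not part of this diff). Because `batchSize` defaults to 1000 in every signature touched by this change, existing call sites keep compiling with their previous chunking behaviour.

import com.algolia.client.api.SearchClient
import com.algolia.client.extensions.deleteObjects
import com.algolia.client.extensions.saveObjects
import kotlinx.serialization.json.JsonObject
import kotlinx.serialization.json.JsonPrimitive

suspend fun main() {
  // Hypothetical credentials and index name, for illustration only.
  val client = SearchClient(appId = "YOUR_APP_ID", apiKey = "YOUR_API_KEY")

  val ids = (1..2500).map { it.toString() }
  val records = ids.map { id ->
    JsonObject(mapOf("objectID" to JsonPrimitive(id), "name" to JsonPrimitive("item $id")))
  }

  // 2500 records with batchSize = 500 -> 5 `batch` calls (the former hard-coded 1000 would issue 3).
  client.saveObjects(
    indexName = "products",
    objects = records,
    waitForTask = true,
    batchSize = 500,
  )

  // Deleting by objectID is chunked the same way.
  client.deleteObjects(
    indexName = "products",
    objectIDs = ids,
    batchSize = 500,
  )
}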
diff --git a/clients/algoliasearch-client-scala/src/main/scala/algoliasearch/extension/package.scala b/clients/algoliasearch-client-scala/src/main/scala/algoliasearch/extension/package.scala
index f8c6e04a42..e72cb1621e 100644
--- a/clients/algoliasearch-client-scala/src/main/scala/algoliasearch/extension/package.scala
+++ b/clients/algoliasearch-client-scala/src/main/scala/algoliasearch/extension/package.scala
@@ -254,6 +254,8 @@ package object extension {
* The list of objects to save.
* @param waitForTasks
* Whether to wait for the tasks to complete.
+ * @param batchSize
+ * The size of the batch. Default is 1000.
* @param requestOptions
* Additional request configuration.
* @return
@@ -263,9 +265,10 @@ package object extension {
indexName: String,
objects: Seq[Any],
waitForTasks: Boolean = false,
+ batchSize: Int = 1000,
requestOptions: Option[RequestOptions] = None
)(implicit ec: ExecutionContext): Future[Seq[BatchResponse]] = {
- chunkedBatch(indexName, objects, Action.AddObject, waitForTasks, 1000, requestOptions)
+ chunkedBatch(indexName, objects, Action.AddObject, waitForTasks, batchSize, requestOptions)
}
/** Helper: Deletes every object for the given objectIDs. The `chunkedBatch` helper is used under the hood, which
@@ -277,6 +280,8 @@ package object extension {
* The list of objectIDs to delete.
* @param waitForTasks
* Whether to wait for the tasks to complete.
+ * @param batchSize
+ * The size of the batch. Default is 1000.
* @param requestOptions
* Additional request configuration.
* @return
@@ -286,6 +291,7 @@ package object extension {
indexName: String,
objectIDs: Seq[String],
waitForTasks: Boolean = false,
+ batchSize: Int = 1000,
requestOptions: Option[RequestOptions] = None
)(implicit ec: ExecutionContext): Future[Seq[BatchResponse]] = {
chunkedBatch(
@@ -293,7 +299,7 @@ package object extension {
objectIDs.map(id => new { val objectID: String = id }),
Action.DeleteObject,
waitForTasks,
- 1000,
+ batchSize,
requestOptions
)
}
@@ -309,6 +315,8 @@ package object extension {
* To be provided if non-existing objects are passed, otherwise, the call will fail.
* @param waitForTasks
* Whether to wait for the tasks to complete.
+ * @param batchSize
+ * The size of the batch. Default is 1000.
* @param requestOptions
* Additional request configuration.
* @return
@@ -319,6 +327,7 @@ package object extension {
objects: Seq[Any],
createIfNotExists: Boolean = false,
waitForTasks: Boolean = false,
+ batchSize: Int = 1000,
requestOptions: Option[RequestOptions] = None
)(implicit ec: ExecutionContext): Future[Seq[BatchResponse]] = {
chunkedBatch(
@@ -326,7 +335,7 @@ package object extension {
objects,
if (createIfNotExists) Action.PartialUpdateObject else Action.PartialUpdateObjectNoCreate,
waitForTasks,
- 1000,
+ batchSize,
requestOptions
)
}
diff --git a/clients/algoliasearch-client-swift/Sources/Search/Extra/SearchClientExtension.swift b/clients/algoliasearch-client-swift/Sources/Search/Extra/SearchClientExtension.swift
index bb2fe4b21b..ee7abd7a06 100644
--- a/clients/algoliasearch-client-swift/Sources/Search/Extra/SearchClientExtension.swift
+++ b/clients/algoliasearch-client-swift/Sources/Search/Extra/SearchClientExtension.swift
@@ -468,12 +468,14 @@ public extension SearchClient {
/// - parameter indexName: The name of the index where to save the objects
/// - parameter objects: The new objects
/// - parameter waitForTasks: If we should wait for the batch task to be finished before processing the next one
+ /// - parameter batchSize: The maximum number of objects to include in a batch
/// - parameter requestOptions: The request options
/// - returns: [BatchResponse]
func saveObjects(
indexName: String,
objects: [some Encodable],
waitForTasks: Bool = false,
+ batchSize: Int = 1000,
requestOptions: RequestOptions? = nil
) async throws -> [BatchResponse] {
try await self.chunkedBatch(
@@ -481,7 +483,7 @@ public extension SearchClient {
objects: objects,
action: .addObject,
waitForTasks: waitForTasks,
- batchSize: 1000,
+ batchSize: batchSize,
requestOptions: requestOptions
)
}
@@ -491,12 +493,14 @@ public extension SearchClient {
/// - parameter indexName: The name of the index to delete objectIDs from
/// - parameter objectIDs: The objectIDs to delete
/// - parameter waitForTasks: If we should wait for the batch task to be finished before processing the next one
+ /// - parameter batchSize: The maximum number of objects to include in a batch
/// - parameter requestOptions: The request options
/// - returns: [BatchResponse]
func deleteObjects(
indexName: String,
objectIDs: [String],
waitForTasks: Bool = false,
+ batchSize: Int = 1000,
requestOptions: RequestOptions? = nil
) async throws -> [BatchResponse] {
try await self.chunkedBatch(
@@ -504,7 +508,7 @@ public extension SearchClient {
objects: objectIDs.map { AnyCodable(["objectID": $0]) },
action: .deleteObject,
waitForTasks: waitForTasks,
- batchSize: 1000,
+ batchSize: batchSize,
requestOptions: requestOptions
)
}
@@ -516,6 +520,7 @@ public extension SearchClient {
/// - parameter createIfNotExists: To be provided if non-existing objects are passed, otherwise, the call will
/// fail.
/// - parameter waitForTasks: If we should wait for the batch task to be finished before processing the next one
+ /// - parameter batchSize: The maximum number of objects to include in a batch
/// - parameter requestOptions: The request options
/// - returns: [BatchResponse]
func partialUpdateObjects(
@@ -523,6 +528,7 @@ public extension SearchClient {
objects: [some Encodable],
createIfNotExists: Bool = false,
waitForTasks: Bool = false,
+ batchSize: Int = 1000,
requestOptions: RequestOptions? = nil
) async throws -> [BatchResponse] {
try await self.chunkedBatch(
@@ -530,7 +536,7 @@ public extension SearchClient {
objects: objects,
action: createIfNotExists ? .partialUpdateObject : .partialUpdateObjectNoCreate,
waitForTasks: waitForTasks,
- batchSize: 1000,
+ batchSize: batchSize,
requestOptions: requestOptions
)
}
diff --git a/specs/search/helpers/deleteObjects.yml b/specs/search/helpers/deleteObjects.yml
index 2d1a2d032d..bbdb376b66 100644
--- a/specs/search/helpers/deleteObjects.yml
+++ b/specs/search/helpers/deleteObjects.yml
@@ -28,6 +28,12 @@ method:
required: false
schema:
type: boolean
+ - in: query
+ name: batchSize
+ description: The size of the chunk of `objects`. The number of `batch` calls will be equal to `length(objects) / batchSize`. Defaults to 1000.
+ required: false
+ schema:
+ type: integer
- in: query
name: requestOptions
description: The request options to pass to the `batch` method.
diff --git a/specs/search/helpers/partialUpdateObjects.yml b/specs/search/helpers/partialUpdateObjects.yml
index 2dcae02b8f..0b80f88df1 100644
--- a/specs/search/helpers/partialUpdateObjects.yml
+++ b/specs/search/helpers/partialUpdateObjects.yml
@@ -34,6 +34,12 @@ method:
required: false
schema:
type: boolean
+ - in: query
+ name: batchSize
+ description: The size of the chunk of `objects`. The number of `batch` calls will be equal to `length(objects) / batchSize`. Defaults to 1000.
+ required: false
+ schema:
+ type: integer
- in: query
name: requestOptions
description: The request options to pass to the `batch` method.
diff --git a/specs/search/helpers/saveObjects.yml b/specs/search/helpers/saveObjects.yml
index bf7cee3fe8..71b412c7aa 100644
--- a/specs/search/helpers/saveObjects.yml
+++ b/specs/search/helpers/saveObjects.yml
@@ -28,6 +28,12 @@ method:
required: false
schema:
type: boolean
+ - in: query
+ name: batchSize
+ description: The size of the chunk of `objects`. The number of `batch` calls will be equal to `length(objects) / batchSize`. Defaults to 1000.
+ required: false
+ schema:
+ type: integer
- in: query
name: requestOptions
description: The request options to pass to the `batch` method.
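The three helper specs above all describe `batchSize` as "The number of `batch` calls will be equal to `length(objects) / batchSize`"; strictly speaking it is the ceiling of that division. A small stand-alone Kotlin sketch of the arithmetic (an illustration of the documented behaviour, not the generated client code):

// The helpers split `objects` into slices of at most `batchSize` items and
// send one `batch` call per slice, i.e. ceil(objects.size / batchSize) calls.
fun <T> countBatchCalls(objects: List<T>, batchSize: Int = 1000): Int =
  objects.chunked(batchSize).size

fun main() {
  println(countBatchCalls(List(2500) { it }))                  // 3 calls: 1000 + 1000 + 500
  println(countBatchCalls(List(2500) { it }, batchSize = 500)) // 5 calls of 500 each
}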
diff --git a/templates/java/api_helpers.mustache b/templates/java/api_helpers.mustache
index 439b192422..ac7848b370 100644
--- a/templates/java/api_helpers.mustache
+++ b/templates/java/api_helpers.mustache
@@ -708,7 +708,25 @@ public List saveObjects(String indexName, Iterable objects
* the transporter requestOptions. (optional)
*/
public <T> List<BatchResponse> saveObjects(String indexName, Iterable<T> objects, boolean waitForTasks, RequestOptions requestOptions) {
- return chunkedBatch(indexName, objects, Action.ADD_OBJECT, waitForTasks, 1000, requestOptions);
+ return saveObjects(indexName, objects, waitForTasks, 1000, requestOptions);
+}
+
+/**
+ * Helper: Saves the given array of objects in the given index. The `chunkedBatch` helper is used
+ * under the hood, which creates `batch` requests with at most `batchSize` objects in each.
+ *
+ * @param indexName The `indexName` to replace `objects` in.
+ * @param objects The array of `objects` to store in the given Algolia `indexName`.
+ * @param waitForTasks - Whether or not we should wait until every `batch` task has been
+ * processed; this operation may slow the total execution time of this method but is more
+ * reliable.
+ * @param batchSize The size of the chunk of `objects`. The number of `batch` calls will be equal
+ * to `length(objects) / batchSize`.
+ * @param requestOptions The requestOptions to send along with the query, they will be merged with
+ * the transporter requestOptions. (optional)
+ */
+public <T> List<BatchResponse> saveObjects(String indexName, Iterable<T> objects, boolean waitForTasks, int batchSize, RequestOptions requestOptions) {
+ return chunkedBatch(indexName, objects, Action.ADD_OBJECT, waitForTasks, batchSize, requestOptions);
}
/**
@@ -747,7 +765,25 @@ public List deleteObjects(String indexName, List objectID
* @param requestOptions The requestOptions to send along with the query, they will be merged with
* the transporter requestOptions. (optional)
*/
-public List<BatchResponse> deleteObjects(String indexName, List<String> objectIDs, boolean waitForTasks, RequestOptions requestOptions) {
+public List<BatchResponse> deleteObjects(String indexName, List<String> objectIDs, boolean waitForTasks,RequestOptions requestOptions) {
+ return deleteObjects(indexName, objectIDs, waitForTasks, 1000, requestOptions);
+}
+
+/**
+ * Helper: Deletes every record for the given objectIDs. The `chunkedBatch` helper is used under
+ * the hood, which creates `batch` requests with at most `batchSize` objectIDs in each.
+ *
+ * @param indexName The `indexName` to delete `objectIDs` from.
+ * @param objectIDs The array of `objectIDs` to delete from the `indexName`.
+ * @param waitForTasks - Whether or not we should wait until every `batch` task has been
+ * processed; this operation may slow the total execution time of this method but is more
+ * reliable.
+ * @param batchSize The size of the chunk of `objects`. The number of `batch` calls will be equal
+ * to `length(objects) / batchSize`.
+ * @param requestOptions The requestOptions to send along with the query, they will be merged with
+ * the transporter requestOptions. (optional)
+ */
+public List<BatchResponse> deleteObjects(String indexName, List<String> objectIDs, boolean waitForTasks, int batchSize, RequestOptions requestOptions) {
List