diff --git a/sdk/cosmos/azure-cosmos-benchmark/ctl/linkedin/run_benchmark.sh b/sdk/cosmos/azure-cosmos-benchmark/ctl/linkedin/run_benchmark.sh index 3ae35d672e0a2..a2decb77c7d5b 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/ctl/linkedin/run_benchmark.sh +++ b/sdk/cosmos/azure-cosmos-benchmark/ctl/linkedin/run_benchmark.sh @@ -10,6 +10,7 @@ ## - ctl_concurrency (optional: default 50) ## - ctl_consistency_level (optional: default Session) ## - ctl_number_of_precreated_documents (optional: default 100,000) +## - ctl_bulk_load_batch_size (optional: default 200,000) ## - ctl_number_of_operations (optional: default 1,000,000) ## - ctl_max_running_time_duration (optional: default 10 minutes) ## - ctl_printing_interval (optional: default 30 seconds) @@ -63,6 +64,13 @@ else number_of_precreated_documents=$ctl_number_of_precreated_documents fi +if [ -z "$ctl_bulk_load_batch_size" ] +then +bulk_load_batch_size=200000 +else +bulk_load_batch_size=$ctl_bulk_load_batch_size +fi + if [ -z "$ctl_number_of_operations" ] then number_of_operations=-1 @@ -98,9 +106,9 @@ jvm_opt="" if [ -z "$ctl_graphite_endpoint" ] then -java -Xmx8g -Xms8g $jvm_opt -Dcosmos.directModeProtocol=$protocol -Dazure.cosmos.directModeProtocol=$protocol -jar "$jar_file" -serviceEndpoint "$service_endpoint" -masterKey "$master_key" -databaseId "$database_name" -collectionId "$collection_name" -numberOfCollectionForCtl "$number_Of_collection" -throughput $throughput -consistencyLevel $consistency_level -concurrency $concurrency -numberOfOperations $number_of_operations -operation $operation -connectionMode $connection_mode -maxRunningTimeDuration $max_running_time_duration -numberOfPreCreatedDocuments $number_of_precreated_documents -printingInterval $printing_interval -manageResources 2>&1 | tee -a "$log_filename" +java -Xmx8g -Xms8g $jvm_opt -Dcosmos.directModeProtocol=$protocol -Dazure.cosmos.directModeProtocol=$protocol -jar "$jar_file" -serviceEndpoint "$service_endpoint" -masterKey "$master_key" 
-databaseId "$database_name" -collectionId "$collection_name" -numberOfCollectionForCtl "$number_Of_collection" -throughput $throughput -consistencyLevel $consistency_level -concurrency $concurrency -numberOfOperations $number_of_operations -operation $operation -connectionMode $connection_mode -maxRunningTimeDuration $max_running_time_duration -numberOfPreCreatedDocuments $number_of_precreated_documents -bulkloadBatchSize $bulk_load_batch_size -printingInterval $printing_interval 2>&1 | tee -a "$log_filename" else -java -Xmx8g -Xms8g $jvm_opt -Dcosmos.directModeProtocol=$protocol -Dazure.cosmos.directModeProtocol=$protocol -jar "$jar_file" -serviceEndpoint "$service_endpoint" -masterKey "$master_key" -databaseId "$database_name" -collectionId "$collection_name" -numberOfCollectionForCtl "$number_Of_collection" -throughput $throughput -consistencyLevel $consistency_level -concurrency $concurrency -numberOfOperations $number_of_operations -operation $operation -connectionMode $connection_mode -maxRunningTimeDuration $max_running_time_duration -graphiteEndpoint $ctl_graphite_endpoint -numberOfPreCreatedDocuments $number_of_precreated_documents -printingInterval $printing_interval -manageResources 2>&1 | tee -a "$log_filename" +java -Xmx8g -Xms8g $jvm_opt -Dcosmos.directModeProtocol=$protocol -Dazure.cosmos.directModeProtocol=$protocol -jar "$jar_file" -serviceEndpoint "$service_endpoint" -masterKey "$master_key" -databaseId "$database_name" -collectionId "$collection_name" -numberOfCollectionForCtl "$number_Of_collection" -throughput $throughput -consistencyLevel $consistency_level -concurrency $concurrency -numberOfOperations $number_of_operations -operation $operation -connectionMode $connection_mode -maxRunningTimeDuration $max_running_time_duration -graphiteEndpoint $ctl_graphite_endpoint -numberOfPreCreatedDocuments $number_of_precreated_documents -bulkloadBatchSize $bulk_load_batch_size -printingInterval $printing_interval 2>&1 | tee -a "$log_filename" fi 
end=`date +%s` diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/Configuration.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/Configuration.java index c846e6b9c6c61..d7119f3702f4f 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/Configuration.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/Configuration.java @@ -89,8 +89,8 @@ public class Configuration { @Parameter(names = "-readWriteQueryPct", description = "Comma separated read write query workload percent") private String readWriteQueryPct = "90,9,1"; - @Parameter(names = "-manageResources", description = "Control switch for creating/deleting underlying test resources") - private boolean manageResources = false; + @Parameter(names = "-manageDatabase", description = "Control switch for creating/deleting underlying database resource") + private boolean manageDatabase = false; @Parameter(names = "-operation", description = "Type of Workload:\n" + "\tReadThroughput- run a READ workload that prints only throughput *\n" @@ -161,6 +161,9 @@ public Duration convert(String value) { @Parameter(names = "-contentResponseOnWriteEnabled", description = "if set to false, does not returns content response on document write operations") private String contentResponseOnWriteEnabled = String.valueOf(true); + @Parameter(names = "-bulkloadBatchSize", description = "Control the number of documents uploaded in each BulkExecutor load iteration (Only supported for the LinkedInCtlWorkload)") + private int bulkloadBatchSize = 200000; + @Parameter(names = {"-h", "-help", "--help"}, description = "Help", help = true) private boolean help = false; @@ -393,8 +396,12 @@ public String getReadWriteQueryPct() { return this.readWriteQueryPct; } - public boolean shouldManageResources() { - return this.manageResources; + public boolean shouldManageDatabase() { + return this.manageDatabase; + } + + 
public int getBulkloadBatchSize() { + return this.bulkloadBatchSize; } public String toString() { diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/ResourceManagerImpl.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/CollectionResourceManager.java similarity index 65% rename from sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/ResourceManagerImpl.java rename to sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/CollectionResourceManager.java index f413a82842bce..4fe2b2431f66c 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/ResourceManagerImpl.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/CollectionResourceManager.java @@ -10,8 +10,8 @@ import com.azure.cosmos.benchmark.Configuration; import com.azure.cosmos.benchmark.linkedin.data.CollectionAttributes; import com.azure.cosmos.benchmark.linkedin.data.EntityConfiguration; +import com.azure.cosmos.benchmark.linkedin.impl.Constants; import com.azure.cosmos.models.CosmosContainerProperties; -import com.azure.cosmos.models.ThroughputProperties; import com.google.common.base.Preconditions; import java.time.Duration; import java.util.List; @@ -19,19 +19,21 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static com.azure.cosmos.benchmark.linkedin.impl.Constants.PARTITION_KEY_PATH; -import static com.azure.cosmos.models.ThroughputProperties.createManualThroughput; - -public class ResourceManagerImpl implements ResourceManager { - private static final Logger LOGGER = LoggerFactory.getLogger(ResourceManagerImpl.class); +/** + * Implementation for managing only the Collections for this test. 
This class facilitates + * container creation after the CTL environment has provisioned the database with the + * required throughput + */ +public class CollectionResourceManager implements ResourceManager { + private static final Logger LOGGER = LoggerFactory.getLogger(CollectionResourceManager.class); private static final Duration RESOURCE_CRUD_WAIT_TIME = Duration.ofSeconds(30); private final Configuration _configuration; private final EntityConfiguration _entityConfiguration; private final CosmosAsyncClient _client; - public ResourceManagerImpl(final Configuration configuration, + public CollectionResourceManager(final Configuration configuration, final EntityConfiguration entityConfiguration, final CosmosAsyncClient client) { Preconditions.checkNotNull(configuration, @@ -39,54 +41,38 @@ public ResourceManagerImpl(final Configuration configuration, Preconditions.checkNotNull(entityConfiguration, "The Test Entity specific configuration can not be null"); Preconditions.checkNotNull(client, "Need a non-null client for " - + "setting up the Database and containers for the test"); + + "setting up the Database and collections for the test"); _configuration = configuration; _entityConfiguration = entityConfiguration; _client = client; } @Override - public void createDatabase() throws CosmosException { - try { - LOGGER.info("Creating database {} for the ctl workload if one doesn't exist", _configuration.getDatabaseId()); - final ThroughputProperties throughputProperties = createManualThroughput(_configuration.getThroughput()); - _client.createDatabaseIfNotExists(_configuration.getDatabaseId(), throughputProperties) - .block(RESOURCE_CRUD_WAIT_TIME); - } catch (CosmosException e) { - LOGGER.error("Exception while creating database {}", _configuration.getDatabaseId(), e); - throw e; - } - - deleteExistingContainers(); - } - - @Override - public void createContainer() throws CosmosException { + public void createResources() throws CosmosException { final String 
containerName = _configuration.getCollectionId(); final CosmosAsyncDatabase database = _client.getDatabase(_configuration.getDatabaseId()); final CollectionAttributes collectionAttributes = _entityConfiguration.collectionAttributes(); try { LOGGER.info("Creating container {} in the database {}", containerName, database.getId()); final CosmosContainerProperties containerProperties = - new CosmosContainerProperties(containerName, PARTITION_KEY_PATH) + new CosmosContainerProperties(containerName, Constants.PARTITION_KEY_PATH) .setIndexingPolicy(collectionAttributes.indexingPolicy()); database.createContainerIfNotExists(containerProperties) .block(RESOURCE_CRUD_WAIT_TIME); } catch (CosmosException e) { - LOGGER.error("Exception while creating container {}", containerName, e); + LOGGER.error("Exception while creating collection {}", containerName, e); throw e; } } @Override public void deleteResources() { - // Delete all the containers in the database - deleteExistingContainers(); + deleteExistingCollections(); - LOGGER.info("Resource cleanup completed"); + LOGGER.info("Collection resource cleanup completed"); } - private void deleteExistingContainers() { + private void deleteExistingCollections() { final CosmosAsyncDatabase database = _client.getDatabase(_configuration.getDatabaseId()); final List cosmosAsyncContainers = database.readAllContainers() .byPage() @@ -98,12 +84,12 @@ private void deleteExistingContainers() { // Run a best effort attempt to delete all existing containers and data there-in for (CosmosAsyncContainer cosmosAsyncContainer : cosmosAsyncContainers) { - LOGGER.info("Deleting container {} in the Database {}", cosmosAsyncContainer.getId(), _configuration.getDatabaseId()); + LOGGER.info("Deleting collection {} in the Database {}", cosmosAsyncContainer.getId(), _configuration.getDatabaseId()); try { cosmosAsyncContainer.delete() .block(RESOURCE_CRUD_WAIT_TIME); } catch (CosmosException e) { - LOGGER.error("Error deleting container {} in the Database 
{}", + LOGGER.error("Error deleting collection {} in the Database {}", cosmosAsyncContainer.getId(), _configuration.getDatabaseId(), e); } } diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/DataGenerationIterator.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/DataGenerationIterator.java index 32792767fdafb..161f3da924787 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/DataGenerationIterator.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/DataGenerationIterator.java @@ -9,7 +9,6 @@ import com.google.common.base.Preconditions; import java.util.Iterator; import java.util.Map; -import java.util.stream.Collectors; /** @@ -17,21 +16,25 @@ */ public class DataGenerationIterator implements Iterator> { - public static final int BATCH_SIZE = 200000; - private final DataGenerator _dataGenerator; private final int _totalRecordCount; private int _totalDataGenerated; + private final int _batchLoadBatchSize; /** * @param dataGenerator The underlying DataGenerator capable of generating a batch of records * @param recordCount Number of records we want to generate generate for this test. 
- * Actual data generation happens in pre-determined batch size + * @param batchLoadBatchSize The number of documents to generate, and load, in each BulkExecutor iteration */ - public DataGenerationIterator(final DataGenerator dataGenerator, int recordCount) { + public DataGenerationIterator(final DataGenerator dataGenerator, int recordCount, int batchLoadBatchSize) { + Preconditions.checkArgument(recordCount > 0, + "The number of documents to generate must be greater than 0"); + Preconditions.checkArgument(batchLoadBatchSize > 0, + "The number of documents to generate and load on each BulkExecutor load iteration must be greater than 0"); _dataGenerator = Preconditions.checkNotNull(dataGenerator, "The underlying DataGenerator for this iterator can not be null"); _totalRecordCount = recordCount; + _batchLoadBatchSize = batchLoadBatchSize; _totalDataGenerated = 0; } @@ -42,7 +45,7 @@ public boolean hasNext() { @Override public Map next() { - final int recordsToGenerate = Math.min(BATCH_SIZE, _totalRecordCount - _totalDataGenerated); + final int recordsToGenerate = Math.min(_batchLoadBatchSize, _totalRecordCount - _totalDataGenerated); // Filter Keys in case there are duplicates final Map newDocuments = _dataGenerator.generate(recordsToGenerate); diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/DataLoader.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/DataLoader.java index b09e91b8fcc2a..dc10180c585e9 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/DataLoader.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/DataLoader.java @@ -29,9 +29,9 @@ public class DataLoader { private static final Logger LOGGER = LoggerFactory.getLogger(DataLoader.class); - private static final int MAX_BATCH_SIZE = 20000; - private static final int BULK_OPERATION_CONCURRENCY = 5; - private static final Duration 
BULK_LOAD_WAIT_DURATION = Duration.ofSeconds(120); + private static final int MAX_BATCH_SIZE = 10000; + private static final int BULK_OPERATION_CONCURRENCY = 10; + private static final Duration VALIDATE_DATA_WAIT_DURATION = Duration.ofSeconds(120); private static final String COUNT_ALL_QUERY = "SELECT COUNT(1) FROM c"; private static final String COUNT_ALL_QUERY_RESULT_FIELD = "$1"; @@ -48,11 +48,13 @@ public DataLoader(final Configuration configuration, _client = Preconditions.checkNotNull(client, "The CosmosAsyncClient needed for data loading can not be null"); _dataGenerator = new DataGenerationIterator(entityConfiguration.dataGenerator(), - _configuration.getNumberOfPreCreatedDocuments()); + _configuration.getNumberOfPreCreatedDocuments(), + _configuration.getBulkloadBatchSize()); } public void loadData() { - LOGGER.info("Starting batched data loading, loading {} documents in each iteration", DataGenerationIterator.BATCH_SIZE); + LOGGER.info("Starting batched data loading, loading {} documents in each iteration", + _configuration.getBulkloadBatchSize()); while (_dataGenerator.hasNext()) { final Map newDocuments = _dataGenerator.next(); bulkCreateItems(newDocuments); @@ -71,11 +73,14 @@ private void bulkCreateItems(final Map records) { database.getId(), containerName); + // We want to wait longer depending on the number of documents in each iteration + final Duration blockingWaitTime = Duration.ofSeconds(120 * + (((_configuration.getBulkloadBatchSize() - 1) / 200000) + 1)); final BulkProcessingOptions bulkProcessingOptions = new BulkProcessingOptions<>(Object.class); bulkProcessingOptions.setMaxMicroBatchSize(MAX_BATCH_SIZE) .setMaxMicroBatchConcurrency(BULK_OPERATION_CONCURRENCY); container.processBulkOperations(Flux.fromIterable(cosmosItemOperations), bulkProcessingOptions) - .blockLast(BULK_LOAD_WAIT_DURATION); + .blockLast(blockingWaitTime); LOGGER.info("Completed loading {} documents into [{}:{}]", cosmosItemOperations.size(), database.getId(), @@ -93,7 
+98,7 @@ private void validateDataCreation(int expectedSize) { .queryItems(COUNT_ALL_QUERY, ObjectNode.class) .byPage() .collectList() - .block(BULK_LOAD_WAIT_DURATION); + .block(VALIDATE_DATA_WAIT_DURATION); final int resultCount = Optional.ofNullable(queryItemsResponseList) .map(responseList -> responseList.get(0)) .map(FeedResponse::getResults) diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/DatabaseResourceManager.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/DatabaseResourceManager.java new file mode 100644 index 0000000000000..d729a7e6f909a --- /dev/null +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/DatabaseResourceManager.java @@ -0,0 +1,81 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.cosmos.benchmark.linkedin; + +import com.azure.cosmos.CosmosAsyncClient; +import com.azure.cosmos.CosmosAsyncDatabase; +import com.azure.cosmos.CosmosException; +import com.azure.cosmos.benchmark.Configuration; +import com.azure.cosmos.benchmark.linkedin.data.EntityConfiguration; +import com.azure.cosmos.models.ThroughputProperties; +import com.google.common.base.Preconditions; +import java.time.Duration; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + + +/** + * For local testing, the database creation needs to happen as part of the Test setup. 
This class + * manages the database AND collection setup, and useful for ensuring database and other resources + * and not left unused after local testing + */ +public class DatabaseResourceManager implements ResourceManager { + + private static final Logger LOGGER = LoggerFactory.getLogger(DatabaseResourceManager.class); + private static final Duration RESOURCE_CRUD_WAIT_TIME = Duration.ofSeconds(30); + + private final Configuration _configuration; + private final CosmosAsyncClient _client; + private final CollectionResourceManager _collectionResourceManager; + + public DatabaseResourceManager(final Configuration configuration, + final EntityConfiguration entityConfiguration, + final CosmosAsyncClient client) { + Preconditions.checkNotNull(configuration, + "The Workload configuration defining the parameters can not be null"); + Preconditions.checkNotNull(entityConfiguration, + "The Test Entity specific configuration can not be null"); + Preconditions.checkNotNull(client, "Need a non-null client for " + + "setting up the Database and collections for the test"); + _configuration = configuration; + _client = client; + _collectionResourceManager = new CollectionResourceManager(_configuration, entityConfiguration, _client); + } + + @Override + public void createResources() throws CosmosException { + try { + LOGGER.info("Creating database {} for the ctl workload if one doesn't exist", _configuration.getDatabaseId()); + final ThroughputProperties throughputProperties = + ThroughputProperties.createManualThroughput(_configuration.getThroughput()); + _client.createDatabaseIfNotExists(_configuration.getDatabaseId(), throughputProperties) + .block(RESOURCE_CRUD_WAIT_TIME); + } catch (CosmosException e) { + LOGGER.error("Exception while creating database {}", _configuration.getDatabaseId(), e); + throw e; + } + + // Delete any existing collections/containers in this database + _collectionResourceManager.deleteResources(); + + // And recreate the collections for this test + 
_collectionResourceManager.createResources(); + } + + @Override + public void deleteResources() { + // Followed by the main database used for testing + final CosmosAsyncDatabase database = _client.getDatabase(_configuration.getDatabaseId()); + try { + LOGGER.info("Deleting the main database {} used in this test. Collection", _configuration.getDatabaseId()); + database.delete() + .block(RESOURCE_CRUD_WAIT_TIME); + } catch (CosmosException e) { + LOGGER.error("Exception deleting the database {}", _configuration.getDatabaseId(), e); + throw e; + } + + LOGGER.info("Database resource cleanup completed"); + } +} diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/LICtlWorkload.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/LICtlWorkload.java index f67349771afe7..2dee1644075db 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/LICtlWorkload.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/LICtlWorkload.java @@ -39,17 +39,16 @@ public LICtlWorkload(final Configuration configuration) { _bulkLoadClient = AsyncClientFactory.buildBulkLoadAsyncClient(configuration); _metricsRegistry = new MetricRegistry(); _reporter = ScheduledReporterFactory.create(_configuration, _metricsRegistry); - _resourceManager = _configuration.shouldManageResources() - ? new ResourceManagerImpl(_configuration, _entityConfiguration, _client) - : new NoopResourceManagerImpl(); + _resourceManager = _configuration.shouldManageDatabase() + ? 
new DatabaseResourceManager(_configuration, _entityConfiguration, _client) + : new CollectionResourceManager(_configuration, _entityConfiguration, _client); _dataLoader = new DataLoader(_configuration, _entityConfiguration, _bulkLoadClient); _getTestRunner = new GetTestRunner(_configuration, _client, _metricsRegistry, _entityConfiguration); } public void setup() throws CosmosException { - _resourceManager.createDatabase(); - - _resourceManager.createContainer(); + LOGGER.info("Creating resources"); + _resourceManager.createResources(); LOGGER.info("Loading data"); _dataLoader.loadData(); diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/NoopResourceManagerImpl.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/NoopResourceManagerImpl.java deleted file mode 100644 index 685dff1e4bb40..0000000000000 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/NoopResourceManagerImpl.java +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package com.azure.cosmos.benchmark.linkedin; - -import com.azure.cosmos.CosmosException; - - -/** - * ResourceManager implementation where the CTL workload does not manage the underlying - * Database and Container resources. 
- */ -public class NoopResourceManagerImpl implements ResourceManager { - @Override - public void createDatabase() throws CosmosException { - - } - - @Override - public void createContainer() throws CosmosException { - - } - - @Override - public void deleteResources() { - - } -} diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/ResourceManager.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/ResourceManager.java index 4ee295c46b0ce..e1befd9aa5d8c 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/ResourceManager.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/ResourceManager.java @@ -11,20 +11,12 @@ */ public interface ResourceManager { /** - * Initialize the CosmosDB database required for running this test, or if the database exists, delete all - * legacy containers + * Initialize this resource required for running this test * * @throws CosmosException in the event of an error creating the underlying database, or deleting * containers from a previously created database of the same name */ - void createDatabase() throws CosmosException; - - /** - * Create desired container/collection for the test - * - * @throws CosmosException if the container could not be created - */ - void createContainer() throws CosmosException; + void createResources() throws CosmosException; /** * Delete all managed resources e.g. 
account, databases and/or containers etc diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/impl/Accessor.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/impl/Accessor.java index 811dede8d322e..d71e001d24969 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/impl/Accessor.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/impl/Accessor.java @@ -4,7 +4,9 @@ package com.azure.cosmos.benchmark.linkedin.impl; import com.azure.cosmos.benchmark.linkedin.impl.exceptions.AccessorException; +import com.azure.cosmos.benchmark.linkedin.impl.models.BatchGetResult; import com.azure.cosmos.benchmark.linkedin.impl.models.GetRequestOptions; +import com.azure.cosmos.benchmark.linkedin.impl.models.QueryOptions; import com.azure.cosmos.benchmark.linkedin.impl.models.Result; @@ -27,4 +29,14 @@ public interface Accessor { * @throws AccessorException when the underlying data store throws an exception for any reason. */ Result get(final K key, final GetRequestOptions requestOptions) throws AccessorException; + + /** + * Retrieves the entity from the data source using SQL expression, using the query options provided. + * + * @param queryOptions The SQL query, and related options for executing this query + * @return A BatchGetResult containing the results [if present] and metadata about the operation + * @throws AccessorException when the underlying data store throws an exception for any reason. 
+ */ + BatchGetResult query(final QueryOptions queryOptions) + throws AccessorException; } diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/impl/Constants.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/impl/Constants.java index dd8199bdd5770..f6fa820812b14 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/impl/Constants.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/impl/Constants.java @@ -9,6 +9,7 @@ public class Constants { public static final String PARTITION_KEY_PATH = "/" + PARTITION_KEY; public final static String METHOD_GET = "GET"; + public final static String METHOD_SQL_QUERY = "SQL_QUERY"; public final static String ERROR_COUNT = "ErrorCount"; public final static String TOO_MANY_REQUESTS = "TooManyRequests"; public final static String DELETED_INDICATOR = "__deletedTs__"; diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/impl/CosmosDBDataAccessor.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/impl/CosmosDBDataAccessor.java index 13392302f64c9..b24f6ee5b163d 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/impl/CosmosDBDataAccessor.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/impl/CosmosDBDataAccessor.java @@ -6,7 +6,9 @@ import com.azure.cosmos.benchmark.linkedin.impl.exceptions.CosmosDBDataAccessorException; import com.azure.cosmos.benchmark.linkedin.impl.keyextractor.KeyExtractor; import com.azure.cosmos.benchmark.linkedin.impl.metrics.MetricsFactory; +import com.azure.cosmos.benchmark.linkedin.impl.models.BatchGetResult; import com.azure.cosmos.benchmark.linkedin.impl.models.GetRequestOptions; +import com.azure.cosmos.benchmark.linkedin.impl.models.QueryOptions; import 
com.azure.cosmos.benchmark.linkedin.impl.models.Result; import com.google.common.base.Preconditions; import java.time.Clock; @@ -22,36 +24,36 @@ @ThreadSafe public class CosmosDBDataAccessor implements Accessor { - private final DataLocator _dataLocator; - private final KeyExtractor _keyExtractor; - private final ResponseHandler _responseHandler; - private final GetExecutor _getExecutor; - private final Clock _clock; - - public CosmosDBDataAccessor(final DataLocator dataLocator, - final KeyExtractor keyExtractor, - final ResponseHandler responseHandler, - final MetricsFactory metricsFactory, - final Clock clock, - final OperationsLogger logger) { - _dataLocator = Preconditions.checkNotNull(dataLocator, "DataLocator for this entity can not be null"); - _keyExtractor = Preconditions.checkNotNull(keyExtractor, "The CosmosDBKeyExtractorV3 can not be null"); - _responseHandler = Preconditions.checkNotNull(responseHandler, "The CosmosDBResponseHandler can not be null"); - _clock = Preconditions.checkNotNull(clock, "clock cannot be null"); - _getExecutor = new GetExecutor<>(_dataLocator, - _keyExtractor, - _responseHandler, - metricsFactory, - _clock, - logger); - } - - @Override - public Result get(final K key, final GetRequestOptions requestOptions) throws CosmosDBDataAccessorException { - Preconditions.checkNotNull(key, "The key to fetch the Entity is null!"); - Preconditions.checkNotNull(requestOptions, "The RequestOptions for fetching the Entity is null!"); - Preconditions.checkArgument(_keyExtractor.isKeyValid(key), "The key parameter %s is invalid!", key); - - return _getExecutor.get(key, requestOptions); - } + private final DataLocator _dataLocator; + private final KeyExtractor _keyExtractor; + private final ResponseHandler _responseHandler; + private final GetExecutor _getExecutor; + private final QueryExecutor _queryExecutor; + private final Clock _clock; + + public CosmosDBDataAccessor(final DataLocator dataLocator, final KeyExtractor keyExtractor, + final 
ResponseHandler responseHandler, final MetricsFactory metricsFactory, final Clock clock, + final OperationsLogger logger) { + _dataLocator = Preconditions.checkNotNull(dataLocator, "DataLocator for this entity can not be null"); + _keyExtractor = Preconditions.checkNotNull(keyExtractor, "The CosmosDBKeyExtractorV3 can not be null"); + _responseHandler = Preconditions.checkNotNull(responseHandler, "The CosmosDBResponseHandler can not be null"); + _clock = Preconditions.checkNotNull(clock, "clock cannot be null"); + _getExecutor = new GetExecutor<>(_dataLocator, _keyExtractor, _responseHandler, metricsFactory, _clock, logger); + _queryExecutor = new QueryExecutor<>(_dataLocator, _responseHandler, metricsFactory, _clock, logger); + } + + @Override + public Result get(final K key, final GetRequestOptions requestOptions) throws CosmosDBDataAccessorException { + Preconditions.checkNotNull(key, "The key to fetch the Entity is null!"); + Preconditions.checkNotNull(requestOptions, "The RequestOptions for fetching the Entity is null!"); + Preconditions.checkArgument(_keyExtractor.isKeyValid(key), "The key parameter %s is invalid!", key); + + return _getExecutor.get(key, requestOptions); + } + + @Override + public BatchGetResult query(QueryOptions queryOptions) throws CosmosDBDataAccessorException { + Preconditions.checkNotNull(queryOptions, "The QueryOptions for fetching the Entity can't be null"); + return this._queryExecutor.query(queryOptions); + } } diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/impl/QueryExecutor.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/impl/QueryExecutor.java new file mode 100644 index 0000000000000..3fbc3a71963fb --- /dev/null +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/impl/QueryExecutor.java @@ -0,0 +1,119 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
package com.azure.cosmos.benchmark.linkedin.impl;

import com.azure.cosmos.benchmark.linkedin.impl.exceptions.CosmosDBDataAccessorException;
import com.azure.cosmos.benchmark.linkedin.impl.metrics.MetricsFactory;
import com.azure.cosmos.benchmark.linkedin.impl.models.BatchGetResult;
import com.azure.cosmos.benchmark.linkedin.impl.models.CollectionKey;
import com.azure.cosmos.benchmark.linkedin.impl.models.QueryOptions;
import com.azure.cosmos.models.CosmosQueryRequestOptions;
import com.azure.cosmos.models.FeedResponse;
import com.azure.cosmos.models.PartitionKey;
import com.azure.cosmos.models.SqlQuerySpec;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.google.common.base.Preconditions;
import java.time.Clock;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import javax.annotation.concurrent.ThreadSafe;


/**
 * Encapsulates the CosmosDB SQL query operation for the Azure Async SDK.
 * (Previous javadoc said "GET operation" — a copy/paste from GetExecutor.)
 *
 * NOTE(review): generic type parameters were reconstructed from the class javadoc
 * ("@param K" / "@param V"); confirm against the sibling GetExecutor declaration.
 *
 * @param <K> the key type of the entity stored in the data store
 * @param <V> the entity type stored in the data store
 */
@ThreadSafe
class QueryExecutor<K, V> {

  private static final String ERROR_MESSAGE = "Exception when performing SQL query";

  private final DataLocator _dataLocator;
  private final ResponseHandler<K, V> _responseHandler;
  private final Metrics _metrics;
  private final Clock _clock;
  private final OperationsLogger _logger;

  /**
   * This is initialized during the CosmosDBDataAccessor construction time, and
   * the constructor must not invoke any operations on the DataAccessor.
   *
   * @param dataLocator resolves the active collection and its async container
   * @param responseHandler converts SDK feed responses into BatchGetResult
   * @param metricsFactory source of the Metrics instance for the SQL-query method
   * @param clock injected clock for latency measurement (testability)
   * @param logger debug-level operations logger
   */
  QueryExecutor(final DataLocator dataLocator,
      final ResponseHandler<K, V> responseHandler,
      final MetricsFactory metricsFactory,
      final Clock clock,
      final OperationsLogger logger) {
    Preconditions.checkNotNull(metricsFactory, "The MetricsFactory is null!");
    _dataLocator = Preconditions.checkNotNull(dataLocator, "DataLocator for this entity can not be null");
    _responseHandler = Preconditions.checkNotNull(responseHandler,
        "The CosmosDBResponseHandler can not be null");
    _clock = Preconditions.checkNotNull(clock, "clock cannot be null");
    _logger = Preconditions.checkNotNull(logger, "The Logger can not be null");

    // Initialize the metrics prior to the first operation
    final CollectionKey activeCollection = _dataLocator.getCollection();
    _metrics = metricsFactory.getMetrics(activeCollection, Constants.METHOD_SQL_QUERY);
  }

  /**
   * Retrieve documents by their keys using a SQL expression.
   *
   * @param queryOptions query options carrying the SQL text, optional parameters
   *                     and an optional partitioning key hint
   * @return BatchGetResult keyed by entity key, built from the CosmosDB feed pages
   * @throws CosmosDBDataAccessorException wrapping any failure from the SDK call
   */
  BatchGetResult<K, V> query(final QueryOptions queryOptions) throws CosmosDBDataAccessorException {
    Preconditions.checkNotNull(queryOptions, "queryOptions is null!");
    Preconditions.checkNotNull(queryOptions.getDocumentDBQuery(), "SQL query is null!");

    /*
     * For maxBufferedItemCount and maxDegreeOfParallelism: if they are set to less than 0,
     * the system automatically decides the number of items to buffer (the number of
     * concurrent operations to run).
     */
    final CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions()
        .setMaxDegreeOfParallelism(-1)
        .setMaxBufferedItemCount(-1);

    // Explicitly set the PartitionKey in the CosmosQueryRequestOptions if we are querying
    // by the partitioning key. This provides a hint to the SDK and optimizes query execution.
    if (queryOptions.getPartitioningKey().isPresent()) {
      final PartitionKey partitioningKey = new PartitionKey(queryOptions.getPartitioningKey().get());
      cosmosQueryRequestOptions.setPartitionKey(partitioningKey);
    }

    final CollectionKey activeCollection = _dataLocator.getCollection();
    _metrics.logCounterMetric(Metrics.MetricType.CALL_COUNT);
    final String query = queryOptions.getDocumentDBQuery();
    final long startTime = _clock.millis();

    try {
      final SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(query,
          queryOptions.getSqlParameterList().orElse(Collections.emptyList()));

      final List<FeedResponse<ObjectNode>> responseList = _dataLocator.getAsyncContainer(activeCollection)
          .queryItems(sqlQuerySpec, cosmosQueryRequestOptions, ObjectNode.class)
          .byPage()
          .collectList()
          .block();

      if (Objects.nonNull(responseList)) {
        String activityId = "";
        if (!responseList.isEmpty()) {
          activityId = responseList.get(0).getActivityId();
        }
        _logger.logDebugInfo(Constants.METHOD_SQL_QUERY, query, activeCollection, _clock.millis() - startTime,
            activityId, null);
      }
      return _responseHandler.convertFeedResponse(responseList);
    } catch (Exception ex) {
      _metrics.error(startTime);
      // Preserve the caught exception itself as the cause. The previous code used
      // ex.getCause(), which silently dropped the exception (and its stack trace)
      // whenever it had no nested cause.
      throw new CosmosDBDataAccessorException.Builder()
          .setMessage(ERROR_MESSAGE)
          .setCause(ex)
          .build();
    } finally {
      _metrics.completed(startTime);
    }
  }
}
a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/impl/ResponseHandler.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/impl/ResponseHandler.java @@ -6,6 +6,7 @@ import com.azure.cosmos.CosmosException; import com.azure.cosmos.benchmark.linkedin.impl.exceptions.CosmosDBDataAccessorException; import com.azure.cosmos.benchmark.linkedin.impl.keyextractor.KeyExtractor; +import com.azure.cosmos.benchmark.linkedin.impl.models.BatchGetResult; import com.azure.cosmos.benchmark.linkedin.impl.models.Entity; import com.azure.cosmos.benchmark.linkedin.impl.models.EntityAttributes; import com.azure.cosmos.benchmark.linkedin.impl.models.Result; @@ -13,11 +14,15 @@ import com.azure.cosmos.implementation.Constants; import com.azure.cosmos.implementation.HttpConstants; import com.azure.cosmos.models.CosmosItemResponse; +import com.azure.cosmos.models.FeedResponse; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.ObjectNode; import com.google.common.base.Preconditions; +import com.google.common.util.concurrent.AtomicDouble; import java.time.Duration; +import java.util.List; import java.util.Optional; +import java.util.function.Predicate; import javax.annotation.Nonnull; import javax.annotation.Nullable; import javax.annotation.concurrent.ThreadSafe; @@ -72,6 +77,11 @@ Result convertResponse(@Nonnull final K key, @Nullable final CosmosItemRes .build(); } + BatchGetResult convertFeedResponse(final List> responseList) { + Preconditions.checkNotNull(responseList, "The responseList from the CosmosDB SQL API is null!"); + return convertFeedResponse(responseList, key -> false); + } + Result convertException(@Nonnull final K key, @Nonnull final CosmosException exception) { Preconditions.checkNotNull(exception, "Only a non-null CosmosException can be mapped to " + "the Result object representation."); @@ -103,6 +113,28 @@ CosmosDBDataAccessorException 
createException(@Nonnull final String message, return builder.build(); } + private BatchGetResult convertFeedResponse(@Nonnull final List> responseList, + @Nonnull Predicate includeTombstones) { + + final AtomicDouble requestCharge = new AtomicDouble(0); + final BatchGetResult.Builder resultBuilder = new BatchGetResult.Builder<>(); + for (FeedResponse response : responseList) { + requestCharge.addAndGet(response.getRequestCharge()); + response.getResults() + .forEach(objectNode -> Optional.ofNullable(objectNode) + .map(_keyExtractor::getKey) + // Set the result only if the entity is present after the tombstone filtering + .ifPresent(key -> convertDocument(objectNode, includeTombstones.test(key)) + .ifPresent(entity -> resultBuilder.addResult(key, entity)))); + } + + // Capture all the meters corresponding to this request + final ResultMetadata.Builder metadata = new ResultMetadata.Builder() + .addCostUnits(requestCharge.get()); + return resultBuilder.setMetadata(metadata.build()) + .build(); + } + private Optional> convertDocument(@Nullable final ObjectNode document, final boolean includeTombstone) { diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/impl/models/BatchGetResult.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/impl/models/BatchGetResult.java new file mode 100644 index 0000000000000..2e8cf9c8f9009 --- /dev/null +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/impl/models/BatchGetResult.java @@ -0,0 +1,81 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.cosmos.benchmark.linkedin.impl.models; + +import com.google.common.base.Preconditions; +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.Objects; + + +/** + * The result object that contains + * 1. The results of the query + * 2. 
Metadata about the result + */ +public class BatchGetResult { + private final Map> _results; + private final ResultMetadata _metadata; + + private BatchGetResult(final Map> results, final ResultMetadata metadata) { + _results = results; + _metadata = metadata; + } + + public Map> getResults() { + return _results; + } + + public ResultMetadata getMetadata() { + return _metadata; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + + if (o == null || getClass() != o.getClass()) { + return false; + } + + BatchGetResult that = (BatchGetResult) o; + return Objects.equals(_results, that._results) && Objects.equals(_metadata, that._metadata); + } + + @Override + public int hashCode() { + return Objects.hash(_results, _metadata); + } + + /** + * Builder class to ensure an invalid batchGetResult instance can never be created/passed around + */ + public static class Builder { + private Map> _results = new LinkedHashMap<>(); + + private ResultMetadata _metadata; + + public Builder addResult(final K key, Entity result) { + Preconditions.checkNotNull(key, "key cannot be null"); + + _results.put(key, result); + return this; + } + + public Builder setMetadata(final ResultMetadata metadata) { + Preconditions.checkNotNull(metadata, "The metadata cannot be null"); + _metadata = metadata; + + return this; + } + + public BatchGetResult build() { + Preconditions.checkState(Objects.nonNull(_metadata), + "A Result instance can not be created without the result metadata."); + return new BatchGetResult<>(_results, _metadata); + } + } +} diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/impl/models/QueryOptions.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/impl/models/QueryOptions.java new file mode 100644 index 0000000000000..5d4f9b0703455 --- /dev/null +++ 
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

package com.azure.cosmos.benchmark.linkedin.impl.models;

import com.azure.cosmos.models.SqlParameter;
import com.google.common.base.MoreObjects;
import com.google.common.base.Preconditions;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.stream.Collectors;
import java.util.stream.Stream;


/**
 * QueryOptions are the options you can specify with a SQL expression and SQL parameters
 * for CRUD operations on a data store. Instances are immutable; use {@link Builder}.
 */
public class QueryOptions {
  // DocumentDB SQL query expression
  // TODO: Replace this with the object structure in our implementation if needed
  private final String _documentDBQuery;

  // SQL expression parameters
  private final List<SqlParameter> _sqlParameters;

  // ContinuationToken represents whether there are more matched items.
  private final String _continuationToken;

  private final Integer _pageSize;

  // Now final: it is assigned once in the constructor and never reassigned.
  private final String _partitioningKey;

  // Lazily-computed toString cache. The benign race (two threads computing it
  // concurrently) is harmless since the computed value is deterministic.
  private String _stringRepresentation;

  private QueryOptions(final String continuationToken, final String documentDBQuery,
      final List<SqlParameter> sqlParameters, final Integer pageSize, final String partitioningKey) {
    _continuationToken = continuationToken;
    _documentDBQuery = documentDBQuery;
    _sqlParameters = sqlParameters;
    _pageSize = pageSize;
    _partitioningKey = partitioningKey;
  }

  public String getDocumentDBQuery() {
    return _documentDBQuery;
  }

  public Optional<List<SqlParameter>> getSqlParameterList() {
    return Optional.ofNullable(_sqlParameters);
  }

  public Optional<String> getContinuationToken() {
    return Optional.ofNullable(_continuationToken);
  }

  public Optional<Integer> getPageSize() {
    return Optional.ofNullable(_pageSize);
  }

  public Optional<String> getPartitioningKey() {
    return Optional.ofNullable(_partitioningKey);
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }

    if (o == null || getClass() != o.getClass()) {
      return false;
    }

    QueryOptions that = (QueryOptions) o;

    return Objects.equals(_documentDBQuery, that._documentDBQuery) && Objects.equals(_continuationToken,
        that._continuationToken) && Objects.equals(_pageSize, that._pageSize) && Objects.equals(_partitioningKey,
        that._partitioningKey) && (mapToKeyValueSqlParams(_sqlParameters).equals(
        mapToKeyValueSqlParams(that._sqlParameters)));
  }

  @Override
  public int hashCode() {
    return Objects.hash(_continuationToken, _documentDBQuery, _pageSize, _partitioningKey,
        mapToKeyValueSqlParams(_sqlParameters));
  }

  @Override
  public String toString() {
    if (Objects.isNull(_stringRepresentation)) {
      _stringRepresentation = MoreObjects.toStringHelper(this)
          .omitNullValues()
          .add("documentDBQuery", _documentDBQuery)
          .add("sqlParameters", _sqlParameters)
          .add("pageSize", _pageSize)
          .add("partitioningKey", _partitioningKey)
          .toString();
    }

    return _stringRepresentation;
  }

  // Compares SqlParameter lists by name->value content, since SqlParameter itself may
  // not implement a content-based equals.
  // NOTE(review): Collectors.toMap throws on duplicate parameter names and NPEs on null
  // values — assumes parameter names are unique and values non-null; confirm with callers.
  private Map<String, String> mapToKeyValueSqlParams(final List<SqlParameter> sqlParameters) {
    return Optional.ofNullable(sqlParameters)
        .map(Collection::stream)
        .orElseGet(Stream::empty)
        .collect(Collectors.toMap(SqlParameter::getName, param -> param.getValue(String.class)));
  }

  /**
   * Builder class to ensure an invalid QueryOptions can never be created/passed around.
   */
  public static class Builder {
    // SQL query expression for query in cosmos DB
    private String _documentDBQuery;

    // SQL parameters for building the SQL expression
    private List<SqlParameter> _sqlParameters;

    // ContinuationToken for paging, which indicates there are more items
    private String _continuationToken;

    // pageSize for paging, which determines the size of the pages returned in the result.
    private Integer _pageSize;

    // partitioningKey to use for narrowing down the search space.
    private String _partitioningKey;

    public Builder setDocumentDBQuery(final String documentDBQuery) {
      Preconditions.checkNotNull(documentDBQuery, "SQL expression for querying can not be null");
      _documentDBQuery = documentDBQuery;
      return this;
    }

    public Builder setSqlParameterList(final List<SqlParameter> sqlParameters) {
      _sqlParameters = Preconditions.checkNotNull(sqlParameters, "sqlParameters cannot be null");
      return this;
    }

    public Builder setContinuationToken(final String continuationToken) {
      _continuationToken = Preconditions.checkNotNull(continuationToken, "continuation cannot be null");
      return this;
    }

    public Builder setPageSize(final Integer pageSize) {
      _pageSize = Preconditions.checkNotNull(pageSize, "pageSize cannot be null");
      return this;
    }

    public Builder setPartitioningKey(final String partitioningKey) {
      _partitioningKey = Preconditions.checkNotNull(partitioningKey, "partitioningKey cannot be null");
      return this;
    }

    /**
     * The SQL query expression is the only mandatory field.
     * (Previous comment claimed "There are no Preconditions", contradicting the check below.)
     */
    public QueryOptions build() {
      Preconditions.checkNotNull(_documentDBQuery, "SQL expression cannot be null for querying");
      return new QueryOptions(_continuationToken, _documentDBQuery, _sqlParameters, _pageSize, _partitioningKey);
    }
  }
}