diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 0000000000000..ec0dab9ffce39 --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1 @@ +# See for instructions on this file https://help.github.com/articles/about-codeowners/ diff --git a/README.md b/README.md index 02243f5344921..729c5d70bc3ca 100644 --- a/README.md +++ b/README.md @@ -29,11 +29,11 @@ New wave of packages that we are currently releasing in **preview**. These libra These preview libraries can be easily identified by their folder, package, and namespaces names starting with `azure-`, e.g. `azure-keyvault`. The libraries released in the July 2019 preview: -- [App Configuration](appconfiguration/client/README.md) -- [Event Hubs](eventhubs/client/README.md) -- [Identity](sdk/identity/azure-identity) -- [Key Vault Keys](keyvault/client/keys/README.md) -- [Key Vault Secrets](keyvault/client/secrets/README.md) +- [App Configuration](sdk/appconfiguration/azure-data-appconfiguration/README.md) +- [Event Hubs](sdk/eventhubs/azure-eventhubs/README.md) +- [Identity](sdk/identity/azure-identity/README.md) +- [Key Vault Keys](sdk/keyvault/azure-keyvault-keys/README.md) +- [Key Vault Secrets](sdk/keyvault/client/azure-keyvault-secrets/README.md) - [Storage Blobs](storage/client/README.md) >NOTE: If you need to ensure your code is ready for production, use one of the stable libraries. diff --git a/eng/.docsettings.yml b/eng/.docsettings.yml index 85f3192438c5d..9a97c9831e858 100644 --- a/eng/.docsettings.yml +++ b/eng/.docsettings.yml @@ -17,36 +17,38 @@ required_readme_sections: - ^Next steps$ - ^Contributing$ known_presence_issues: - - ['cognitiveservices/data-plane/language/bingspellcheck', '#2847'] - - ['cognitiveservices/data-plane/language/luis/authoring', '#2847'] - - ['cognitiveservices/data-plane/language/luis/runtime', '#2847'] - - ['cognitiveservices/data-plane/language/textanalytics', '#2847'] - - ['cognitiveservices/data-plane/search/bingautosuggest', '#2847'] - - ['cognitiveservices/data-plane/search/bingcustomimagesearch', '#2847'] - - ['cognitiveservices/data-plane/search/bingcustomsearch', '#2847'] - - ['cognitiveservices/data-plane/search/bingentitysearch', '#2847'] - - ['cognitiveservices/data-plane/search/bingimagesearch', '#2847'] - - ['cognitiveservices/data-plane/search/bingnewssearch', '#2847'] - - ['cognitiveservices/data-plane/search/bingvideosearch', '#2847'] - - ['cognitiveservices/data-plane/search/bingvisualsearch', '#2847'] - - ['cognitiveservices/data-plane/search/bingwebsearch', '#2847'] - - ['cognitiveservices/data-plane/vision/computervision', '#2847'] - - ['cognitiveservices/data-plane/vision/contentmoderator', '#2847'] - - ['cognitiveservices/data-plane/vision/customvision/prediction', '#2847'] - - ['cognitiveservices/data-plane/vision/customvision/training', '#2847'] - - ['cognitiveservices/data-plane/vision/faceapi', '#2847'] + - ['sdk/cognitiveservices/ms-azure-cs-spellcheck', '#2847'] + - ['sdk/cognitiveservices/ms-azure-cs-luis-authoring', '#2847'] + - ['sdk/cognitiveservices/ms-azure-cs-luis-runtime', '#2847'] + - ['sdk/cognitiveservices/ms-azure-cs-textanalytics', '#2847'] + - ['sdk/cognitiveservices/ms-azure-cs-autosuggest', '#2847'] + - ['sdk/cognitiveservices/ms-azure-cs-customimagesearch', '#2847'] + - ['sdk/cognitiveservices/ms-azure-cs-customsearch', '#2847'] + - ['sdk/cognitiveservices/ms-azure-cs-entitysearch', '#2847'] + - ['sdk/cognitiveservices/ms-azure-cs-imagesearch', '#2847'] + - ['sdk/cognitiveservices/ms-azure-cs-newssearch', '#2847'] + - 
['sdk/cognitiveservices/ms-azure-cs-videosearch', '#2847'] + - ['sdk/cognitiveservices/ms-azure-cs-visualsearch', '#2847'] + - ['sdk/cognitiveservices/ms-azure-cs-websearch', '#2847'] + - ['sdk/cognitiveservices/ms-azure-cs-computervision', '#2847'] + - ['sdk/cognitiveservices/ms-azure-cs-contentmoderator', '#2847'] + - ['sdk/cognitiveservices/ms-azure-cs-customvision-prediction', '#2847'] + - ['sdk/cognitiveservices/ms-azure-cs-customvision-training', '#2847'] + - ['sdk/cognitiveservices/ms-azure-cs-faceapi', '#2847'] - ['sdk/keyvault/microsoft-azure-keyvault', '#2847'] - ['sdk/keyvault/microsoft-azure-keyvault-core', '#2847'] - ['sdk/keyvault/microsoft-azure-keyvault-cryptography', '#2847'] - ['sdk/keyvault/microsoft-azure-keyvault-extensions', '#2847'] - ['sdk/keyvault/microsoft-azure-keyvault-webkey', '#2847'] - ['sdk/mediaservices/microsoft-azure-media', '#2847'] + - ['mediaservices/data-plane', '#2847'] - ['core/build-tools', '#2847'] - - ['core/azure-core', '#2847'] - - ['core/azure-core-auth', '#2847'] - - ['core/azure-core-management', '#2847'] + - ['sdk/core/azure-core', '#2847'] + - ['sdk/core/azure-core-auth', '#2847'] + - ['sdk/core/azure-core-management', '#2847'] - ['cosmosdb/data-plane/commons-test-utils', '#2847'] - ['cosmosdb/data-plane/sdk', '#2847'] + - ['sdk/cosmos/sdk', '#2847'] - ['cosmosdb/data-plane/gateway', '#2847'] - ['cosmosdb/data-plane/commons', '#2847'] - ['cosmosdb/data-plane/direct-impl', '#2847'] @@ -55,7 +57,7 @@ known_content_issues: - ['README.md', '#3113'] - ['sdk/applicationinsights/microsoft-azure-applicationinsights-query/README.md', '#3113'] - ['sdk/batch/microsoft-azure-batch/README.md', '#3113'] - - ['eventgrid/data-plane/README.md', '#3113'] + - ['sdk/eventgrid/microsoft-azure-eventgrid/README.md', '#3113'] - ['eventhubs/data-plane/readme.md', '#3113'] - ['eventhubs/data-plane/azure-eventhubs-eph/Readme.md', '#3113'] - ['sdk/authorization/microsoft-azure-authentication-msi-token-provider/readme.md', '#3113'] @@ -64,11 +66,15 @@ known_content_issues: - ['sdk/servicebus/README.md', '#3113'] - ['storage/data-plane/README.md', '#3113'] - ['storage/data-plane/swagger/README.md', '#3113'] - - ['core/README.md', '#3113'] + - ['sdk/core/README.md', '#3113'] - ['cosmosdb/data-plane/README.md', '#3113'] - ['cosmosdb/data-plane/benchmark/README.md', '#3113'] - ['cosmosdb/data-plane/changelog/README.md', '#3113'] - ['cosmosdb/data-plane/faq/README.md', '#3113'] + - ['sdk/cosmos/faq/README.md', '#3113'] + - ['sdk/cosmos/changelog/README.md', '#3113'] + - ['sdk/cosmos/benchmark/README.md', '#3113'] + - ['sdk/cosmos/README.md', '#3113'] package_indexing_exclusion_list: - azure-loganalytics-sample - azure-applicationinsights-query-sample diff --git a/eng/code-quality-reports/pom.xml b/eng/code-quality-reports/pom.xml index 78fe8ebfdc4b6..d3f7756700b82 100755 --- a/eng/code-quality-reports/pom.xml +++ b/eng/code-quality-reports/pom.xml @@ -12,7 +12,6 @@ UTF-8 - 9.2.22.v20170606 https://azuresdkartifacts.blob.core.windows.net/azure-sdk-for-java @@ -61,19 +60,6 @@ - - - org.eclipse.jetty - jetty-maven-plugin - ${jetty-maven-plugin.version} - - 0 - 11079 - STOP - false - ./jetty.xml - - org.apache.maven.plugins maven-site-plugin diff --git a/eng/code-quality-reports/src/main/java/com/azure/tools/checkstyle/checks/ExternalDependencyExposedCheck.java b/eng/code-quality-reports/src/main/java/com/azure/tools/checkstyle/checks/ExternalDependencyExposedCheck.java index 6c4a730d5a99c..b36f28e10be83 100644 --- 
a/eng/code-quality-reports/src/main/java/com/azure/tools/checkstyle/checks/ExternalDependencyExposedCheck.java +++ b/eng/code-quality-reports/src/main/java/com/azure/tools/checkstyle/checks/ExternalDependencyExposedCheck.java @@ -29,12 +29,12 @@ public class ExternalDependencyExposedCheck extends AbstractCheck { ))); private final Map simpleClassNameToQualifiedNameMap = new HashMap<>(); - private static boolean isImplPackage; + + private boolean isPublicClass; @Override public void beginTree(DetailAST rootAST) { simpleClassNameToQualifiedNameMap.clear(); - isImplPackage = false; } @Override @@ -50,7 +50,6 @@ public int[] getAcceptableTokens() { @Override public int[] getRequiredTokens() { return new int[] { - TokenTypes.PACKAGE_DEF, TokenTypes.IMPORT, TokenTypes.METHOD_DEF }; @@ -58,22 +57,23 @@ public int[] getRequiredTokens() { @Override public void visitToken(DetailAST token) { - if (isImplPackage) { - return; - } - switch (token.getType()) { - case TokenTypes.PACKAGE_DEF: - String packageName = FullIdent.createFullIdent(token.findFirstToken(TokenTypes.DOT)).getText(); - isImplPackage = packageName.contains(".implementation"); - break; case TokenTypes.IMPORT: // Add all imported classes into a map, key is the name of class and value is the full package path of class. final String importClassPath = FullIdent.createFullIdentBelow(token).getText(); final String className = importClassPath.substring(importClassPath.lastIndexOf(".") + 1); simpleClassNameToQualifiedNameMap.put(className, importClassPath); break; + case TokenTypes.CLASS_DEF: + // CLASS_DEF always has MODIFIERS + final AccessModifier accessModifier = CheckUtil.getAccessModifierFromModifiersToken( + token.findFirstToken(TokenTypes.MODIFIERS)); + isPublicClass = accessModifier.equals(AccessModifier.PUBLIC); + break; case TokenTypes.METHOD_DEF: + if (!isPublicClass) { + return; + } checkNoExternalDependencyExposed(token); break; default: diff --git a/eng/code-quality-reports/src/main/java/com/azure/tools/checkstyle/checks/JavadocThrowsChecks.java b/eng/code-quality-reports/src/main/java/com/azure/tools/checkstyle/checks/JavadocThrowsChecks.java index 01fd42149ae2b..c12eeb540056c 100644 --- a/eng/code-quality-reports/src/main/java/com/azure/tools/checkstyle/checks/JavadocThrowsChecks.java +++ b/eng/code-quality-reports/src/main/java/com/azure/tools/checkstyle/checks/JavadocThrowsChecks.java @@ -140,7 +140,10 @@ private boolean isOverrideAnnotation(DetailAST modifierToken) { return false; } - return "Override".equals(modifierToken.findFirstToken(TokenTypes.IDENT).getText()); + // Possible for an identifier not to exist if it is a nested class (ie. @Parameterized.Parameters(String)). 
+ final DetailAST identifier = modifierToken.findFirstToken(TokenTypes.IDENT); + + return identifier != null && "Override".equals(identifier.getText()); } /* diff --git a/eng/code-quality-reports/src/main/java/com/azure/tools/checkstyle/checks/ServiceClientInstantiationCheck.java b/eng/code-quality-reports/src/main/java/com/azure/tools/checkstyle/checks/ServiceClientInstantiationCheck.java index 38adfaabbade6..02669178d0ca1 100644 --- a/eng/code-quality-reports/src/main/java/com/azure/tools/checkstyle/checks/ServiceClientInstantiationCheck.java +++ b/eng/code-quality-reports/src/main/java/com/azure/tools/checkstyle/checks/ServiceClientInstantiationCheck.java @@ -21,7 +21,7 @@ public class ServiceClientInstantiationCheck extends AbstractCheck { private static final String SERVICE_CLIENT = "ServiceClient"; private static final String BUILDER = "builder"; - private static final String ASYNC_CLIENT ="AsyncClient"; + private static final String ASYNC_CLIENT = "AsyncClient"; private static final String CLIENT = "Client"; private static final String IS_ASYNC = "isAsync"; diff --git a/eng/code-quality-reports/src/main/resources/checkstyle/checkstyle-suppressions.xml b/eng/code-quality-reports/src/main/resources/checkstyle/checkstyle-suppressions.xml index 09da6af09b39e..ac7af8e887727 100755 --- a/eng/code-quality-reports/src/main/resources/checkstyle/checkstyle-suppressions.xml +++ b/eng/code-quality-reports/src/main/resources/checkstyle/checkstyle-suppressions.xml @@ -85,4 +85,12 @@ + + + + + + + + diff --git a/eng/jacoco-test-coverage/pom.xml b/eng/jacoco-test-coverage/pom.xml index b6567bd45d9e5..ea21a08695cb7 100644 --- a/eng/jacoco-test-coverage/pom.xml +++ b/eng/jacoco-test-coverage/pom.xml @@ -24,10 +24,14 @@ 1.0.0-preview.3 + 1.0.0-preview.3 + 1.0.0-preview.3 + 1.0.0-preview.3 1.0.0-preview.1 1.0.0-preview.1 4.0.0-preview.1 5.0.0-preview.2 + 12.0.0-preview.1 @@ -84,6 +88,11 @@ azure-messaging-eventhubs ${azure-messaging-eventhubs.version} + + + + + diff --git a/eng/pipelines/mgmt.yml b/eng/pipelines/mgmt.yml new file mode 100644 index 0000000000000..c9c850dcc8202 --- /dev/null +++ b/eng/pipelines/mgmt.yml @@ -0,0 +1,38 @@ +trigger: + - master + +variables: + MavenGoals: 'clean,compile' + +jobs: + - job: 'Build' + + strategy: + matrix: + Java 8: + ArtifactName: 'packages' + JavaVersion: '1.8' + Java 7: + ArtifactName: 'packages' + JavaVersion: '1.7' + + pool: + vmImage: 'ubuntu-16.04' + + steps: + - task: ShellScript@2 + displayName: 'call mvn for each mgmt sdk individually' + inputs: + scriptPath: "$(System.DefaultWorkingDirectory)/eng/pipelines/scripts/mgmt_sdk_compiler.sh" + workingDirectory: "$(System.DefaultWorkingDirectory)" + failOnStandardError: true + args: "$(JavaVersion) $(MavenGoals)" + + - task: PublishTestResults@2 + condition: succeededOrFailed() + inputs: + mergeTestResults: true + testRunTitle: 'On Java $(JavaVersion)' + + + \ No newline at end of file diff --git a/eng/pipelines/scripts/mgmt_sdk_compiler.sh b/eng/pipelines/scripts/mgmt_sdk_compiler.sh new file mode 100644 index 0000000000000..af6ce2f2c2dd5 --- /dev/null +++ b/eng/pipelines/scripts/mgmt_sdk_compiler.sh @@ -0,0 +1,58 @@ +#!/usr/bin/env bash + +#args expected +# $1. Java version : 1.7 or 1,8, defaults to "1.8" +# $2. 
Goals, defaults to "clean compile", value expected is a comma delimited string, e.g. "clean,compile" + +echo "#### CWD : " +pwd + +echo "#### versions of java available:" +ls /usr/lib/jvm + +echo "#### Original java home $JAVA_HOME" + +JAVA7HOME="/usr/lib/jvm/zulu-7-azure-amd64" +JAVA8HOME="/usr/lib/jvm/zulu-8-azure-amd64" + +JAVAHOME="$JAVA8HOME" + +MAVENGOALS="clean compile" + +if [ -n "$1" ] && [ "$1" == "1.7" ]; +then + JAVAHOME="$JAVA7HOME"; + echo "running java 7 build"; +fi + + +if [ -n "$2" ]; +then + TEMP_VAL=$(echo "$2" | sed -r 's/,/ /g') + MAVENGOALS="$TEMP_VAL"; + echo "maven goals overridden to $MAVENGOALS" +fi + +export JAVA_HOME="$JAVAHOME" + +echo "#### Using java at : $JAVA_HOME" + +echo "#### Maven properties:" +mvn --version + +#TODO: +#for some reason the working directory does not seem to work... +#fix the following cd cmd once we figure out how to get it to work +#change to the root of the sources repo +cd ../../.. + +for i in `ls -d */*/v20* | grep -v "node_modules/*/*"`; +do + echo "######## building folder $i" + cd $i; + mvn --batch-mode -Dgpg.skip -Dmaven.wagon.http.pool=false -Dorg.slf4j.simpleLogger.defaultLogLevel=error -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warns $MAVENGOALS; + if [ $? != 0 ]; + then cd -; exit -1; + else cd -; + fi; +done diff --git a/eng/spotbugs-aggregate-report/pom.xml b/eng/spotbugs-aggregate-report/pom.xml index df95e28e5936a..a8f5addab7e12 100644 --- a/eng/spotbugs-aggregate-report/pom.xml +++ b/eng/spotbugs-aggregate-report/pom.xml @@ -17,7 +17,7 @@ 5.0.1 2.3.1 2.5.1 - 1.2.0 + 1.2.0 2.0.0 10.5.0 @@ -26,7 +26,10 @@ 1.0.0-preview.1 1.0.0-preview.1 4.0.0-preview.1 + 1.0.0-preview.1 + 4.0.0-preview.1 5.0.0-preview.2 + 12.0.0-preview.1 @@ -49,15 +52,27 @@ - ..\..\appconfiguration\client\src\main\java - ..\..\appconfiguration\client\src\samples\java + ..\..\sdk\appconfiguration\azure-data-appconfiguration\src\main\java + ..\..\sdk\appconfiguration\azure-data-appconfiguration\src\samples\java ..\..\core\azure-core\src\main\java ..\..\core\azure-core\src\samples\java ..\..\core\azure-core-amqp\src\main\java ..\..\core\azure-core-management\src\main\java ..\..\core\azure-core-test\src\main\java - ..\..\eventhubs\client\azure-eventhubs\src\main\java - ..\..\eventhubs\client\azure-eventhubs\src\samples\java + ..\..\sdk\eventhubs\azure-eventhubs\src\main\java + ..\..\sdk\eventhubs\azure-eventhubs\src\samples\java + ..\..\sdk\identity\azure-identity\src\main\java + + + + + + + + + + + @@ -76,19 +91,19 @@ - ..\..\batch\data-plane\src\main\java + ..\..\sdk\batch\microsoft-azure-batch\src\main\java ..\..\eventhubs\data-plane\azure-eventhubs\src\main\java ..\..\eventhubs\data-plane\azure-eventhubs-eph\src\main\java ..\..\eventhubs\data-plane\azure-eventhubs-extensions\src\main\java - ..\..\keyvault\data-plane\azure-keyvault\src\main\java - ..\..\keyvault\data-plane\azure-keyvault-cryptography\src\main\java - ..\..\keyvault\data-plane\azure-keyvault-core\src\main\java - ..\..\keyvault\data-plane\azure-keyvault-extensions\src\main\java - ..\..\keyvault\data-plane\azure-keyvault-webkey\src\main\java - ..\..\servicebus\data-plane\azure-servicebus\src\main\java + ..\..\sdk\keyvault\microsoft-azure-keyvault\src\main\java + ..\..\sdk\keyvault\microsoft-azure-keyvault-cryptography\src\main\java + ..\..\sdk\keyvault\microsoft-azure-keyvault-core\src\main\java + ..\..\sdk\keyvault\microsoft-azure-keyvault-extensions\src\main\java + ..\..\sdk\keyvault\microsoft-azure-keyvault-webkey\src\main\java + 
..\..\sdk\servicebus\microsoft-azure-servicebus\src\main\java @@ -117,6 +132,11 @@ true + + com.azure + azure-data-appconfiguration + ${azure-data-appconfiguration.version} + com.azure azure-core @@ -139,14 +159,30 @@ com.azure - azure-data-appconfiguration - ${azure-data-appconfiguration.version} + azure-messaging-eventhubs + ${azure-messaging-eventhubs.version} com.azure - azure-messaging-eventhubs - ${azure-messaging-eventhubs.version} + azure-identity + ${azure-identity.version} + + com.azure + azure-keyvault-keys + ${azure-keyvault.version} + + + com.azure + azure-keyvault-secrets + ${azure-keyvault.version} + + + + + + + @@ -175,27 +211,27 @@ com.microsoft.azure azure-keyvault - ${azure-keyvault.version} + ${azure-keyvault.track-one.version} com.microsoft.azure azure-keyvault-cryptography - ${azure-keyvault.version} + ${azure-keyvault.track-one.version} com.microsoft.azure azure-keyvault-core - ${azure-keyvault.version} + ${azure-keyvault.track-one.version} com.microsoft.azure azure-keyvault-extensions - ${azure-keyvault.version} + ${azure-keyvault.track-one.version} com.microsoft.azure azure-keyvault-webkey - ${azure-keyvault.version} + ${azure-keyvault.track-one.version} com.microsoft.azure diff --git a/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/SetPrefetchCountTest.java b/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/SetPrefetchCountTest.java deleted file mode 100644 index a292d9b2be827..0000000000000 --- a/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/SetPrefetchCountTest.java +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package com.azure.messaging.eventhubs; - -import com.azure.core.amqp.Retry; -import com.azure.core.util.logging.ClientLogger; -import com.azure.messaging.eventhubs.implementation.ApiTestBase; -import com.azure.messaging.eventhubs.implementation.ReactorHandlerProvider; -import org.junit.Assert; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TestName; -import reactor.core.Disposable; -import reactor.core.publisher.Flux; - -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; - -import static java.nio.charset.StandardCharsets.UTF_8; - -/** - * Verifies we can use various prefetch options with {@link EventHubConsumer}. 
- */ -public class SetPrefetchCountTest extends ApiTestBase { - private static final String PARTITION_ID = "0"; - - private EventHubClient client; - private EventHubProducer producer; - private EventHubConsumer consumer; - - @Rule - public TestName testName = new TestName(); - - public SetPrefetchCountTest() { - super(new ClientLogger(SetPrefetchCountTest.class)); - } - - @Override - protected String testName() { - return testName.getMethodName(); - } - - @Override - protected void beforeTest() { - skipIfNotRecordMode(); - - final ReactorHandlerProvider handlerProvider = new ReactorHandlerProvider(getReactorProvider()); - client = new EventHubClient(getConnectionOptions(), getReactorProvider(), handlerProvider); - producer = client.createProducer(); - } - - @Override - protected void afterTest() { - dispose(producer, consumer, client); - } - - /** - * Test that we can use a very large prefetch number with {@link EventHubConsumerOptions} - */ - @Test - public void setLargePrefetchCount() throws InterruptedException { - // Arrange - // Since we cannot test receiving very large prefetch like 10000 in a unit test, DefaultPrefetchCount * 3 was - // chosen - final int eventCount = EventHubConsumerOptions.DEFAULT_PREFETCH_COUNT * 3; - final CountDownLatch countDownLatch = new CountDownLatch(eventCount); - final EventHubConsumerOptions options = new EventHubConsumerOptions() - .retry(Retry.getDefaultRetry()) - .prefetchCount(2000); - - consumer = client.createConsumer(EventHubClient.DEFAULT_CONSUMER_GROUP_NAME, PARTITION_ID, - EventPosition.latest(), options); - - final Disposable subscription = consumer.receive() - .take(eventCount + 1).subscribe(event -> countDownLatch.countDown()); - - // Act - try { - final Flux events = Flux.range(0, eventCount).map(number -> new EventData("c".getBytes(UTF_8))); - producer.send(events).block(); - - countDownLatch.await(45, TimeUnit.SECONDS); - - // Assert - Assert.assertEquals(0, countDownLatch.getCount()); - } finally { - subscription.dispose(); - } - } - - /** - * Test for small prefetch count on EventHubConsumer continues to get messages. 
- */ - @Test - public void setSmallPrefetchCount() throws InterruptedException { - // Arrange - final int eventCount = 30; - final CountDownLatch countDownLatch = new CountDownLatch(eventCount); - final EventHubConsumerOptions options = new EventHubConsumerOptions().prefetchCount(11); - - consumer = client.createConsumer(EventHubClient.DEFAULT_CONSUMER_GROUP_NAME, PARTITION_ID, - EventPosition.latest(), options); - - final Disposable subscription = consumer.receive() - .take(eventCount + 1).subscribe(event -> countDownLatch.countDown()); - - try { - // Act - final Flux events = Flux.range(0, eventCount) - .map(number -> new EventData("testString".getBytes(UTF_8))); - producer.send(events).block(TIMEOUT); - - countDownLatch.await(45, TimeUnit.SECONDS); - - // Assert - Assert.assertEquals(0, countDownLatch.getCount()); - } finally { - subscription.dispose(); - } - } -} diff --git a/pom.client.xml b/pom.client.xml index 0d04e6b9d0273..d4ed7878ba447 100644 --- a/pom.client.xml +++ b/pom.client.xml @@ -385,6 +385,10 @@ Azure Key Vault com.azure.security.keyvault* + + Azure Storage - Common + com.azure.storage.common* + Azure Storage - Blobs com.azure.storage.blob* @@ -537,11 +541,12 @@ -maxLineLength 120 - -snippetpath ${project.basedir}/applicationconfig/client/src/samples/java + -snippetpath ${project.basedir}/sdk/appconfiguration/azure-data-appconfiguration/src/samples/java -snippetpath ${project.basedir}/core/azure-core/src/samples/java - -snippetpath ${project.basedir}/eventhubs/client/azure-eventhubs/src/samples/java - -snippetpath ${project.basedir}/keyvault/client/keys/src/samples/java - -snippetpath ${project.basedir}/keyvault/client/secrets/src/samples/java + -snippetpath ${project.basedir}/sdk/eventhubs/azure-eventhubs/src/samples/java + -snippetpath ${project.basedir}/sdk/keyvault/azure-keyvault-keys/src/samples/java + -snippetpath ${project.basedir}/sdk/keyvault/azure-keyvault-secrets/src/samples/java + @@ -569,10 +574,8 @@ dependencies issue-management licenses - scm plugins plugin-management - team @@ -737,8 +740,8 @@ ./sdk/appconfiguration/azure-data-appconfiguration - ./core - ./eventhubs/client/azure-eventhubs + ./sdk/core + ./sdk/eventhubs/azure-eventhubs ./sdk/keyvault/azure-keyvault-secrets ./sdk/keyvault/azure-keyvault-keys diff --git a/sdk/appconfiguration/azure-data-appconfiguration/README.md b/sdk/appconfiguration/azure-data-appconfiguration/README.md index 9f9d8d835e29c..8740e54e76211 100644 --- a/sdk/appconfiguration/azure-data-appconfiguration/README.md +++ b/sdk/appconfiguration/azure-data-appconfiguration/README.md @@ -59,7 +59,7 @@ Once you have the value of the connection string you can create the configuratio ```Java ConfigurationClient client = new ConfigurationClientBuilder() - .credentials(new ConfigurationClientCredentials(connectionString)) + .credential(new ConfigurationClientCredentials(connectionString)) .buildClient(); ``` @@ -67,7 +67,7 @@ or ```Java ConfigurationAsyncClient client = new ConfigurationClientBuilder() - .credentials(new ConfigurationClientCredentials(connectionString)) + .credential(new ConfigurationClientCredentials(connectionString)) .buildAsyncClient(); ``` @@ -87,7 +87,7 @@ An application that needs to retrieve startup configurations is better suited us ```Java ConfigurationClient client = new ConfigurationClient() - .credentials(new ConfigurationClientCredentials(appConfigConnectionString)) + .credential(new ConfigurationClientCredentials(appConfigConnectionString)) .buildClient(); String url = client.getSetting(urlKey).value(); @@ 
-104,7 +104,7 @@ An application that has a large set of configurations that it needs to periodica ```Java ConfigurationAsyncClient client = new ConfigurationClientBuilder() - .credentials(new ConfigurationClientCredentials(appConfigConnectionString)) + .credential(new ConfigurationClientCredentials(appConfigConnectionString)) .buildAsyncClient(); client.listSettings(new SettingSelection().label(periodicUpdateLabel)) @@ -126,7 +126,7 @@ Create a Configuration Setting to be stored in the Configuration Store. There ar - setSetting creates a setting if it doesn't exist or overrides an existing setting. ```Java ConfigurationClient client = new ConfigurationClientBuilder() - .credentials(new ConfigurationClientCredentials(connectionString)) + .credential(new ConfigurationClientCredentials(connectionString)) .buildClient(); ConfigurationSetting setting = client.setSetting("some_key", "some_value"); ``` @@ -136,7 +136,7 @@ ConfigurationSetting setting = client.setSetting("some_key", "some_value"); Retrieve a previously stored Configuration Setting by calling getSetting. ```Java ConfigurationClient client = new ConfigurationClientBuilder() - .credentials(new ConfigurationClientCredentials(connectionString)) + .credential(new ConfigurationClientCredentials(connectionString)) .buildClient(); client.setSetting("some_key", "some_value"); ConfigurationSetting setting = client.getSetting("some_key"); @@ -147,7 +147,7 @@ ConfigurationSetting setting = client.getSetting("some_key"); Update an existing Configuration Setting by calling updateSetting. ```Java ConfigurationClient client = new ConfigurationClientBuilder() - .credentials(new ConfigurationClientCredentials(connectionString)) + .credential(new ConfigurationClientCredentials(connectionString)) .buildClient(); client.setSetting("some_key", "some_value"); ConfigurationSetting setting = client.updateSetting("some_key", "new_value"); @@ -158,7 +158,7 @@ ConfigurationSetting setting = client.updateSetting("some_key", "new_value"); Delete an existing Configuration Setting by calling deleteSetting. ```Java ConfigurationClient client = new ConfigurationClientBuilder() - .credentials(new ConfigurationClientCredentials(connectionString)) + .credential(new ConfigurationClientCredentials(connectionString)) .buildClient(); client.setSetting("some_key", "some_value"); ConfigurationSetting setting = client.deleteSetting("some_key"); diff --git a/sdk/appconfiguration/azure-data-appconfiguration/src/main/java/com/azure/data/appconfiguration/ConfigurationAsyncClient.java b/sdk/appconfiguration/azure-data-appconfiguration/src/main/java/com/azure/data/appconfiguration/ConfigurationAsyncClient.java index 267df4df2f213..470dff124d006 100644 --- a/sdk/appconfiguration/azure-data-appconfiguration/src/main/java/com/azure/data/appconfiguration/ConfigurationAsyncClient.java +++ b/sdk/appconfiguration/azure-data-appconfiguration/src/main/java/com/azure/data/appconfiguration/ConfigurationAsyncClient.java @@ -28,6 +28,9 @@ import java.net.URL; import java.util.Objects; +import static com.azure.core.implementation.util.FluxUtil.fluxContext; +import static com.azure.core.implementation.util.FluxUtil.monoContext; + /** * This class provides a client that contains all the operations for {@link ConfigurationSetting ConfigurationSettings} * in Azure App Configuration Store. Operations allowed by the client are adding, retrieving, updating, and deleting @@ -71,12 +74,7 @@ public final class ConfigurationAsyncClient { * *

Add a setting with the key "prodDBConnection" and value "db_connection".

*
-     * <pre>
-     * client.addSetting("prodDBConnection", "db_connection")
-     *     .subscribe(response -> {
-     *         ConfigurationSetting result = response.value();
-     *         System.out.printf("Key: %s, Value: %s", result.key(), result.value());
-     *     });
-     * </pre>
+ * {@codesnippet com.azure.data.appconfiguration.configurationasyncclient.addsetting#String-String} * * @param key The key of the configuration setting to add. * @param value The value associated with this configuration setting key. @@ -88,7 +86,8 @@ public final class ConfigurationAsyncClient { */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono> addSetting(String key, String value) { - return addSetting(new ConfigurationSetting().key(key).value(value), Context.NONE); + return monoContext( + context -> addSetting(new ConfigurationSetting().key(key).value(value), context)); } /** @@ -116,7 +115,7 @@ public Mono> addSetting(String key, String value) */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono> addSetting(ConfigurationSetting setting) { - return addSetting(setting, Context.NONE); + return monoContext(context -> addSetting(setting, context)); } /** @@ -189,7 +188,8 @@ Mono> addSetting(ConfigurationSetting setting, Co */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono> setSetting(String key, String value) { - return setSetting(new ConfigurationSetting().key(key).value(value), Context.NONE); + return monoContext( + context -> setSetting(new ConfigurationSetting().key(key).value(value), context)); } /** @@ -233,7 +233,7 @@ public Mono> setSetting(String key, String value) */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono> setSetting(ConfigurationSetting setting) { - return setSetting(setting, Context.NONE); + return monoContext(context -> setSetting(setting, context)); } /** @@ -317,7 +317,8 @@ Mono> setSetting(ConfigurationSetting setting, Co */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono> updateSetting(String key, String value) { - return updateSetting(new ConfigurationSetting().key(key).value(value), Context.NONE); + return monoContext( + context -> updateSetting(new ConfigurationSetting().key(key).value(value), context)); } /** @@ -349,7 +350,7 @@ public Mono> updateSetting(String key, String val */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono> updateSetting(ConfigurationSetting setting) { - return updateSetting(setting, Context.NONE); + return monoContext(context -> updateSetting(setting, context)); } /** @@ -415,7 +416,7 @@ Mono> updateSetting(ConfigurationSetting setting, */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono> getSetting(String key) { - return getSetting(new ConfigurationSetting().key(key), Context.NONE); + return monoContext(context -> getSetting(new ConfigurationSetting().key(key), context)); } /** @@ -442,7 +443,7 @@ public Mono> getSetting(String key) { */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono> getSetting(ConfigurationSetting setting) { - return getSetting(setting, Context.NONE); + return monoContext(context -> getSetting(setting, context)); } /** @@ -501,7 +502,7 @@ Mono> getSetting(ConfigurationSetting setting, Co */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono> deleteSetting(String key) { - return deleteSetting(new ConfigurationSetting().key(key), Context.NONE); + return monoContext(context -> deleteSetting(new ConfigurationSetting().key(key), context)); } /** @@ -535,7 +536,7 @@ public Mono> deleteSetting(String key) { */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono> deleteSetting(ConfigurationSetting setting) { - return deleteSetting(setting, Context.NONE); + return monoContext(context -> deleteSetting(setting, context)); } /** @@ -594,8 +595,10 @@ Mono> deleteSetting(ConfigurationSetting setting, * @return A Flux of ConfigurationSettings that matches the 
{@code options}. If no options were provided, the Flux * contains all of the current settings in the service. */ + @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux listSettings(SettingSelector options) { - return listSettings(options, Context.NONE); + return new PagedFlux<>(() -> monoContext(context -> listFirstPageSettings(options, context)), + continuationToken -> monoContext(context -> listNextPageSettings(context, continuationToken))); } /** @@ -663,16 +666,14 @@ private Mono> listFirstPageSettings(SettingS * *

Retrieve all revisions of the setting that has the key "prodDBConnection".

*
-     * <pre>
-     * client.listSettingRevisions(new SettingSelector().key("prodDBConnection"))
-     *     .subscribe(setting -> System.out.printf("Key: %s, Value: %s", setting.key(), setting.value()));
-     * </pre>
+ * {@codesnippet com.azure.data.appconfiguration.configurationasyncclient.listsettingrevisions} * * @param selector Optional. Used to filter configuration setting revisions from the service. * @return Revisions of the ConfigurationSetting */ @ServiceMethod(returns = ReturnType.COLLECTION) public Flux listSettingRevisions(SettingSelector selector) { - return listSettingRevisions(selector, Context.NONE); + return fluxContext(context -> listSettingRevisions(selector, context)); } /** @@ -764,7 +765,9 @@ private static void validateSetting(ConfigurationSetting setting) { * Remaps the exception returned from the service if it is a PRECONDITION_FAILED response. This is performed since * add setting returns PRECONDITION_FAILED when the configuration already exists, all other uses of setKey return * this status when the configuration doesn't exist. + * * @param throwable Error response from the service. + * * @return Exception remapped to a ResourceModifiedException if the throwable was a ResourceNotFoundException, * otherwise the throwable is returned unmodified. */ diff --git a/sdk/appconfiguration/azure-data-appconfiguration/src/main/java/com/azure/data/appconfiguration/ConfigurationClientBuilder.java b/sdk/appconfiguration/azure-data-appconfiguration/src/main/java/com/azure/data/appconfiguration/ConfigurationClientBuilder.java index 8d4bd79e5d1ef..26840826d978c 100644 --- a/sdk/appconfiguration/azure-data-appconfiguration/src/main/java/com/azure/data/appconfiguration/ConfigurationClientBuilder.java +++ b/sdk/appconfiguration/azure-data-appconfiguration/src/main/java/com/azure/data/appconfiguration/ConfigurationClientBuilder.java @@ -4,6 +4,7 @@ package com.azure.data.appconfiguration; import com.azure.core.implementation.annotation.ServiceClientBuilder; +import com.azure.core.util.logging.ClientLogger; import com.azure.data.appconfiguration.credentials.ConfigurationClientCredentials; import com.azure.data.appconfiguration.models.ConfigurationSetting; import com.azure.data.appconfiguration.policy.ConfigurationCredentialsPolicy; @@ -36,9 +37,9 @@ * by calling {@link ConfigurationClientBuilder#buildAsyncClient() buildAsyncClient} and {@link ConfigurationClientBuilder#buildClient() buildClient} respectively * to construct an instance of the desired client. * - *

The client needs the service endpoint of the Azure App Configuration store and access credentials. - * {@link ConfigurationClientCredentials} gives the builder the service endpoint and access credentials it requires to - * construct a client, set the ConfigurationClientCredentials with {@link ConfigurationClientBuilder#credentials(ConfigurationClientCredentials) this}.

+ *

The client needs the service endpoint of the Azure App Configuration store and an access credential. + * {@link ConfigurationClientCredentials} gives the builder the service endpoint and access credential it requires to + * construct a client; set the ConfigurationClientCredentials with {@link ConfigurationClientBuilder#credential(ConfigurationClientCredentials) this}.

* *

Instantiating an asynchronous Configuration Client

* @@ -51,7 +52,7 @@ *

Another way to construct the client is using a {@link HttpPipeline}. The pipeline gives the client an authenticated * way to communicate with the service but it doesn't contain the service endpoint. Set the pipeline with * {@link ConfigurationClientBuilder#pipeline(HttpPipeline) this}, additionally set the service endpoint with - * {@link ConfigurationClientBuilder#serviceEndpoint(String) this}. Using a pipeline requires additional setup but + * {@link ConfigurationClientBuilder#endpoint(String) this}. Using a pipeline requires additional setup but * allows for finer control on how the {@link ConfigurationAsyncClient} and {@link ConfigurationClient} are built.

* * {@codesnippet com.azure.data.applicationconfig.configurationclient.pipeline.instantiation} @@ -70,11 +71,12 @@ public final class ConfigurationClientBuilder { private static final String ACCEPT_HEADER = "Accept"; private static final String ACCEPT_HEADER_VALUE = "application/vnd.microsoft.azconfig.kv+json"; + private final ClientLogger logger = new ClientLogger(ConfigurationClientBuilder.class); private final List policies; private final HttpHeaders headers; - private ConfigurationClientCredentials credentials; - private URL serviceEndpoint; + private ConfigurationClientCredentials credential; + private URL endpoint; private HttpClient httpClient; private HttpLogDetailLevel httpLogDetailLevel; private HttpPipeline pipeline; @@ -100,14 +102,14 @@ public ConfigurationClientBuilder() { * *

* If {@link ConfigurationClientBuilder#pipeline(HttpPipeline) pipeline} is set, then the {@code pipeline} and - * {@link ConfigurationClientBuilder#serviceEndpoint(String) serviceEndpoint} are used to create the + * {@link ConfigurationClientBuilder#endpoint(String) endpoint} are used to create the * {@link ConfigurationClient client}. All other builder settings are ignored.

* * @return A ConfigurationClient with the options set from the builder. - * @throws NullPointerException If {@code serviceEndpoint} has not been set. This setting is automatically set when - * {@link ConfigurationClientBuilder#credentials(ConfigurationClientCredentials) credentials} are set through - * the builder. Or can be set explicitly by calling {@link ConfigurationClientBuilder#serviceEndpoint(String)}. - * @throws IllegalStateException If {@link ConfigurationClientBuilder#credentials(ConfigurationClientCredentials)} + * @throws NullPointerException If {@code endpoint} has not been set. This setting is automatically set when + * {@link ConfigurationClientBuilder#credential(ConfigurationClientCredentials) credential} are set through + * the builder. Or can be set explicitly by calling {@link ConfigurationClientBuilder#endpoint(String)}. + * @throws IllegalStateException If {@link ConfigurationClientBuilder#credential(ConfigurationClientCredentials)} * has not been set. */ public ConfigurationClient buildClient() { @@ -120,31 +122,31 @@ public ConfigurationClient buildClient() { * *

* If {@link ConfigurationClientBuilder#pipeline(HttpPipeline) pipeline} is set, then the {@code pipeline} and - * {@link ConfigurationClientBuilder#serviceEndpoint(String) serviceEndpoint} are used to create the + * {@link ConfigurationClientBuilder#endpoint(String) endpoint} are used to create the * {@link ConfigurationAsyncClient client}. All other builder settings are ignored. *

* * @return A ConfigurationAsyncClient with the options set from the builder. - * @throws NullPointerException If {@code serviceEndpoint} has not been set. This setting is automatically set when - * {@link ConfigurationClientBuilder#credentials(ConfigurationClientCredentials) credentials} are set through - * the builder. Or can be set explicitly by calling {@link ConfigurationClientBuilder#serviceEndpoint(String)}. - * @throws IllegalStateException If {@link ConfigurationClientBuilder#credentials(ConfigurationClientCredentials)} + * @throws NullPointerException If {@code endpoint} has not been set. This setting is automatically set when + * {@link ConfigurationClientBuilder#credential(ConfigurationClientCredentials) credential} are set through + * the builder. Or can be set explicitly by calling {@link ConfigurationClientBuilder#endpoint(String)}. + * @throws IllegalStateException If {@link ConfigurationClientBuilder#credential(ConfigurationClientCredentials)} * has not been set. */ public ConfigurationAsyncClient buildAsyncClient() { Configuration buildConfiguration = (configuration == null) ? ConfigurationManager.getConfiguration().clone() : configuration; ConfigurationClientCredentials configurationCredentials = getConfigurationCredentials(buildConfiguration); - URL buildServiceEndpoint = getBuildServiceEndpoint(configurationCredentials); + URL buildEndpoint = getBuildEndpoint(configurationCredentials); - Objects.requireNonNull(buildServiceEndpoint); + Objects.requireNonNull(buildEndpoint); if (pipeline != null) { - return new ConfigurationAsyncClient(buildServiceEndpoint, pipeline); + return new ConfigurationAsyncClient(buildEndpoint, pipeline); } - ConfigurationClientCredentials buildCredentials = (credentials == null) ? configurationCredentials : credentials; - if (buildCredentials == null) { - throw new IllegalStateException("'credentials' is required."); + ConfigurationClientCredentials buildCredential = (credential == null) ? configurationCredentials : credential; + if (buildCredential == null) { + logger.logAndThrow(new IllegalStateException("'credential' is required.")); } // Closest to API goes first, closest to wire goes last. @@ -154,7 +156,7 @@ public ConfigurationAsyncClient buildAsyncClient() { policies.add(new RequestIdPolicy()); policies.add(new AddHeadersPolicy(headers)); policies.add(new AddDatePolicy()); - policies.add(new ConfigurationCredentialsPolicy(buildCredentials)); + policies.add(new ConfigurationCredentialsPolicy(buildCredential)); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(retryPolicy); @@ -168,34 +170,38 @@ public ConfigurationAsyncClient buildAsyncClient() { .httpClient(httpClient) .build(); - return new ConfigurationAsyncClient(buildServiceEndpoint, pipeline); + return new ConfigurationAsyncClient(buildEndpoint, pipeline); } /** * Sets the service endpoint for the Azure App Configuration instance. * - * @param serviceEndpoint The URL of the Azure App Configuration instance to send {@link ConfigurationSetting} + * @param endpoint The URL of the Azure App Configuration instance to send {@link ConfigurationSetting} * service requests to and receive responses from. * @return The updated ConfigurationClientBuilder object. - * @throws MalformedURLException if {@code serviceEndpoint} is null or it cannot be parsed into a valid URL. + * @throws IllegalArgumentException if {@code endpoint} is null or it cannot be parsed into a valid URL. 
*/ - public ConfigurationClientBuilder serviceEndpoint(String serviceEndpoint) throws MalformedURLException { - this.serviceEndpoint = new URL(serviceEndpoint); + public ConfigurationClientBuilder endpoint(String endpoint) { + try { + this.endpoint = new URL(endpoint); + } catch (MalformedURLException ex) { + logger.logAndThrow(new IllegalArgumentException("'endpoint' must be a valid URL")); + } + return this; } /** - * Sets the credentials to use when authenticating HTTP requests. Also, sets the - * {@link ConfigurationClientBuilder#serviceEndpoint(String) serviceEndpoint} for this ConfigurationClientBuilder. + * Sets the credential to use when authenticating HTTP requests. Also, sets the + * {@link ConfigurationClientBuilder#endpoint(String) endpoint} for this ConfigurationClientBuilder. * - * @param credentials The credentials to use for authenticating HTTP requests. + * @param credential The credential to use for authenticating HTTP requests. * @return The updated ConfigurationClientBuilder object. - * @throws NullPointerException If {@code credentials} is {@code null}. + * @throws NullPointerException If {@code credential} is {@code null}. */ - public ConfigurationClientBuilder credentials(ConfigurationClientCredentials credentials) { - Objects.requireNonNull(credentials); - this.credentials = credentials; - this.serviceEndpoint = credentials.baseUri(); + public ConfigurationClientBuilder credential(ConfigurationClientCredentials credential) { + this.credential = Objects.requireNonNull(credential); + this.endpoint = credential.baseUri(); return this; } @@ -240,7 +246,7 @@ public ConfigurationClientBuilder httpClient(HttpClient client) { * Sets the HTTP pipeline to use for the service client. * * If {@code pipeline} is set, all other settings are ignored, aside from - * {@link ConfigurationClientBuilder#serviceEndpoint(String) serviceEndpoint} to build {@link ConfigurationAsyncClient} or {@link ConfigurationClient}. + * {@link ConfigurationClientBuilder#endpoint(String) endpoint} to build {@link ConfigurationAsyncClient} or {@link ConfigurationClient}. * * @param pipeline The HTTP pipeline to use for sending service requests and receiving responses. * @return The updated ConfigurationClientBuilder object. 
@@ -268,7 +274,7 @@ public ConfigurationClientBuilder configuration(Configuration configuration) { private ConfigurationClientCredentials getConfigurationCredentials(Configuration configuration) { String connectionString = configuration.get("AZURE_APPCONFIG_CONNECTION_STRING"); if (ImplUtils.isNullOrEmpty(connectionString)) { - return credentials; + return credential; } try { @@ -278,9 +284,9 @@ private ConfigurationClientCredentials getConfigurationCredentials(Configuration } } - private URL getBuildServiceEndpoint(ConfigurationClientCredentials buildCredentials) { - if (serviceEndpoint != null) { - return serviceEndpoint; + private URL getBuildEndpoint(ConfigurationClientCredentials buildCredentials) { + if (endpoint != null) { + return endpoint; } else if (buildCredentials != null) { return buildCredentials.baseUri(); } else { diff --git a/sdk/appconfiguration/azure-data-appconfiguration/src/main/java/com/azure/data/appconfiguration/models/Range.java b/sdk/appconfiguration/azure-data-appconfiguration/src/main/java/com/azure/data/appconfiguration/models/Range.java index 93a166235d416..07a7214acb944 100644 --- a/sdk/appconfiguration/azure-data-appconfiguration/src/main/java/com/azure/data/appconfiguration/models/Range.java +++ b/sdk/appconfiguration/azure-data-appconfiguration/src/main/java/com/azure/data/appconfiguration/models/Range.java @@ -2,7 +2,7 @@ // Licensed under the MIT License. package com.azure.data.appconfiguration.models; -import com.azure.core.implementation.annotation.Fluent; +import com.azure.core.implementation.annotation.Immutable; import com.azure.data.appconfiguration.ConfigurationAsyncClient; import com.azure.data.appconfiguration.ConfigurationClient; @@ -14,7 +14,7 @@ * @see ConfigurationClient#listSettingRevisions(SettingSelector) * @see SettingSelector#range(Range) */ -@Fluent +@Immutable public class Range { private final int start; private final int end; diff --git a/sdk/appconfiguration/azure-data-appconfiguration/src/samples/java/ConfigurationSets.java b/sdk/appconfiguration/azure-data-appconfiguration/src/samples/java/ConfigurationSets.java index a818c8da36ebc..d4e5cc18ce6f6 100644 --- a/sdk/appconfiguration/azure-data-appconfiguration/src/samples/java/ConfigurationSets.java +++ b/sdk/appconfiguration/azure-data-appconfiguration/src/samples/java/ConfigurationSets.java @@ -52,7 +52,7 @@ public static void main(String[] args) throws NoSuchAlgorithmException, InvalidK // Instantiate a configuration client that will be used to call the configuration service. ConfigurationAsyncClient client = new ConfigurationClientBuilder() - .credentials(new ConfigurationClientCredentials(connectionString)) + .credential(new ConfigurationClientCredentials(connectionString)) .buildAsyncClient(); // Demonstrates two different complex objects being stored in Azure App Configuration; one used for beta and the diff --git a/sdk/appconfiguration/azure-data-appconfiguration/src/samples/java/HelloWorld.java b/sdk/appconfiguration/azure-data-appconfiguration/src/samples/java/HelloWorld.java index 15bc35ba8531b..8aaa0039a4b1c 100644 --- a/sdk/appconfiguration/azure-data-appconfiguration/src/samples/java/HelloWorld.java +++ b/sdk/appconfiguration/azure-data-appconfiguration/src/samples/java/HelloWorld.java @@ -28,7 +28,7 @@ public static void main(String[] args) throws NoSuchAlgorithmException, InvalidK // Instantiate a client that will be used to call the service. 
ConfigurationAsyncClient client = new ConfigurationClientBuilder() - .credentials(new ConfigurationClientCredentials(connectionString)) + .credential(new ConfigurationClientCredentials(connectionString)) .buildAsyncClient(); // Name of the key to add to the configuration service. diff --git a/sdk/appconfiguration/azure-data-appconfiguration/src/samples/java/PipelineSample.java b/sdk/appconfiguration/azure-data-appconfiguration/src/samples/java/PipelineSample.java index dc876f7f4c42b..64ca0d7b61ef7 100644 --- a/sdk/appconfiguration/azure-data-appconfiguration/src/samples/java/PipelineSample.java +++ b/sdk/appconfiguration/azure-data-appconfiguration/src/samples/java/PipelineSample.java @@ -44,7 +44,7 @@ public static void main(String[] args) throws NoSuchAlgorithmException, Invalid // We add in a policy to track the type of HTTP method calls we make. // We also want to see the Header information of our HTTP requests, so we specify the detail level. final ConfigurationAsyncClient client = new ConfigurationClientBuilder() - .credentials(new ConfigurationClientCredentials(connectionString)) + .credential(new ConfigurationClientCredentials(connectionString)) .addPolicy(new HttpMethodRequestTrackingPolicy(tracker)) .httpLogDetailLevel(HttpLogDetailLevel.HEADERS) .buildAsyncClient(); diff --git a/sdk/appconfiguration/azure-data-appconfiguration/src/samples/java/com/azure/data/appconfiguration/ConfigurationAsyncClientJavaDocCodeSnippets.java b/sdk/appconfiguration/azure-data-appconfiguration/src/samples/java/com/azure/data/appconfiguration/ConfigurationAsyncClientJavaDocCodeSnippets.java new file mode 100644 index 0000000000000..ae1a97cfcb827 --- /dev/null +++ b/sdk/appconfiguration/azure-data-appconfiguration/src/samples/java/com/azure/data/appconfiguration/ConfigurationAsyncClientJavaDocCodeSnippets.java @@ -0,0 +1,54 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package com.azure.data.appconfiguration; + +import com.azure.data.appconfiguration.models.ConfigurationSetting; +import com.azure.data.appconfiguration.models.SettingSelector; +import reactor.util.context.Context; + +/** + * Code snippets for {@link ConfigurationAsyncClient} + */ +public class ConfigurationAsyncClientJavaDocCodeSnippets { + + private String key1 = "key1"; + private String key2 = "key2"; + private String value1 = "val1"; + private String value2 = "val2"; + /** + * Code snippets for {@link ConfigurationAsyncClient#addSetting(String, String)} + */ + public void addSettingsCodeSnippet() { + ConfigurationAsyncClient client = getAsyncClient(); + // BEGIN: com.azure.data.appconfiguration.configurationasyncclient.addsetting#String-String + client.addSetting("prodDBConnection", "db_connection") + .subscriberContext(Context.of(key1, value1, key2, value2)) + .subscribe(response -> { + ConfigurationSetting result = response.value(); + System.out.printf("Key: %s, Value: %s", result.key(), result.value()); + }); + // END: com.azure.data.appconfiguration.configurationasyncclient.addsetting#String-String + } + + /** + * Code snippets for {@link ConfigurationAsyncClient#listSettingRevisions(SettingSelector)} + */ + public void listSettingRevisionsCodeSnippet() { + ConfigurationAsyncClient client = getAsyncClient(); + // BEGIN: com.azure.data.appconfiguration.configurationasyncclient.listsettingrevisions + client.listSettingRevisions(new SettingSelector().keys("prodDBConnection")) + .subscriberContext(Context.of(key1, value1, key2, value2)) + .subscribe(setting -> + System.out.printf("Key: %s, Value: %s", setting.key(), setting.value())); + // END: com.azure.data.appconfiguration.configurationasyncclient.listsettingrevisions + } + + /** + * Implementation not provided + * @return {@code null} + */ + private ConfigurationAsyncClient getAsyncClient() { + return new ConfigurationClientBuilder().buildAsyncClient(); + } +} diff --git a/sdk/appconfiguration/azure-data-appconfiguration/src/samples/java/com/azure/data/appconfiguration/ConfigurationClientJavaDocCodeSnippets.java b/sdk/appconfiguration/azure-data-appconfiguration/src/samples/java/com/azure/data/appconfiguration/ConfigurationClientJavaDocCodeSnippets.java index 72bd7346b3e1c..209554c5d4be7 100644 --- a/sdk/appconfiguration/azure-data-appconfiguration/src/samples/java/com/azure/data/appconfiguration/ConfigurationClientJavaDocCodeSnippets.java +++ b/sdk/appconfiguration/azure-data-appconfiguration/src/samples/java/com/azure/data/appconfiguration/ConfigurationClientJavaDocCodeSnippets.java @@ -32,8 +32,8 @@ public ConfigurationClient createAsyncConfigurationClientWithPipeline() throws M ConfigurationClient configurationClient = new ConfigurationClientBuilder() .pipeline(pipeline) - .serviceEndpoint("https://myconfig.azure.net/") - .credentials(new ConfigurationClientCredentials(connectionString)) + .endpoint("https://myconfig.azure.net/") + .credential(new ConfigurationClientCredentials(connectionString)) .buildClient(); // END: com.azure.data.applicationconfig.configurationclient.pipeline.instantiation return configurationClient; @@ -52,7 +52,7 @@ public ConfigurationAsyncClient createAsyncConfigurationClient() { String connectionString = getConnectionString(); // BEGIN: com.azure.data.applicationconfig.async.configurationclient.instantiation ConfigurationAsyncClient configurationAsyncClient = new ConfigurationClientBuilder() - .credentials(new ConfigurationClientCredentials(connectionString)) + .credential(new 
ConfigurationClientCredentials(connectionString)) .buildAsyncClient(); // END: com.azure.data.applicationconfig.async.configurationclient.instantiation return configurationAsyncClient; @@ -71,7 +71,7 @@ public ConfigurationClient createSyncConfigurationClient() { String connectionString = getConnectionString(); // BEGIN: com.azure.data.applicationconfig.configurationclient.instantiation ConfigurationClient configurationClient = new ConfigurationClientBuilder() - .credentials(new ConfigurationClientCredentials(connectionString)) + .credential(new ConfigurationClientCredentials(connectionString)) .buildClient(); // END: com.azure.data.applicationconfig.configurationclient.instantiation return configurationClient; diff --git a/sdk/appconfiguration/azure-data-appconfiguration/src/test/java/com/azure/data/appconfiguration/ConfigurationAsyncClientTest.java b/sdk/appconfiguration/azure-data-appconfiguration/src/test/java/com/azure/data/appconfiguration/ConfigurationAsyncClientTest.java index 32f2e059d4c9c..8026fb83eecc0 100644 --- a/sdk/appconfiguration/azure-data-appconfiguration/src/test/java/com/azure/data/appconfiguration/ConfigurationAsyncClientTest.java +++ b/sdk/appconfiguration/azure-data-appconfiguration/src/test/java/com/azure/data/appconfiguration/ConfigurationAsyncClientTest.java @@ -36,13 +36,13 @@ protected void beforeTest() { if (interceptorManager.isPlaybackMode()) { client = clientSetup(credentials -> new ConfigurationClientBuilder() - .credentials(credentials) + .credential(credentials) .httpClient(interceptorManager.getPlaybackClient()) .httpLogDetailLevel(HttpLogDetailLevel.BODY_AND_HEADERS) .buildAsyncClient()); } else { client = clientSetup(credentials -> new ConfigurationClientBuilder() - .credentials(credentials) + .credential(credentials) .httpClient(HttpClient.createDefault().wiretap(true)) .httpLogDetailLevel(HttpLogDetailLevel.BODY_AND_HEADERS) .addPolicy(interceptorManager.getRecordPolicy()) @@ -101,8 +101,8 @@ public void addSettingEmptyValue() { * Verifies that an exception is thrown when null key is passed. */ public void addSettingNullKey() { - assertRunnableThrowsException(() -> client.addSetting(null, "A Value"), IllegalArgumentException.class); - assertRunnableThrowsException(() -> client.addSetting(null), NullPointerException.class); + assertRunnableThrowsException(() -> client.addSetting(null, "A Value").block(), IllegalArgumentException.class); + assertRunnableThrowsException(() -> client.addSetting(null).block(), NullPointerException.class); } /** @@ -179,8 +179,8 @@ public void setSettingEmptyValue() { * Verifies that an exception is thrown when null key is passed. */ public void setSettingNullKey() { - assertRunnableThrowsException(() -> client.setSetting(null, "A Value"), IllegalArgumentException.class); - assertRunnableThrowsException(() -> client.setSetting(null), NullPointerException.class); + assertRunnableThrowsException(() -> client.setSetting(null, "A Value").block(), IllegalArgumentException.class); + assertRunnableThrowsException(() -> client.setSetting(null).block(), NullPointerException.class); } /** @@ -224,8 +224,8 @@ public void updateSettingOverload() { * Verifies that an exception is thrown when null key is passed. 
      */
     public void updateSettingNullKey() {
-        assertRunnableThrowsException(() -> client.updateSetting(null, "A Value"), IllegalArgumentException.class);
-        assertRunnableThrowsException(() -> client.updateSetting(null), NullPointerException.class);
+        assertRunnableThrowsException(() -> client.updateSetting(null, "A Value").block(), IllegalArgumentException.class);
+        assertRunnableThrowsException(() -> client.updateSetting(null).block(), NullPointerException.class);
     }
 
     /**
@@ -365,8 +365,8 @@ public void deleteSettingWithETag() {
      * Test the API will not make a delete call without having a key passed, an IllegalArgumentException should be thrown.
      */
     public void deleteSettingNullKey() {
-        assertRunnableThrowsException(() -> client.deleteSetting((String) null), IllegalArgumentException.class);
-        assertRunnableThrowsException(() -> client.deleteSetting((ConfigurationSetting) null), NullPointerException.class);
+        assertRunnableThrowsException(() -> client.deleteSetting((String) null).block(), IllegalArgumentException.class);
+        assertRunnableThrowsException(() -> client.deleteSetting((ConfigurationSetting) null).block(), NullPointerException.class);
     }
 
     /**
diff --git a/sdk/appconfiguration/azure-data-appconfiguration/src/test/java/com/azure/data/appconfiguration/ConfigurationClientTest.java b/sdk/appconfiguration/azure-data-appconfiguration/src/test/java/com/azure/data/appconfiguration/ConfigurationClientTest.java
index 49c08b0ba9869..32da0d6c5babc 100644
--- a/sdk/appconfiguration/azure-data-appconfiguration/src/test/java/com/azure/data/appconfiguration/ConfigurationClientTest.java
+++ b/sdk/appconfiguration/azure-data-appconfiguration/src/test/java/com/azure/data/appconfiguration/ConfigurationClientTest.java
@@ -31,13 +31,13 @@ protected void beforeTest() {
         if (interceptorManager.isPlaybackMode()) {
             client = clientSetup(credentials -> new ConfigurationClientBuilder()
-                .credentials(credentials)
+                .credential(credentials)
                 .httpClient(interceptorManager.getPlaybackClient())
                 .httpLogDetailLevel(HttpLogDetailLevel.BODY_AND_HEADERS)
                 .buildClient());
         } else {
             client = clientSetup(credentials -> new ConfigurationClientBuilder()
-                .credentials(credentials)
+                .credential(credentials)
                 .httpClient(HttpClient.createDefault().wiretap(true))
                 .httpLogDetailLevel(HttpLogDetailLevel.BODY_AND_HEADERS)
                 .addPolicy(interceptorManager.getRecordPolicy())
diff --git a/sdk/appconfiguration/pom.service.xml b/sdk/appconfiguration/pom.service.xml
index e33082c71b929..5e032d27570c0 100644
--- a/sdk/appconfiguration/pom.service.xml
+++ b/sdk/appconfiguration/pom.service.xml
@@ -9,7 +9,7 @@ pom 1.0.0 + ../core azure-data-appconfiguration - ../../core
diff --git a/cognitiveservices/data-plane/search/bingautosuggest/pom.xml b/sdk/cognitiveservices/ms-azure-cs-autosuggest/pom.xml
similarity index 98%
rename from cognitiveservices/data-plane/search/bingautosuggest/pom.xml
rename to sdk/cognitiveservices/ms-azure-cs-autosuggest/pom.xml
index 115e1708562a9..5b91d450eed0d 100644
--- a/cognitiveservices/data-plane/search/bingautosuggest/pom.xml
+++ b/sdk/cognitiveservices/ms-azure-cs-autosuggest/pom.xml
@@ -9,7 +9,7 @@ com.microsoft.azure.cognitiveservices azure-cognitiveservices-parent 1.0.2 - ../../pom.xml + ../pom.xml azure-cognitiveservices-autosuggest 1.0.2-beta
diff --git a/cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/BingAutoSuggestSearch.java
b/sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/BingAutoSuggestSearch.java similarity index 100% rename from cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/BingAutoSuggestSearch.java rename to sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/BingAutoSuggestSearch.java diff --git a/cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/BingAutoSuggestSearchAPI.java b/sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/BingAutoSuggestSearchAPI.java similarity index 100% rename from cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/BingAutoSuggestSearchAPI.java rename to sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/BingAutoSuggestSearchAPI.java diff --git a/cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/BingAutoSuggestSearchManager.java b/sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/BingAutoSuggestSearchManager.java similarity index 100% rename from cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/BingAutoSuggestSearchManager.java rename to sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/BingAutoSuggestSearchManager.java diff --git a/cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/implementation/BingAutoSuggestSearchAPIImpl.java b/sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/implementation/BingAutoSuggestSearchAPIImpl.java similarity index 100% rename from cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/implementation/BingAutoSuggestSearchAPIImpl.java rename to sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/implementation/BingAutoSuggestSearchAPIImpl.java diff --git a/cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/implementation/BingAutoSuggestSearchImpl.java b/sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/implementation/BingAutoSuggestSearchImpl.java similarity index 100% rename from cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/implementation/BingAutoSuggestSearchImpl.java rename to sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/implementation/BingAutoSuggestSearchImpl.java diff --git a/cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/implementation/package-info.java 
b/sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/implementation/package-info.java similarity index 100% rename from cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/implementation/package-info.java rename to sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/implementation/package-info.java diff --git a/cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/Action.java b/sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/Action.java similarity index 100% rename from cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/Action.java rename to sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/Action.java diff --git a/cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/Answer.java b/sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/Answer.java similarity index 100% rename from cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/Answer.java rename to sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/Answer.java diff --git a/cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/AutoSuggestOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/AutoSuggestOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/AutoSuggestOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/AutoSuggestOptionalParameter.java diff --git a/cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/CreativeWork.java b/sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/CreativeWork.java similarity index 100% rename from cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/CreativeWork.java rename to sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/CreativeWork.java diff --git a/cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/Error.java b/sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/Error.java similarity index 100% rename from cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/Error.java rename to 
sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/Error.java diff --git a/cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/ErrorCode.java b/sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/ErrorCode.java similarity index 100% rename from cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/ErrorCode.java rename to sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/ErrorCode.java diff --git a/cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/ErrorResponse.java b/sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/ErrorResponse.java similarity index 100% rename from cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/ErrorResponse.java rename to sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/ErrorResponse.java diff --git a/cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/ErrorResponseException.java b/sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/ErrorResponseException.java similarity index 100% rename from cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/ErrorResponseException.java rename to sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/ErrorResponseException.java diff --git a/cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/Identifiable.java b/sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/Identifiable.java similarity index 100% rename from cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/Identifiable.java rename to sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/Identifiable.java diff --git a/cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/QueryContext.java b/sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/QueryContext.java similarity index 100% rename from cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/QueryContext.java rename to sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/QueryContext.java diff --git a/cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/Response.java 
b/sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/Response.java similarity index 100% rename from cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/Response.java rename to sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/Response.java diff --git a/cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/ResponseBase.java b/sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/ResponseBase.java similarity index 100% rename from cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/ResponseBase.java rename to sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/ResponseBase.java diff --git a/cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/ResponseFormat.java b/sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/ResponseFormat.java similarity index 100% rename from cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/ResponseFormat.java rename to sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/ResponseFormat.java diff --git a/cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/SafeSearch.java b/sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/SafeSearch.java similarity index 100% rename from cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/SafeSearch.java rename to sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/SafeSearch.java diff --git a/cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/ScenarioType.java b/sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/ScenarioType.java similarity index 100% rename from cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/ScenarioType.java rename to sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/ScenarioType.java diff --git a/cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/SearchAction.java b/sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/SearchAction.java similarity index 100% rename from cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/SearchAction.java rename to 
sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/SearchAction.java diff --git a/cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/SearchKind.java b/sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/SearchKind.java similarity index 100% rename from cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/SearchKind.java rename to sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/SearchKind.java diff --git a/cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/SearchResultsAnswer.java b/sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/SearchResultsAnswer.java similarity index 100% rename from cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/SearchResultsAnswer.java rename to sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/SearchResultsAnswer.java diff --git a/cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/Suggestions.java b/sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/Suggestions.java similarity index 100% rename from cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/Suggestions.java rename to sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/Suggestions.java diff --git a/cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/SuggestionsSuggestionGroup.java b/sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/SuggestionsSuggestionGroup.java similarity index 100% rename from cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/SuggestionsSuggestionGroup.java rename to sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/SuggestionsSuggestionGroup.java diff --git a/cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/Thing.java b/sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/Thing.java similarity index 100% rename from cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/Thing.java rename to sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/Thing.java diff --git a/cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/package-info.java 
b/sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/package-info.java similarity index 100% rename from cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/package-info.java rename to sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/models/package-info.java diff --git a/cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/package-info.java b/sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/package-info.java similarity index 100% rename from cognitiveservices/data-plane/search/bingautosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/package-info.java rename to sdk/cognitiveservices/ms-azure-cs-autosuggest/src/main/java/com/microsoft/azure/cognitiveservices/search/autosuggest/package-info.java diff --git a/cognitiveservices/data-plane/vision/computervision/pom.xml b/sdk/cognitiveservices/ms-azure-cs-computervision/pom.xml old mode 100755 new mode 100644 similarity index 98% rename from cognitiveservices/data-plane/vision/computervision/pom.xml rename to sdk/cognitiveservices/ms-azure-cs-computervision/pom.xml index f585c51f98577..67d7daafb80bc --- a/cognitiveservices/data-plane/vision/computervision/pom.xml +++ b/sdk/cognitiveservices/ms-azure-cs-computervision/pom.xml @@ -11,7 +11,7 @@ com.microsoft.azure.cognitiveservices azure-cognitiveservices-parent 1.0.2 - ../../pom.xml + ../pom.xml azure-cognitiveservices-computervision 1.0.2-beta diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ComputerVision.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ComputerVision.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ComputerVision.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ComputerVision.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ComputerVisionClient.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ComputerVisionClient.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ComputerVisionClient.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ComputerVisionClient.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ComputerVisionManager.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ComputerVisionManager.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ComputerVisionManager.java rename to 
sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ComputerVisionManager.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/ComputerVisionClientImpl.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/ComputerVisionClientImpl.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/ComputerVisionClientImpl.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/ComputerVisionClientImpl.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/ComputerVisionImpl.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/ComputerVisionImpl.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/ComputerVisionImpl.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/ComputerVisionImpl.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/package-info.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/package-info.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/package-info.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/package-info.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/AdultInfo.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/AdultInfo.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/AdultInfo.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/AdultInfo.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/AnalyzeImageByDomainInStreamOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/AnalyzeImageByDomainInStreamOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/AnalyzeImageByDomainInStreamOptionalParameter.java rename to 
sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/AnalyzeImageByDomainInStreamOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/AnalyzeImageByDomainOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/AnalyzeImageByDomainOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/AnalyzeImageByDomainOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/AnalyzeImageByDomainOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/AnalyzeImageInStreamOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/AnalyzeImageInStreamOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/AnalyzeImageInStreamOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/AnalyzeImageInStreamOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/AnalyzeImageOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/AnalyzeImageOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/AnalyzeImageOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/AnalyzeImageOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/Category.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/Category.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/Category.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/Category.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/CategoryDetail.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/CategoryDetail.java similarity index 100% rename from 
cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/CategoryDetail.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/CategoryDetail.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/CelebritiesModel.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/CelebritiesModel.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/CelebritiesModel.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/CelebritiesModel.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/CelebrityResults.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/CelebrityResults.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/CelebrityResults.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/CelebrityResults.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ColorInfo.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ColorInfo.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ColorInfo.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ColorInfo.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ComputerVisionError.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ComputerVisionError.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ComputerVisionError.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ComputerVisionError.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ComputerVisionErrorCodes.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ComputerVisionErrorCodes.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ComputerVisionErrorCodes.java rename to 
sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ComputerVisionErrorCodes.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ComputerVisionErrorException.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ComputerVisionErrorException.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ComputerVisionErrorException.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ComputerVisionErrorException.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/DescribeImageInStreamOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/DescribeImageInStreamOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/DescribeImageInStreamOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/DescribeImageInStreamOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/DescribeImageOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/DescribeImageOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/DescribeImageOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/DescribeImageOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/Details.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/Details.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/Details.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/Details.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/DomainModelResults.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/DomainModelResults.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/DomainModelResults.java rename to 
sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/DomainModelResults.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/FaceDescription.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/FaceDescription.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/FaceDescription.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/FaceDescription.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/FaceRectangle.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/FaceRectangle.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/FaceRectangle.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/FaceRectangle.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/Gender.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/Gender.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/Gender.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/Gender.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/GenerateThumbnailInStreamOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/GenerateThumbnailInStreamOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/GenerateThumbnailInStreamOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/GenerateThumbnailInStreamOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/GenerateThumbnailOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/GenerateThumbnailOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/GenerateThumbnailOptionalParameter.java rename to 
sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/GenerateThumbnailOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageAnalysis.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageAnalysis.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageAnalysis.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageAnalysis.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageCaption.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageCaption.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageCaption.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageCaption.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageDescription.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageDescription.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageDescription.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageDescription.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageDescriptionDetails.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageDescriptionDetails.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageDescriptionDetails.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageDescriptionDetails.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageMetadata.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageMetadata.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageMetadata.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageMetadata.java diff --git 
a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageTag.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageTag.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageTag.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageTag.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageType.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageType.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageType.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageType.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageUrl.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageUrl.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageUrl.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageUrl.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/LandmarkResults.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/LandmarkResults.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/LandmarkResults.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/LandmarkResults.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/LandmarksModel.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/LandmarksModel.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/LandmarksModel.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/LandmarksModel.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/Line.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/Line.java similarity index 
100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/Line.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/Line.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ListModelsResult.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ListModelsResult.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ListModelsResult.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ListModelsResult.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ModelDescription.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ModelDescription.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ModelDescription.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ModelDescription.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/OcrLanguages.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/OcrLanguages.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/OcrLanguages.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/OcrLanguages.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/OcrLine.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/OcrLine.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/OcrLine.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/OcrLine.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/OcrRegion.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/OcrRegion.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/OcrRegion.java rename to 
sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/OcrRegion.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/OcrResult.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/OcrResult.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/OcrResult.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/OcrResult.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/OcrWord.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/OcrWord.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/OcrWord.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/OcrWord.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/RecognitionResult.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/RecognitionResult.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/RecognitionResult.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/RecognitionResult.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/RecognizePrintedTextInStreamOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/RecognizePrintedTextInStreamOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/RecognizePrintedTextInStreamOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/RecognizePrintedTextInStreamOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/RecognizePrintedTextOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/RecognizePrintedTextOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/RecognizePrintedTextOptionalParameter.java rename to 
sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/RecognizePrintedTextOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/RecognizeTextHeaders.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/RecognizeTextHeaders.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/RecognizeTextHeaders.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/RecognizeTextHeaders.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/RecognizeTextInStreamHeaders.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/RecognizeTextInStreamHeaders.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/RecognizeTextInStreamHeaders.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/RecognizeTextInStreamHeaders.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/TagImageInStreamOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/TagImageInStreamOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/TagImageInStreamOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/TagImageInStreamOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/TagImageOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/TagImageOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/TagImageOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/TagImageOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/TagResult.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/TagResult.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/TagResult.java rename to 
sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/TagResult.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/TextOperationResult.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/TextOperationResult.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/TextOperationResult.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/TextOperationResult.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/TextOperationStatusCodes.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/TextOperationStatusCodes.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/TextOperationStatusCodes.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/TextOperationStatusCodes.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/TextRecognitionMode.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/TextRecognitionMode.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/TextRecognitionMode.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/TextRecognitionMode.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/VisualFeatureTypes.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/VisualFeatureTypes.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/VisualFeatureTypes.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/VisualFeatureTypes.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/Word.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/Word.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/Word.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/Word.java diff --git 
a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/package-info.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/package-info.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/package-info.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/package-info.java diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/package-info.java b/sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/package-info.java similarity index 100% rename from cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/package-info.java rename to sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/package-info.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/pom.xml b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/pom.xml old mode 100755 new mode 100644 similarity index 98% rename from cognitiveservices/data-plane/vision/contentmoderator/pom.xml rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/pom.xml index 4d970865955fc..0678ef83bea88 --- a/cognitiveservices/data-plane/vision/contentmoderator/pom.xml +++ b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/pom.xml @@ -11,7 +11,7 @@ com.microsoft.azure.cognitiveservices azure-cognitiveservices-parent 1.0.2 - ../../pom.xml + ../pom.xml azure-cognitiveservices-contentmoderator 1.0.2-beta diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/ContentModeratorClient.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/ContentModeratorClient.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/ContentModeratorClient.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/ContentModeratorClient.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/ContentModeratorManager.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/ContentModeratorManager.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/ContentModeratorManager.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/ContentModeratorManager.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/ImageModerations.java 
b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/ImageModerations.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/ImageModerations.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/ImageModerations.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/ListManagementImageLists.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/ListManagementImageLists.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/ListManagementImageLists.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/ListManagementImageLists.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/ListManagementImages.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/ListManagementImages.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/ListManagementImages.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/ListManagementImages.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/ListManagementTermLists.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/ListManagementTermLists.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/ListManagementTermLists.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/ListManagementTermLists.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/ListManagementTerms.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/ListManagementTerms.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/ListManagementTerms.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/ListManagementTerms.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/Reviews.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/Reviews.java similarity index 100% rename from 
cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/Reviews.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/Reviews.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/TextModerations.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/TextModerations.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/TextModerations.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/TextModerations.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/implementation/ContentModeratorClientImpl.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/implementation/ContentModeratorClientImpl.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/implementation/ContentModeratorClientImpl.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/implementation/ContentModeratorClientImpl.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/implementation/ImageModerationsImpl.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/implementation/ImageModerationsImpl.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/implementation/ImageModerationsImpl.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/implementation/ImageModerationsImpl.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/implementation/ListManagementImageListsImpl.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/implementation/ListManagementImageListsImpl.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/implementation/ListManagementImageListsImpl.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/implementation/ListManagementImageListsImpl.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/implementation/ListManagementImagesImpl.java 
b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/implementation/ListManagementImagesImpl.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/implementation/ListManagementImagesImpl.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/implementation/ListManagementImagesImpl.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/implementation/ListManagementTermListsImpl.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/implementation/ListManagementTermListsImpl.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/implementation/ListManagementTermListsImpl.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/implementation/ListManagementTermListsImpl.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/implementation/ListManagementTermsImpl.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/implementation/ListManagementTermsImpl.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/implementation/ListManagementTermsImpl.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/implementation/ListManagementTermsImpl.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/implementation/ReviewsImpl.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/implementation/ReviewsImpl.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/implementation/ReviewsImpl.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/implementation/ReviewsImpl.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/implementation/TextModerationsImpl.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/implementation/TextModerationsImpl.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/implementation/TextModerationsImpl.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/implementation/TextModerationsImpl.java diff --git 
a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/implementation/package-info.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/implementation/package-info.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/implementation/package-info.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/implementation/package-info.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/APIError.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/APIError.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/APIError.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/APIError.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/APIErrorException.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/APIErrorException.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/APIErrorException.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/APIErrorException.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/AddImageFileInputOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/AddImageFileInputOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/AddImageFileInputOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/AddImageFileInputOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/AddImageOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/AddImageOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/AddImageOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/AddImageOptionalParameter.java diff --git 
a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/AddImageUrlInputOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/AddImageUrlInputOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/AddImageUrlInputOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/AddImageUrlInputOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/AddVideoFrameOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/AddVideoFrameOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/AddVideoFrameOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/AddVideoFrameOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/AddVideoFrameStreamOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/AddVideoFrameStreamOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/AddVideoFrameStreamOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/AddVideoFrameStreamOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/AddVideoFrameUrlOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/AddVideoFrameUrlOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/AddVideoFrameUrlOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/AddVideoFrameUrlOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Address.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Address.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Address.java rename to 
sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Address.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/AzureRegionBaseUrl.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/AzureRegionBaseUrl.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/AzureRegionBaseUrl.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/AzureRegionBaseUrl.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/BodyMetadata.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/BodyMetadata.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/BodyMetadata.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/BodyMetadata.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/BodyModel.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/BodyModel.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/BodyModel.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/BodyModel.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/BodyModelModel.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/BodyModelModel.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/BodyModelModel.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/BodyModelModel.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Candidate.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Candidate.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Candidate.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Candidate.java diff --git 
a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Classification.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Classification.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Classification.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Classification.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/ClassificationCategory1.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/ClassificationCategory1.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/ClassificationCategory1.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/ClassificationCategory1.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/ClassificationCategory2.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/ClassificationCategory2.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/ClassificationCategory2.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/ClassificationCategory2.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/ClassificationCategory3.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/ClassificationCategory3.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/ClassificationCategory3.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/ClassificationCategory3.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Content.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Content.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Content.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Content.java diff --git 
a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/CreateJobOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/CreateJobOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/CreateJobOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/CreateJobOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/CreateReviewBodyItem.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/CreateReviewBodyItem.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/CreateReviewBodyItem.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/CreateReviewBodyItem.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/CreateReviewBodyItemMetadataItem.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/CreateReviewBodyItemMetadataItem.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/CreateReviewBodyItemMetadataItem.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/CreateReviewBodyItemMetadataItem.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/CreateReviewsOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/CreateReviewsOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/CreateReviewsOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/CreateReviewsOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/CreateVideoReviewsBodyItem.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/CreateVideoReviewsBodyItem.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/CreateVideoReviewsBodyItem.java rename to 
sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/CreateVideoReviewsBodyItem.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/CreateVideoReviewsBodyItemMetadataItem.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/CreateVideoReviewsBodyItemMetadataItem.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/CreateVideoReviewsBodyItemMetadataItem.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/CreateVideoReviewsBodyItemMetadataItem.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/CreateVideoReviewsBodyItemVideoFramesItem.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/CreateVideoReviewsBodyItemVideoFramesItem.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/CreateVideoReviewsBodyItemVideoFramesItem.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/CreateVideoReviewsBodyItemVideoFramesItem.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/CreateVideoReviewsBodyItemVideoFramesItemMetadataItem.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/CreateVideoReviewsBodyItemVideoFramesItemMetadataItem.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/CreateVideoReviewsBodyItemVideoFramesItemMetadataItem.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/CreateVideoReviewsBodyItemVideoFramesItemMetadataItem.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/CreateVideoReviewsBodyItemVideoFramesItemReviewerResultTagsItem.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/CreateVideoReviewsBodyItemVideoFramesItemReviewerResultTagsItem.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/CreateVideoReviewsBodyItemVideoFramesItemReviewerResultTagsItem.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/CreateVideoReviewsBodyItemVideoFramesItemReviewerResultTagsItem.java diff --git 
a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/CreateVideoReviewsOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/CreateVideoReviewsOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/CreateVideoReviewsOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/CreateVideoReviewsOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/DetectedLanguage.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/DetectedLanguage.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/DetectedLanguage.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/DetectedLanguage.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/DetectedTerms.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/DetectedTerms.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/DetectedTerms.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/DetectedTerms.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Email.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Email.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Email.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Email.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Error.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Error.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Error.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Error.java diff --git 
a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Evaluate.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Evaluate.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Evaluate.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Evaluate.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/EvaluateFileInputOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/EvaluateFileInputOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/EvaluateFileInputOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/EvaluateFileInputOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/EvaluateMethodOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/EvaluateMethodOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/EvaluateMethodOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/EvaluateMethodOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/EvaluateUrlInputOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/EvaluateUrlInputOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/EvaluateUrlInputOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/EvaluateUrlInputOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Face.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Face.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Face.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Face.java diff --git 
a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/FindFacesFileInputOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/FindFacesFileInputOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/FindFacesFileInputOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/FindFacesFileInputOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/FindFacesOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/FindFacesOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/FindFacesOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/FindFacesOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/FindFacesUrlInputOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/FindFacesUrlInputOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/FindFacesUrlInputOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/FindFacesUrlInputOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/FoundFaces.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/FoundFaces.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/FoundFaces.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/FoundFaces.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Frame.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Frame.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Frame.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Frame.java diff --git 
a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Frames.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Frames.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Frames.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Frames.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/GetAllTermsOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/GetAllTermsOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/GetAllTermsOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/GetAllTermsOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/GetVideoFramesOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/GetVideoFramesOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/GetVideoFramesOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/GetVideoFramesOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/IPA.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/IPA.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/IPA.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/IPA.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Image.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Image.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Image.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Image.java diff --git 
a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/ImageAdditionalInfoItem.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/ImageAdditionalInfoItem.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/ImageAdditionalInfoItem.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/ImageAdditionalInfoItem.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/ImageIds.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/ImageIds.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/ImageIds.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/ImageIds.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/ImageList.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/ImageList.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/ImageList.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/ImageList.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/ImageListMetadata.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/ImageListMetadata.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/ImageListMetadata.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/ImageListMetadata.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Job.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Job.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Job.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Job.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/JobExecutionReportDetails.java 
b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/JobExecutionReportDetails.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/JobExecutionReportDetails.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/JobExecutionReportDetails.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/JobId.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/JobId.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/JobId.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/JobId.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/JobListResult.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/JobListResult.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/JobListResult.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/JobListResult.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/KeyValuePair.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/KeyValuePair.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/KeyValuePair.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/KeyValuePair.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Match.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Match.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Match.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Match.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/MatchFileInputOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/MatchFileInputOptionalParameter.java similarity index 
100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/MatchFileInputOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/MatchFileInputOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/MatchMethodOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/MatchMethodOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/MatchMethodOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/MatchMethodOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/MatchResponse.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/MatchResponse.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/MatchResponse.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/MatchResponse.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/MatchUrlInputOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/MatchUrlInputOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/MatchUrlInputOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/MatchUrlInputOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/OCR.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/OCR.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/OCR.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/OCR.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/OCRFileInputOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/OCRFileInputOptionalParameter.java similarity index 100% rename from 
cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/OCRFileInputOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/OCRFileInputOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/OCRMethodOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/OCRMethodOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/OCRMethodOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/OCRMethodOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/OCRUrlInputOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/OCRUrlInputOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/OCRUrlInputOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/OCRUrlInputOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/PII.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/PII.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/PII.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/PII.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Phone.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Phone.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Phone.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Phone.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/RefreshIndex.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/RefreshIndex.java similarity index 100% rename from 
cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/RefreshIndex.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/RefreshIndex.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/RefreshIndexAdvancedInfoItem.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/RefreshIndexAdvancedInfoItem.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/RefreshIndexAdvancedInfoItem.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/RefreshIndexAdvancedInfoItem.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Review.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Review.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Review.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Review.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/SSN.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/SSN.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/SSN.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/SSN.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Screen.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Screen.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Screen.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Screen.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/ScreenTextOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/ScreenTextOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/ScreenTextOptionalParameter.java rename to 
sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/ScreenTextOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Status.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Status.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Status.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Status.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Tag.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Tag.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Tag.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Tag.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/TermList.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/TermList.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/TermList.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/TermList.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/TermListMetadata.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/TermListMetadata.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/TermListMetadata.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/TermListMetadata.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Terms.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Terms.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Terms.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/Terms.java diff --git 
a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/TermsData.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/TermsData.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/TermsData.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/TermsData.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/TermsInList.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/TermsInList.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/TermsInList.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/TermsInList.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/TermsPaging.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/TermsPaging.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/TermsPaging.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/TermsPaging.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/TranscriptModerationBodyItem.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/TranscriptModerationBodyItem.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/TranscriptModerationBodyItem.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/TranscriptModerationBodyItem.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/TranscriptModerationBodyItemTermsItem.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/TranscriptModerationBodyItemTermsItem.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/TranscriptModerationBodyItemTermsItem.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/TranscriptModerationBodyItemTermsItem.java diff --git 
a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/VideoFrameBodyItem.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/VideoFrameBodyItem.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/VideoFrameBodyItem.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/VideoFrameBodyItem.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/VideoFrameBodyItemMetadataItem.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/VideoFrameBodyItemMetadataItem.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/VideoFrameBodyItemMetadataItem.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/VideoFrameBodyItemMetadataItem.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/VideoFrameBodyItemReviewerResultTagsItem.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/VideoFrameBodyItemReviewerResultTagsItem.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/VideoFrameBodyItemReviewerResultTagsItem.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/VideoFrameBodyItemReviewerResultTagsItem.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/package-info.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/package-info.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/package-info.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/models/package-info.java diff --git a/cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/package-info.java b/sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/package-info.java similarity index 100% rename from cognitiveservices/data-plane/vision/contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/package-info.java rename to sdk/cognitiveservices/ms-azure-cs-contentmoderator/src/main/java/com/microsoft/azure/cognitiveservices/vision/contentmoderator/package-info.java diff --git 
a/cognitiveservices/data-plane/search/bingcustomimagesearch/pom.xml b/sdk/cognitiveservices/ms-azure-cs-customimagesearch/pom.xml similarity index 98% rename from cognitiveservices/data-plane/search/bingcustomimagesearch/pom.xml rename to sdk/cognitiveservices/ms-azure-cs-customimagesearch/pom.xml index 4362d9179db56..2d99abaad5d71 100644 --- a/cognitiveservices/data-plane/search/bingcustomimagesearch/pom.xml +++ b/sdk/cognitiveservices/ms-azure-cs-customimagesearch/pom.xml @@ -9,7 +9,7 @@ com.microsoft.azure.cognitiveservices azure-cognitiveservices-parent 1.0.2 - ../../pom.xml + ../pom.xml azure-cognitiveservices-customimagesearch 1.0.2-beta diff --git a/cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/BingCustomImageSearchAPI.java b/sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/BingCustomImageSearchAPI.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/BingCustomImageSearchAPI.java rename to sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/BingCustomImageSearchAPI.java diff --git a/cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/BingCustomImageSearchManager.java b/sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/BingCustomImageSearchManager.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/BingCustomImageSearchManager.java rename to sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/BingCustomImageSearchManager.java diff --git a/cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/BingCustomInstances.java b/sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/BingCustomInstances.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/BingCustomInstances.java rename to sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/BingCustomInstances.java diff --git a/cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/implementation/BingCustomImageSearchAPIImpl.java b/sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/implementation/BingCustomImageSearchAPIImpl.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/implementation/BingCustomImageSearchAPIImpl.java rename to sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/implementation/BingCustomImageSearchAPIImpl.java 
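The pom.xml hunk above for azure-cognitiveservices-customimagesearch (and the matching hunks for azure-cognitiveservices-customsearch and azure-cognitiveservices-customvision-prediction later in this diff) changes only the Maven parent's relativePath: each module moves from a nested cognitiveservices/data-plane/... folder into a flat sdk/cognitiveservices/ms-azure-cs-* folder, so the shared azure-cognitiveservices-parent POM now sits one directory up instead of two (or, for the customvision modules, three). The hunk is rendered here with its XML markup stripped; purely as a sketch, assuming the standard Maven <parent> element names and that the surrounding layout is otherwise unchanged, the updated section of ms-azure-cs-customimagesearch/pom.xml would read roughly as follows:

    <!-- Sketch of the post-move parent reference; tag names and indentation are
         assumed, since this diff shows the pom.xml hunk with its markup stripped. -->
    <parent>
      <groupId>com.microsoft.azure.cognitiveservices</groupId>
      <artifactId>azure-cognitiveservices-parent</artifactId>
      <version>1.0.2</version>
      <!-- previously ../../pom.xml, before the move under sdk/cognitiveservices/ -->
      <relativePath>../pom.xml</relativePath>
    </parent>
    <artifactId>azure-cognitiveservices-customimagesearch</artifactId>
    <version>1.0.2-beta</version>

Keeping relativePath in step with the new folder depth matters because Maven resolves the parent from that path first (the default is ../pom.xml) and only falls back to a repository copy when the POM found there does not match the declared coordinates, so after a directory move a stale path would typically make the build pull the parent from the repository rather than from the in-tree sdk/cognitiveservices/pom.xml.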
diff --git a/cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/implementation/BingCustomInstancesImpl.java b/sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/implementation/BingCustomInstancesImpl.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/implementation/BingCustomInstancesImpl.java rename to sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/implementation/BingCustomInstancesImpl.java diff --git a/cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/implementation/package-info.java b/sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/implementation/package-info.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/implementation/package-info.java rename to sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/implementation/package-info.java diff --git a/cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/Answer.java b/sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/Answer.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/Answer.java rename to sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/Answer.java diff --git a/cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/CreativeWork.java b/sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/CreativeWork.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/CreativeWork.java rename to sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/CreativeWork.java diff --git a/cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/Error.java b/sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/Error.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/Error.java rename to sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/Error.java diff --git 
a/cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ErrorCode.java b/sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ErrorCode.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ErrorCode.java rename to sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ErrorCode.java diff --git a/cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ErrorResponse.java b/sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ErrorResponse.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ErrorResponse.java rename to sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ErrorResponse.java diff --git a/cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ErrorResponseException.java b/sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ErrorResponseException.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ErrorResponseException.java rename to sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ErrorResponseException.java diff --git a/cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ErrorSubCode.java b/sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ErrorSubCode.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ErrorSubCode.java rename to sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ErrorSubCode.java diff --git a/cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/Freshness.java b/sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/Freshness.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/Freshness.java rename to sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/Freshness.java diff --git 
a/cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/Identifiable.java b/sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/Identifiable.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/Identifiable.java rename to sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/Identifiable.java diff --git a/cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ImageAspect.java b/sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ImageAspect.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ImageAspect.java rename to sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ImageAspect.java diff --git a/cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ImageColor.java b/sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ImageColor.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ImageColor.java rename to sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ImageColor.java diff --git a/cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ImageContent.java b/sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ImageContent.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ImageContent.java rename to sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ImageContent.java diff --git a/cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ImageLicense.java b/sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ImageLicense.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ImageLicense.java rename to sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ImageLicense.java diff --git 
a/cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ImageObject.java b/sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ImageObject.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ImageObject.java rename to sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ImageObject.java diff --git a/cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ImageSearchOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ImageSearchOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ImageSearchOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ImageSearchOptionalParameter.java diff --git a/cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ImageSize.java b/sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ImageSize.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ImageSize.java rename to sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ImageSize.java diff --git a/cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ImageType.java b/sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ImageType.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ImageType.java rename to sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ImageType.java diff --git a/cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/Images.java b/sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/Images.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/Images.java rename to sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/Images.java diff --git 
a/cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/MediaObject.java b/sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/MediaObject.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/MediaObject.java rename to sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/MediaObject.java diff --git a/cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/Query.java b/sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/Query.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/Query.java rename to sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/Query.java diff --git a/cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/Response.java b/sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/Response.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/Response.java rename to sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/Response.java diff --git a/cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ResponseBase.java b/sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ResponseBase.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ResponseBase.java rename to sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/ResponseBase.java diff --git a/cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/SafeSearch.java b/sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/SafeSearch.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/SafeSearch.java rename to sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/SafeSearch.java diff --git a/cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/SearchResultsAnswer.java 
b/sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/SearchResultsAnswer.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/SearchResultsAnswer.java rename to sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/SearchResultsAnswer.java diff --git a/cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/Thing.java b/sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/Thing.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/Thing.java rename to sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/Thing.java diff --git a/cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/WebPage.java b/sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/WebPage.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/WebPage.java rename to sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/WebPage.java diff --git a/cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/package-info.java b/sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/package-info.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/package-info.java rename to sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/models/package-info.java diff --git a/cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/package-info.java b/sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/package-info.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/package-info.java rename to sdk/cognitiveservices/ms-azure-cs-customimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customimagesearch/package-info.java diff --git a/cognitiveservices/data-plane/search/bingcustomsearch/pom.xml b/sdk/cognitiveservices/ms-azure-cs-customsearch/pom.xml similarity index 98% rename from cognitiveservices/data-plane/search/bingcustomsearch/pom.xml rename to sdk/cognitiveservices/ms-azure-cs-customsearch/pom.xml index 14c184488025a..5b6ac07a479a8 100644 --- 
a/cognitiveservices/data-plane/search/bingcustomsearch/pom.xml +++ b/sdk/cognitiveservices/ms-azure-cs-customsearch/pom.xml @@ -9,7 +9,7 @@ com.microsoft.azure.cognitiveservices azure-cognitiveservices-parent 1.0.2 - ../../pom.xml + ../pom.xml azure-cognitiveservices-customsearch 1.0.2 diff --git a/cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/BingCustomInstances.java b/sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/BingCustomInstances.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/BingCustomInstances.java rename to sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/BingCustomInstances.java diff --git a/cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/BingCustomSearchAPI.java b/sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/BingCustomSearchAPI.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/BingCustomSearchAPI.java rename to sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/BingCustomSearchAPI.java diff --git a/cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/BingCustomSearchManager.java b/sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/BingCustomSearchManager.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/BingCustomSearchManager.java rename to sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/BingCustomSearchManager.java diff --git a/cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/implementation/BingCustomInstancesImpl.java b/sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/implementation/BingCustomInstancesImpl.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/implementation/BingCustomInstancesImpl.java rename to sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/implementation/BingCustomInstancesImpl.java diff --git a/cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/implementation/BingCustomSearchAPIImpl.java b/sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/implementation/BingCustomSearchAPIImpl.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/implementation/BingCustomSearchAPIImpl.java rename to 
sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/implementation/BingCustomSearchAPIImpl.java diff --git a/cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/implementation/package-info.java b/sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/implementation/package-info.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/implementation/package-info.java rename to sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/implementation/package-info.java diff --git a/cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/Answer.java b/sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/Answer.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/Answer.java rename to sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/Answer.java diff --git a/cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/CreativeWork.java b/sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/CreativeWork.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/CreativeWork.java rename to sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/CreativeWork.java diff --git a/cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/Error.java b/sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/Error.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/Error.java rename to sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/Error.java diff --git a/cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/ErrorCode.java b/sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/ErrorCode.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/ErrorCode.java rename to sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/ErrorCode.java diff --git a/cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/ErrorResponse.java 
b/sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/ErrorResponse.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/ErrorResponse.java rename to sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/ErrorResponse.java diff --git a/cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/ErrorResponseException.java b/sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/ErrorResponseException.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/ErrorResponseException.java rename to sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/ErrorResponseException.java diff --git a/cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/ErrorSubCode.java b/sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/ErrorSubCode.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/ErrorSubCode.java rename to sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/ErrorSubCode.java diff --git a/cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/Identifiable.java b/sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/Identifiable.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/Identifiable.java rename to sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/Identifiable.java diff --git a/cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/Query.java b/sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/Query.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/Query.java rename to sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/Query.java diff --git a/cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/QueryContext.java b/sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/QueryContext.java similarity index 100% rename from 
cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/QueryContext.java rename to sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/QueryContext.java diff --git a/cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/Response.java b/sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/Response.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/Response.java rename to sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/Response.java diff --git a/cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/ResponseBase.java b/sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/ResponseBase.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/ResponseBase.java rename to sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/ResponseBase.java diff --git a/cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/SafeSearch.java b/sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/SafeSearch.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/SafeSearch.java rename to sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/SafeSearch.java diff --git a/cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/SearchOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/SearchOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/SearchOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/SearchOptionalParameter.java diff --git a/cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/SearchResponse.java b/sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/SearchResponse.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/SearchResponse.java rename to 
sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/SearchResponse.java diff --git a/cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/SearchResultsAnswer.java b/sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/SearchResultsAnswer.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/SearchResultsAnswer.java rename to sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/SearchResultsAnswer.java diff --git a/cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/TextFormat.java b/sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/TextFormat.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/TextFormat.java rename to sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/TextFormat.java diff --git a/cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/Thing.java b/sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/Thing.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/Thing.java rename to sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/Thing.java diff --git a/cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/WebMetaTag.java b/sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/WebMetaTag.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/WebMetaTag.java rename to sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/WebMetaTag.java diff --git a/cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/WebPage.java b/sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/WebPage.java similarity index 100% rename from cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/WebPage.java rename to sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/WebPage.java diff --git a/cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/WebWebAnswer.java 
b/sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/WebWebAnswer.java
similarity index 100%
rename from cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/WebWebAnswer.java
rename to sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/WebWebAnswer.java
diff --git a/cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/package-info.java b/sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/package-info.java
similarity index 100%
rename from cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/package-info.java
rename to sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/models/package-info.java
diff --git a/cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/package-info.java b/sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/package-info.java
similarity index 100%
rename from cognitiveservices/data-plane/search/bingcustomsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/package-info.java
rename to sdk/cognitiveservices/ms-azure-cs-customsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/customsearch/package-info.java
diff --git a/cognitiveservices/data-plane/vision/customvision/prediction/pom.xml b/sdk/cognitiveservices/ms-azure-cs-customvision-prediction/pom.xml
similarity index 98%
rename from cognitiveservices/data-plane/vision/customvision/prediction/pom.xml
rename to sdk/cognitiveservices/ms-azure-cs-customvision-prediction/pom.xml
index e208aaa0b909d..f4ee4cec9cb4f 100644
--- a/cognitiveservices/data-plane/vision/customvision/prediction/pom.xml
+++ b/sdk/cognitiveservices/ms-azure-cs-customvision-prediction/pom.xml
@@ -11,7 +11,7 @@
 com.microsoft.azure.cognitiveservices
 azure-cognitiveservices-parent
 1.0.2
- ../../../pom.xml
+ ../pom.xml
 azure-cognitiveservices-customvision-prediction
 1.0.2-beta
diff --git a/cognitiveservices/data-plane/vision/customvision/prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/CustomVisionPredictionManager.java b/sdk/cognitiveservices/ms-azure-cs-customvision-prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/CustomVisionPredictionManager.java
similarity index 100%
rename from cognitiveservices/data-plane/vision/customvision/prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/CustomVisionPredictionManager.java
rename to sdk/cognitiveservices/ms-azure-cs-customvision-prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/CustomVisionPredictionManager.java
diff --git a/cognitiveservices/data-plane/vision/customvision/prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/PredictionEndpoint.java
b/sdk/cognitiveservices/ms-azure-cs-customvision-prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/PredictionEndpoint.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/PredictionEndpoint.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/PredictionEndpoint.java diff --git a/cognitiveservices/data-plane/vision/customvision/prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/Predictions.java b/sdk/cognitiveservices/ms-azure-cs-customvision-prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/Predictions.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/Predictions.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/Predictions.java diff --git a/cognitiveservices/data-plane/vision/customvision/prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/implementation/PredictionEndpointImpl.java b/sdk/cognitiveservices/ms-azure-cs-customvision-prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/implementation/PredictionEndpointImpl.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/implementation/PredictionEndpointImpl.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/implementation/PredictionEndpointImpl.java diff --git a/cognitiveservices/data-plane/vision/customvision/prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/implementation/PredictionsImpl.java b/sdk/cognitiveservices/ms-azure-cs-customvision-prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/implementation/PredictionsImpl.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/implementation/PredictionsImpl.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/implementation/PredictionsImpl.java diff --git a/cognitiveservices/data-plane/vision/customvision/prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/implementation/package-info.java b/sdk/cognitiveservices/ms-azure-cs-customvision-prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/implementation/package-info.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/implementation/package-info.java rename to 
sdk/cognitiveservices/ms-azure-cs-customvision-prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/implementation/package-info.java diff --git a/cognitiveservices/data-plane/vision/customvision/prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/models/BoundingBox.java b/sdk/cognitiveservices/ms-azure-cs-customvision-prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/models/BoundingBox.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/models/BoundingBox.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/models/BoundingBox.java diff --git a/cognitiveservices/data-plane/vision/customvision/prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/models/ImagePrediction.java b/sdk/cognitiveservices/ms-azure-cs-customvision-prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/models/ImagePrediction.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/models/ImagePrediction.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/models/ImagePrediction.java diff --git a/cognitiveservices/data-plane/vision/customvision/prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/models/ImageUrl.java b/sdk/cognitiveservices/ms-azure-cs-customvision-prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/models/ImageUrl.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/models/ImageUrl.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/models/ImageUrl.java diff --git a/cognitiveservices/data-plane/vision/customvision/prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/models/PredictImageOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-customvision-prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/models/PredictImageOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/models/PredictImageOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/models/PredictImageOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/customvision/prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/models/PredictImageUrlOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-customvision-prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/models/PredictImageUrlOptionalParameter.java similarity index 100% 
rename from cognitiveservices/data-plane/vision/customvision/prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/models/PredictImageUrlOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/models/PredictImageUrlOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/customvision/prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/models/PredictImageUrlWithNoStoreOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-customvision-prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/models/PredictImageUrlWithNoStoreOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/models/PredictImageUrlWithNoStoreOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/models/PredictImageUrlWithNoStoreOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/customvision/prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/models/PredictImageWithNoStoreOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-customvision-prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/models/PredictImageWithNoStoreOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/models/PredictImageWithNoStoreOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/models/PredictImageWithNoStoreOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/customvision/prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/models/Prediction.java b/sdk/cognitiveservices/ms-azure-cs-customvision-prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/models/Prediction.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/models/Prediction.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/models/Prediction.java diff --git a/cognitiveservices/data-plane/vision/customvision/prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/models/package-info.java b/sdk/cognitiveservices/ms-azure-cs-customvision-prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/models/package-info.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/models/package-info.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/models/package-info.java diff --git 
a/cognitiveservices/data-plane/vision/customvision/prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/package-info.java b/sdk/cognitiveservices/ms-azure-cs-customvision-prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/package-info.java
similarity index 100%
rename from cognitiveservices/data-plane/vision/customvision/prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/package-info.java
rename to sdk/cognitiveservices/ms-azure-cs-customvision-prediction/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/prediction/package-info.java
diff --git a/cognitiveservices/data-plane/vision/customvision/training/pom.xml b/sdk/cognitiveservices/ms-azure-cs-customvision-training/pom.xml
similarity index 98%
rename from cognitiveservices/data-plane/vision/customvision/training/pom.xml
rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/pom.xml
index 9330c16a23f79..491e94891644b 100644
--- a/cognitiveservices/data-plane/vision/customvision/training/pom.xml
+++ b/sdk/cognitiveservices/ms-azure-cs-customvision-training/pom.xml
@@ -11,7 +11,7 @@
 com.microsoft.azure.cognitiveservices
 azure-cognitiveservices-parent
 1.0.2
- ../../../pom.xml
+ ../pom.xml
 azure-cognitiveservices-customvision-training
 1.0.2-beta
diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/CustomVisionTrainingManager.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/CustomVisionTrainingManager.java
similarity index 100%
rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/CustomVisionTrainingManager.java
rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/CustomVisionTrainingManager.java
diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/TrainingApi.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/TrainingApi.java
similarity index 100%
rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/TrainingApi.java
rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/TrainingApi.java
diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/Trainings.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/Trainings.java
similarity index 100%
rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/Trainings.java
rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/Trainings.java
diff --git
a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/implementation/TrainingApiImpl.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/implementation/TrainingApiImpl.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/implementation/TrainingApiImpl.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/implementation/TrainingApiImpl.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/implementation/TrainingsImpl.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/implementation/TrainingsImpl.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/implementation/TrainingsImpl.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/implementation/TrainingsImpl.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/implementation/package-info.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/implementation/package-info.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/implementation/package-info.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/implementation/package-info.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/BoundingBox.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/BoundingBox.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/BoundingBox.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/BoundingBox.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/Classifier.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/Classifier.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/Classifier.java rename to 
sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/Classifier.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/CreateImageRegionsOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/CreateImageRegionsOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/CreateImageRegionsOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/CreateImageRegionsOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/CreateImageTagsOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/CreateImageTagsOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/CreateImageTagsOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/CreateImageTagsOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/CreateImagesFromDataOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/CreateImagesFromDataOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/CreateImagesFromDataOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/CreateImagesFromDataOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/CreateProjectOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/CreateProjectOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/CreateProjectOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/CreateProjectOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/CreateTagOptionalParameter.java 
b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/CreateTagOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/CreateTagOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/CreateTagOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/Domain.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/Domain.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/Domain.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/Domain.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/DomainType.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/DomainType.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/DomainType.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/DomainType.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/Export.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/Export.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/Export.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/Export.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ExportFlavor.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ExportFlavor.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ExportFlavor.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ExportFlavor.java diff --git 
a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ExportIterationOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ExportIterationOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ExportIterationOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ExportIterationOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ExportPlatform.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ExportPlatform.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ExportPlatform.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ExportPlatform.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ExportStatusModel.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ExportStatusModel.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ExportStatusModel.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ExportStatusModel.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/GetImagePerformanceCountOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/GetImagePerformanceCountOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/GetImagePerformanceCountOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/GetImagePerformanceCountOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/GetImagePerformancesOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/GetImagePerformancesOptionalParameter.java similarity index 100% rename from 
cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/GetImagePerformancesOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/GetImagePerformancesOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/GetImagesByIdsOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/GetImagesByIdsOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/GetImagesByIdsOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/GetImagesByIdsOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/GetIterationPerformanceOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/GetIterationPerformanceOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/GetIterationPerformanceOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/GetIterationPerformanceOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/GetTagOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/GetTagOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/GetTagOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/GetTagOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/GetTaggedImageCountOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/GetTaggedImageCountOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/GetTaggedImageCountOptionalParameter.java rename to 
sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/GetTaggedImageCountOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/GetTaggedImagesOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/GetTaggedImagesOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/GetTaggedImagesOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/GetTaggedImagesOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/GetTagsOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/GetTagsOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/GetTagsOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/GetTagsOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/GetUntaggedImageCountOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/GetUntaggedImageCountOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/GetUntaggedImageCountOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/GetUntaggedImageCountOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/GetUntaggedImagesOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/GetUntaggedImagesOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/GetUntaggedImagesOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/GetUntaggedImagesOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/Image.java 
b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/Image.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/Image.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/Image.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageCreateResult.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageCreateResult.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageCreateResult.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageCreateResult.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageCreateSummary.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageCreateSummary.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageCreateSummary.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageCreateSummary.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageFileCreateBatch.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageFileCreateBatch.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageFileCreateBatch.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageFileCreateBatch.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageFileCreateEntry.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageFileCreateEntry.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageFileCreateEntry.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageFileCreateEntry.java diff --git 
a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageIdCreateBatch.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageIdCreateBatch.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageIdCreateBatch.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageIdCreateBatch.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageIdCreateEntry.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageIdCreateEntry.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageIdCreateEntry.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageIdCreateEntry.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImagePerformance.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImagePerformance.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImagePerformance.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImagePerformance.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImagePrediction.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImagePrediction.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImagePrediction.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImagePrediction.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageRegion.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageRegion.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageRegion.java rename to 
sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageRegion.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageRegionCreateBatch.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageRegionCreateBatch.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageRegionCreateBatch.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageRegionCreateBatch.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageRegionCreateEntry.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageRegionCreateEntry.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageRegionCreateEntry.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageRegionCreateEntry.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageRegionCreateResult.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageRegionCreateResult.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageRegionCreateResult.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageRegionCreateResult.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageRegionCreateSummary.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageRegionCreateSummary.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageRegionCreateSummary.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageRegionCreateSummary.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageRegionProposal.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageRegionProposal.java similarity index 100% 
rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageRegionProposal.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageRegionProposal.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageTag.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageTag.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageTag.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageTag.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageTagCreateBatch.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageTagCreateBatch.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageTagCreateBatch.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageTagCreateBatch.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageTagCreateEntry.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageTagCreateEntry.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageTagCreateEntry.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageTagCreateEntry.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageTagCreateSummary.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageTagCreateSummary.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageTagCreateSummary.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageTagCreateSummary.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageUploadStatus.java 
b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageUploadStatus.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageUploadStatus.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageUploadStatus.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageUrl.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageUrl.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageUrl.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageUrl.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageUrlCreateBatch.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageUrlCreateBatch.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageUrlCreateBatch.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageUrlCreateBatch.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageUrlCreateEntry.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageUrlCreateEntry.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageUrlCreateEntry.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ImageUrlCreateEntry.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/Iteration.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/Iteration.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/Iteration.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/Iteration.java diff --git 
a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/IterationPerformance.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/IterationPerformance.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/IterationPerformance.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/IterationPerformance.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/OrderBy.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/OrderBy.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/OrderBy.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/OrderBy.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/Prediction.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/Prediction.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/Prediction.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/Prediction.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/PredictionQueryResult.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/PredictionQueryResult.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/PredictionQueryResult.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/PredictionQueryResult.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/PredictionQueryTag.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/PredictionQueryTag.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/PredictionQueryTag.java rename to 
sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/PredictionQueryTag.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/PredictionQueryToken.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/PredictionQueryToken.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/PredictionQueryToken.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/PredictionQueryToken.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/Project.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/Project.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/Project.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/Project.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ProjectSettings.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ProjectSettings.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ProjectSettings.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/ProjectSettings.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/QuickTestImageOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/QuickTestImageOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/QuickTestImageOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/QuickTestImageOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/QuickTestImageUrlOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/QuickTestImageUrlOptionalParameter.java similarity index 100% rename from 
cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/QuickTestImageUrlOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/QuickTestImageUrlOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/Region.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/Region.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/Region.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/Region.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/RegionProposal.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/RegionProposal.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/RegionProposal.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/RegionProposal.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/StoredImagePrediction.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/StoredImagePrediction.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/StoredImagePrediction.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/StoredImagePrediction.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/Tag.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/Tag.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/Tag.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/Tag.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/TagPerformance.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/TagPerformance.java 
similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/TagPerformance.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/TagPerformance.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/package-info.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/package-info.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/package-info.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/models/package-info.java diff --git a/cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/package-info.java b/sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/package-info.java similarity index 100% rename from cognitiveservices/data-plane/vision/customvision/training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/package-info.java rename to sdk/cognitiveservices/ms-azure-cs-customvision-training/src/main/java/com/microsoft/azure/cognitiveservices/vision/customvision/training/package-info.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/pom.xml b/sdk/cognitiveservices/ms-azure-cs-entitysearch/pom.xml similarity index 98% rename from cognitiveservices/data-plane/search/bingentitysearch/pom.xml rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/pom.xml index 58421872540f0..e43ccb17c512e 100644 --- a/cognitiveservices/data-plane/search/bingentitysearch/pom.xml +++ b/sdk/cognitiveservices/ms-azure-cs-entitysearch/pom.xml @@ -9,7 +9,7 @@ <groupId>com.microsoft.azure.cognitiveservices</groupId> <artifactId>azure-cognitiveservices-parent</artifactId> <version>1.0.2</version> - <relativePath>../../pom.xml</relativePath> + <relativePath>../pom.xml</relativePath> </parent> <artifactId>azure-cognitiveservices-entitysearch</artifactId> <version>1.0.2</version> diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/BingEntities.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/BingEntities.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/BingEntities.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/BingEntities.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/BingEntitySearchAPI.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/BingEntitySearchAPI.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/BingEntitySearchAPI.java rename to
sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/BingEntitySearchAPI.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/BingEntitySearchManager.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/BingEntitySearchManager.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/BingEntitySearchManager.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/BingEntitySearchManager.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/implementation/BingEntitiesImpl.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/implementation/BingEntitiesImpl.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/implementation/BingEntitiesImpl.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/implementation/BingEntitiesImpl.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/implementation/BingEntitySearchAPIImpl.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/implementation/BingEntitySearchAPIImpl.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/implementation/BingEntitySearchAPIImpl.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/implementation/BingEntitySearchAPIImpl.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/implementation/package-info.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/implementation/package-info.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/implementation/package-info.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/implementation/package-info.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/Airport.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/Airport.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/Airport.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/Airport.java diff --git 
a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/Answer.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/Answer.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/Answer.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/Answer.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/AnswerType.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/AnswerType.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/AnswerType.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/AnswerType.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/CivicStructure.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/CivicStructure.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/CivicStructure.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/CivicStructure.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ContractualRulesAttribution.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ContractualRulesAttribution.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ContractualRulesAttribution.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ContractualRulesAttribution.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ContractualRulesContractualRule.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ContractualRulesContractualRule.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ContractualRulesContractualRule.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ContractualRulesContractualRule.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ContractualRulesLicenseAttribution.java 
b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ContractualRulesLicenseAttribution.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ContractualRulesLicenseAttribution.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ContractualRulesLicenseAttribution.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ContractualRulesLinkAttribution.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ContractualRulesLinkAttribution.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ContractualRulesLinkAttribution.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ContractualRulesLinkAttribution.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ContractualRulesMediaAttribution.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ContractualRulesMediaAttribution.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ContractualRulesMediaAttribution.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ContractualRulesMediaAttribution.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ContractualRulesTextAttribution.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ContractualRulesTextAttribution.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ContractualRulesTextAttribution.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ContractualRulesTextAttribution.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/CreativeWork.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/CreativeWork.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/CreativeWork.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/CreativeWork.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/EntertainmentBusiness.java 
b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/EntertainmentBusiness.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/EntertainmentBusiness.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/EntertainmentBusiness.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/EntitiesEntityPresentationInfo.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/EntitiesEntityPresentationInfo.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/EntitiesEntityPresentationInfo.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/EntitiesEntityPresentationInfo.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/EntitiesModel.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/EntitiesModel.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/EntitiesModel.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/EntitiesModel.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/EntityQueryScenario.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/EntityQueryScenario.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/EntityQueryScenario.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/EntityQueryScenario.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/EntityScenario.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/EntityScenario.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/EntityScenario.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/EntityScenario.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/EntityType.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/EntityType.java similarity index 100% rename from 
cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/EntityType.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/EntityType.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/Error.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/Error.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/Error.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/Error.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ErrorCode.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ErrorCode.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ErrorCode.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ErrorCode.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ErrorResponse.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ErrorResponse.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ErrorResponse.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ErrorResponse.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ErrorResponseException.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ErrorResponseException.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ErrorResponseException.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ErrorResponseException.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ErrorSubCode.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ErrorSubCode.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ErrorSubCode.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ErrorSubCode.java diff --git 
a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/FoodEstablishment.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/FoodEstablishment.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/FoodEstablishment.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/FoodEstablishment.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/Hotel.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/Hotel.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/Hotel.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/Hotel.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/Identifiable.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/Identifiable.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/Identifiable.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/Identifiable.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ImageObject.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ImageObject.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ImageObject.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ImageObject.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/Intangible.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/Intangible.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/Intangible.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/Intangible.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/License.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/License.java similarity index 100% rename from 
cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/License.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/License.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/LocalBusiness.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/LocalBusiness.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/LocalBusiness.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/LocalBusiness.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/LodgingBusiness.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/LodgingBusiness.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/LodgingBusiness.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/LodgingBusiness.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/MediaObject.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/MediaObject.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/MediaObject.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/MediaObject.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/MovieTheater.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/MovieTheater.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/MovieTheater.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/MovieTheater.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/Organization.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/Organization.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/Organization.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/Organization.java diff --git 
a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/Place.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/Place.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/Place.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/Place.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/Places.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/Places.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/Places.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/Places.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/PostalAddress.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/PostalAddress.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/PostalAddress.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/PostalAddress.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/QueryContext.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/QueryContext.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/QueryContext.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/QueryContext.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/Response.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/Response.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/Response.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/Response.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ResponseBase.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ResponseBase.java similarity index 100% rename from 
cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ResponseBase.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ResponseBase.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ResponseFormat.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ResponseFormat.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ResponseFormat.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/ResponseFormat.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/Restaurant.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/Restaurant.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/Restaurant.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/Restaurant.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/SafeSearch.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/SafeSearch.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/SafeSearch.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/SafeSearch.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/SearchOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/SearchOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/SearchOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/SearchOptionalParameter.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/SearchResponse.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/SearchResponse.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/SearchResponse.java rename to 
sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/SearchResponse.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/SearchResultsAnswer.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/SearchResultsAnswer.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/SearchResultsAnswer.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/SearchResultsAnswer.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/StructuredValue.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/StructuredValue.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/StructuredValue.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/StructuredValue.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/Thing.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/Thing.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/Thing.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/Thing.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/TouristAttraction.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/TouristAttraction.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/TouristAttraction.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/TouristAttraction.java diff --git a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/package-info.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/package-info.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/package-info.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/models/package-info.java diff --git 
a/cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/package-info.java b/sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/package-info.java similarity index 100% rename from cognitiveservices/data-plane/search/bingentitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/package-info.java rename to sdk/cognitiveservices/ms-azure-cs-entitysearch/src/main/java/com/microsoft/azure/cognitiveservices/search/entitysearch/package-info.java diff --git a/cognitiveservices/data-plane/vision/faceapi/pom.xml b/sdk/cognitiveservices/ms-azure-cs-faceapi/pom.xml similarity index 98% rename from cognitiveservices/data-plane/vision/faceapi/pom.xml rename to sdk/cognitiveservices/ms-azure-cs-faceapi/pom.xml index 9da7c43166699..75833fdba5817 100644 --- a/cognitiveservices/data-plane/vision/faceapi/pom.xml +++ b/sdk/cognitiveservices/ms-azure-cs-faceapi/pom.xml @@ -9,7 +9,7 @@ <groupId>com.microsoft.azure.cognitiveservices</groupId> <artifactId>azure-cognitiveservices-parent</artifactId> <version>1.0.2</version> - <relativePath>../../pom.xml</relativePath> + <relativePath>../pom.xml</relativePath> </parent> <artifactId>azure-cognitiveservices-faceapi</artifactId> <version>1.0.2-beta</version> diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/FaceAPI.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/FaceAPI.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/FaceAPI.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/FaceAPI.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/FaceAPIManager.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/FaceAPIManager.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/FaceAPIManager.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/FaceAPIManager.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/FaceLists.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/FaceLists.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/FaceLists.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/FaceLists.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/Faces.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/Faces.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/Faces.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/Faces.java diff --git
a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/PersonGroupPersons.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/PersonGroupPersons.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/PersonGroupPersons.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/PersonGroupPersons.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/PersonGroups.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/PersonGroups.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/PersonGroups.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/PersonGroups.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/implementation/FaceAPIImpl.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/implementation/FaceAPIImpl.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/implementation/FaceAPIImpl.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/implementation/FaceAPIImpl.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/implementation/FaceListsImpl.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/implementation/FaceListsImpl.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/implementation/FaceListsImpl.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/implementation/FaceListsImpl.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/implementation/FacesImpl.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/implementation/FacesImpl.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/implementation/FacesImpl.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/implementation/FacesImpl.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/implementation/PersonGroupPersonsImpl.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/implementation/PersonGroupPersonsImpl.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/implementation/PersonGroupPersonsImpl.java rename to 
sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/implementation/PersonGroupPersonsImpl.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/implementation/PersonGroupsImpl.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/implementation/PersonGroupsImpl.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/implementation/PersonGroupsImpl.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/implementation/PersonGroupsImpl.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/implementation/package-info.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/implementation/package-info.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/implementation/package-info.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/implementation/package-info.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/APIError.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/APIError.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/APIError.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/APIError.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/APIErrorException.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/APIErrorException.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/APIErrorException.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/APIErrorException.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/Accessory.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/Accessory.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/Accessory.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/Accessory.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/AccessoryType.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/AccessoryType.java similarity index 100% rename from 
cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/AccessoryType.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/AccessoryType.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/AddFaceFromStreamOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/AddFaceFromStreamOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/AddFaceFromStreamOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/AddFaceFromStreamOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/AddFaceFromUrlOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/AddFaceFromUrlOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/AddFaceFromUrlOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/AddFaceFromUrlOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/AddPersonFaceFromStreamOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/AddPersonFaceFromStreamOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/AddPersonFaceFromStreamOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/AddPersonFaceFromStreamOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/AddPersonFaceFromUrlOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/AddPersonFaceFromUrlOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/AddPersonFaceFromUrlOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/AddPersonFaceFromUrlOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/AzureRegions.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/AzureRegions.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/AzureRegions.java rename to 
sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/AzureRegions.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/Blur.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/Blur.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/Blur.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/Blur.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/BlurLevel.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/BlurLevel.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/BlurLevel.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/BlurLevel.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/Coordinate.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/Coordinate.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/Coordinate.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/Coordinate.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/CreateFaceListsOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/CreateFaceListsOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/CreateFaceListsOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/CreateFaceListsOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/CreatePersonGroupPersonsOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/CreatePersonGroupPersonsOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/CreatePersonGroupPersonsOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/CreatePersonGroupPersonsOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/CreatePersonGroupsOptionalParameter.java 
b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/CreatePersonGroupsOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/CreatePersonGroupsOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/CreatePersonGroupsOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/DetectWithStreamOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/DetectWithStreamOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/DetectWithStreamOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/DetectWithStreamOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/DetectWithUrlOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/DetectWithUrlOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/DetectWithUrlOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/DetectWithUrlOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/DetectedFace.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/DetectedFace.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/DetectedFace.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/DetectedFace.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/Emotion.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/Emotion.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/Emotion.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/Emotion.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/Error.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/Error.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/Error.java rename to 
sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/Error.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/Exposure.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/Exposure.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/Exposure.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/Exposure.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/ExposureLevel.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/ExposureLevel.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/ExposureLevel.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/ExposureLevel.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/FaceAttributeType.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/FaceAttributeType.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/FaceAttributeType.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/FaceAttributeType.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/FaceAttributes.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/FaceAttributes.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/FaceAttributes.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/FaceAttributes.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/FaceLandmarks.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/FaceLandmarks.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/FaceLandmarks.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/FaceLandmarks.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/FaceList.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/FaceList.java similarity index 100% rename from 
cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/FaceList.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/FaceList.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/FaceRectangle.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/FaceRectangle.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/FaceRectangle.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/FaceRectangle.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/FacialHair.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/FacialHair.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/FacialHair.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/FacialHair.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/FindSimilarMatchMode.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/FindSimilarMatchMode.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/FindSimilarMatchMode.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/FindSimilarMatchMode.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/FindSimilarOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/FindSimilarOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/FindSimilarOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/FindSimilarOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/FindSimilarRequest.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/FindSimilarRequest.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/FindSimilarRequest.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/FindSimilarRequest.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/Gender.java 
b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/Gender.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/Gender.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/Gender.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/GlassesType.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/GlassesType.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/GlassesType.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/GlassesType.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/GroupRequest.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/GroupRequest.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/GroupRequest.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/GroupRequest.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/GroupResult.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/GroupResult.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/GroupResult.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/GroupResult.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/Hair.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/Hair.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/Hair.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/Hair.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/HairColor.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/HairColor.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/HairColor.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/HairColor.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/HairColorType.java 
b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/HairColorType.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/HairColorType.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/HairColorType.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/HeadPose.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/HeadPose.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/HeadPose.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/HeadPose.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/IdentifyCandidate.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/IdentifyCandidate.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/IdentifyCandidate.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/IdentifyCandidate.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/IdentifyOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/IdentifyOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/IdentifyOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/IdentifyOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/IdentifyRequest.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/IdentifyRequest.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/IdentifyRequest.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/IdentifyRequest.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/IdentifyResult.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/IdentifyResult.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/IdentifyResult.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/IdentifyResult.java diff --git 
a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/ImageUrl.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/ImageUrl.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/ImageUrl.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/ImageUrl.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/ListPersonGroupPersonsOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/ListPersonGroupPersonsOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/ListPersonGroupPersonsOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/ListPersonGroupPersonsOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/ListPersonGroupsOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/ListPersonGroupsOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/ListPersonGroupsOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/ListPersonGroupsOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/Makeup.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/Makeup.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/Makeup.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/Makeup.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/NameAndUserDataContract.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/NameAndUserDataContract.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/NameAndUserDataContract.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/NameAndUserDataContract.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/Noise.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/Noise.java similarity index 100% rename from 
cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/Noise.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/Noise.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/NoiseLevel.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/NoiseLevel.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/NoiseLevel.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/NoiseLevel.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/Occlusion.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/Occlusion.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/Occlusion.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/Occlusion.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/PersistedFace.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/PersistedFace.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/PersistedFace.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/PersistedFace.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/Person.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/Person.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/Person.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/Person.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/PersonGroup.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/PersonGroup.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/PersonGroup.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/PersonGroup.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/SimilarFace.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/SimilarFace.java similarity index 100% rename from 
cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/SimilarFace.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/SimilarFace.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/TrainingStatus.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/TrainingStatus.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/TrainingStatus.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/TrainingStatus.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/TrainingStatusType.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/TrainingStatusType.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/TrainingStatusType.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/TrainingStatusType.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/UpdateFaceListsOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/UpdateFaceListsOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/UpdateFaceListsOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/UpdateFaceListsOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/UpdateFaceOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/UpdateFaceOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/UpdateFaceOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/UpdateFaceOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/UpdatePersonFaceRequest.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/UpdatePersonFaceRequest.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/UpdatePersonFaceRequest.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/UpdatePersonFaceRequest.java diff --git 
a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/UpdatePersonGroupPersonsOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/UpdatePersonGroupPersonsOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/UpdatePersonGroupPersonsOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/UpdatePersonGroupPersonsOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/UpdatePersonGroupsOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/UpdatePersonGroupsOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/UpdatePersonGroupsOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/UpdatePersonGroupsOptionalParameter.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/VerifyFaceToFaceRequest.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/VerifyFaceToFaceRequest.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/VerifyFaceToFaceRequest.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/VerifyFaceToFaceRequest.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/VerifyFaceToPersonRequest.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/VerifyFaceToPersonRequest.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/VerifyFaceToPersonRequest.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/VerifyFaceToPersonRequest.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/VerifyResult.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/VerifyResult.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/VerifyResult.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/VerifyResult.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/package-info.java 
b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/package-info.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/package-info.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/models/package-info.java diff --git a/cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/package-info.java b/sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/package-info.java similarity index 100% rename from cognitiveservices/data-plane/vision/faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/package-info.java rename to sdk/cognitiveservices/ms-azure-cs-faceapi/src/main/java/com/microsoft/azure/cognitiveservices/vision/faceapi/package-info.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/pom.xml b/sdk/cognitiveservices/ms-azure-cs-imagesearch/pom.xml similarity index 98% rename from cognitiveservices/data-plane/search/bingimagesearch/pom.xml rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/pom.xml index 6ec62a353a1e5..629742c83a1d1 100644 --- a/cognitiveservices/data-plane/search/bingimagesearch/pom.xml +++ b/sdk/cognitiveservices/ms-azure-cs-imagesearch/pom.xml @@ -9,7 +9,7 @@ <groupId>com.microsoft.azure.cognitiveservices</groupId> <artifactId>azure-cognitiveservices-parent</artifactId> <version>1.0.2</version> - <relativePath>../../pom.xml</relativePath> + <relativePath>../pom.xml</relativePath> </parent> <artifactId>azure-cognitiveservices-imagesearch</artifactId> <version>1.0.2</version> diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/BingImageSearchAPI.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/BingImageSearchAPI.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/BingImageSearchAPI.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/BingImageSearchAPI.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/BingImageSearchManager.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/BingImageSearchManager.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/BingImageSearchManager.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/BingImageSearchManager.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/BingImages.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/BingImages.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/BingImages.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/BingImages.java diff --git 
a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/implementation/BingImageSearchAPIImpl.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/implementation/BingImageSearchAPIImpl.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/implementation/BingImageSearchAPIImpl.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/implementation/BingImageSearchAPIImpl.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/implementation/BingImagesImpl.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/implementation/BingImagesImpl.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/implementation/BingImagesImpl.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/implementation/BingImagesImpl.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/implementation/package-info.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/implementation/package-info.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/implementation/package-info.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/implementation/package-info.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/AggregateOffer.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/AggregateOffer.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/AggregateOffer.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/AggregateOffer.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/AggregateRating.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/AggregateRating.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/AggregateRating.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/AggregateRating.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Answer.java 
b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Answer.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Answer.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Answer.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/CollectionPage.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/CollectionPage.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/CollectionPage.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/CollectionPage.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/CreativeWork.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/CreativeWork.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/CreativeWork.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/CreativeWork.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Currency.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Currency.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Currency.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Currency.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/DetailsOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/DetailsOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/DetailsOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/DetailsOptionalParameter.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Error.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Error.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Error.java rename to 
sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Error.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ErrorCode.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ErrorCode.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ErrorCode.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ErrorCode.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ErrorResponse.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ErrorResponse.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ErrorResponse.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ErrorResponse.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ErrorResponseException.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ErrorResponseException.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ErrorResponseException.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ErrorResponseException.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ErrorSubCode.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ErrorSubCode.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ErrorSubCode.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ErrorSubCode.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Freshness.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Freshness.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Freshness.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Freshness.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Identifiable.java 
b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Identifiable.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Identifiable.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Identifiable.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageAspect.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageAspect.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageAspect.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageAspect.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageColor.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageColor.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageColor.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageColor.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageContent.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageContent.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageContent.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageContent.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageCropType.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageCropType.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageCropType.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageCropType.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageGallery.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageGallery.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageGallery.java rename to 
sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageGallery.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageInsightModule.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageInsightModule.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageInsightModule.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageInsightModule.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageInsights.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageInsights.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageInsights.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageInsights.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageInsightsImageCaption.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageInsightsImageCaption.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageInsightsImageCaption.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageInsightsImageCaption.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageLicense.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageLicense.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageLicense.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageLicense.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageObject.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageObject.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageObject.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageObject.java diff --git 
a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageSize.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageSize.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageSize.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageSize.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageTagsModule.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageTagsModule.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageTagsModule.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageTagsModule.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageType.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageType.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageType.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImageType.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImagesImageMetadata.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImagesImageMetadata.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImagesImageMetadata.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImagesImageMetadata.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImagesModel.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImagesModel.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImagesModel.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImagesModel.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImagesModule.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImagesModule.java similarity index 100% rename from 
cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImagesModule.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ImagesModule.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/InsightsTag.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/InsightsTag.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/InsightsTag.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/InsightsTag.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Intangible.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Intangible.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Intangible.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Intangible.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ItemAvailability.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ItemAvailability.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ItemAvailability.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ItemAvailability.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/MediaObject.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/MediaObject.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/MediaObject.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/MediaObject.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/NormalizedRectangle.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/NormalizedRectangle.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/NormalizedRectangle.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/NormalizedRectangle.java diff --git 
a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Offer.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Offer.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Offer.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Offer.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Organization.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Organization.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Organization.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Organization.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Person.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Person.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Person.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Person.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/PivotSuggestions.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/PivotSuggestions.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/PivotSuggestions.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/PivotSuggestions.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/PropertiesItem.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/PropertiesItem.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/PropertiesItem.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/PropertiesItem.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Query.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Query.java similarity index 100% rename from 
cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Query.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Query.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Rating.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Rating.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Rating.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Rating.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Recipe.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Recipe.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Recipe.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Recipe.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/RecipesModule.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/RecipesModule.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/RecipesModule.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/RecipesModule.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/RecognizedEntitiesModule.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/RecognizedEntitiesModule.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/RecognizedEntitiesModule.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/RecognizedEntitiesModule.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/RecognizedEntity.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/RecognizedEntity.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/RecognizedEntity.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/RecognizedEntity.java diff --git 
a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/RecognizedEntityGroup.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/RecognizedEntityGroup.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/RecognizedEntityGroup.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/RecognizedEntityGroup.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/RecognizedEntityRegion.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/RecognizedEntityRegion.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/RecognizedEntityRegion.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/RecognizedEntityRegion.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/RelatedCollectionsModule.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/RelatedCollectionsModule.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/RelatedCollectionsModule.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/RelatedCollectionsModule.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/RelatedSearchesModule.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/RelatedSearchesModule.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/RelatedSearchesModule.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/RelatedSearchesModule.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Response.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Response.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Response.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Response.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ResponseBase.java 
b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ResponseBase.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ResponseBase.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/ResponseBase.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/SafeSearch.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/SafeSearch.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/SafeSearch.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/SafeSearch.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/SearchOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/SearchOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/SearchOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/SearchOptionalParameter.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/SearchResultsAnswer.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/SearchResultsAnswer.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/SearchResultsAnswer.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/SearchResultsAnswer.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/StructuredValue.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/StructuredValue.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/StructuredValue.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/StructuredValue.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Thing.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Thing.java similarity index 100% rename from 
cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Thing.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/Thing.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/TrendingImages.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/TrendingImages.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/TrendingImages.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/TrendingImages.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/TrendingImagesCategory.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/TrendingImagesCategory.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/TrendingImagesCategory.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/TrendingImagesCategory.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/TrendingImagesTile.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/TrendingImagesTile.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/TrendingImagesTile.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/TrendingImagesTile.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/TrendingOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/TrendingOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/TrendingOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/TrendingOptionalParameter.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/WebPage.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/WebPage.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/WebPage.java rename to 
sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/WebPage.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/package-info.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/package-info.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/package-info.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/models/package-info.java diff --git a/cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/package-info.java b/sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/package-info.java similarity index 100% rename from cognitiveservices/data-plane/search/bingimagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/package-info.java rename to sdk/cognitiveservices/ms-azure-cs-imagesearch/src/main/java/com/microsoft/azure/cognitiveservices/search/imagesearch/package-info.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/pom.xml b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/pom.xml similarity index 98% rename from cognitiveservices/data-plane/language/luis/authoring/pom.xml rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/pom.xml index 878d915c0ed1b..9989b6af3ece1 100644 --- a/cognitiveservices/data-plane/language/luis/authoring/pom.xml +++ b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/pom.xml @@ -9,7 +9,7 @@ com.microsoft.azure.cognitiveservices azure-cognitiveservices-parent 1.0.2 - ../../../pom.xml + ../pom.xml azure-cognitiveservices-luis-authoring 1.0.2-beta diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/Apps.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/Apps.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/Apps.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/Apps.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/EndpointAPI.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/EndpointAPI.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/EndpointAPI.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/EndpointAPI.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/Examples.java 
b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/Examples.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/Examples.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/Examples.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/Features.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/Features.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/Features.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/Features.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/LUISAuthoringClient.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/LUISAuthoringClient.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/LUISAuthoringClient.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/LUISAuthoringClient.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/LUISAuthoringManager.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/LUISAuthoringManager.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/LUISAuthoringManager.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/LUISAuthoringManager.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/Models.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/Models.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/Models.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/Models.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/Patterns.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/Patterns.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/Patterns.java rename to 
sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/Patterns.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/Permissions.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/Permissions.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/Permissions.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/Permissions.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/Trains.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/Trains.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/Trains.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/Trains.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/Versions.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/Versions.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/Versions.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/Versions.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/implementation/AppsImpl.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/implementation/AppsImpl.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/implementation/AppsImpl.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/implementation/AppsImpl.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/implementation/ExamplesImpl.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/implementation/ExamplesImpl.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/implementation/ExamplesImpl.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/implementation/ExamplesImpl.java diff --git 
a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/implementation/FeaturesImpl.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/implementation/FeaturesImpl.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/implementation/FeaturesImpl.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/implementation/FeaturesImpl.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/implementation/LUISAuthoringClientImpl.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/implementation/LUISAuthoringClientImpl.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/implementation/LUISAuthoringClientImpl.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/implementation/LUISAuthoringClientImpl.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/implementation/ModelsImpl.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/implementation/ModelsImpl.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/implementation/ModelsImpl.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/implementation/ModelsImpl.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/implementation/PatternsImpl.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/implementation/PatternsImpl.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/implementation/PatternsImpl.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/implementation/PatternsImpl.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/implementation/PermissionsImpl.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/implementation/PermissionsImpl.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/implementation/PermissionsImpl.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/implementation/PermissionsImpl.java diff --git 
a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/implementation/TrainsImpl.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/implementation/TrainsImpl.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/implementation/TrainsImpl.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/implementation/TrainsImpl.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/implementation/VersionsImpl.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/implementation/VersionsImpl.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/implementation/VersionsImpl.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/implementation/VersionsImpl.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/implementation/package-info.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/implementation/package-info.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/implementation/package-info.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/implementation/package-info.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/AddCompositeEntityChildOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/AddCompositeEntityChildOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/AddCompositeEntityChildOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/AddCompositeEntityChildOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/AddCustomPrebuiltDomainModelsOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/AddCustomPrebuiltDomainModelsOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/AddCustomPrebuiltDomainModelsOptionalParameter.java rename to 
sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/AddCustomPrebuiltDomainModelsOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/AddEntityOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/AddEntityOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/AddEntityOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/AddEntityOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/AddExplicitListItemOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/AddExplicitListItemOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/AddExplicitListItemOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/AddExplicitListItemOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/AddHierarchicalEntityChildOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/AddHierarchicalEntityChildOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/AddHierarchicalEntityChildOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/AddHierarchicalEntityChildOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/AddIntentOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/AddIntentOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/AddIntentOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/AddIntentOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/AddPermissionsOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/AddPermissionsOptionalParameter.java similarity 
index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/AddPermissionsOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/AddPermissionsOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ApplicationCreateObject.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ApplicationCreateObject.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ApplicationCreateObject.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ApplicationCreateObject.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ApplicationInfoResponse.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ApplicationInfoResponse.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ApplicationInfoResponse.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ApplicationInfoResponse.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ApplicationPublishObject.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ApplicationPublishObject.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ApplicationPublishObject.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ApplicationPublishObject.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ApplicationSettingUpdateObject.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ApplicationSettingUpdateObject.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ApplicationSettingUpdateObject.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ApplicationSettingUpdateObject.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ApplicationSettings.java 
b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ApplicationSettings.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ApplicationSettings.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ApplicationSettings.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ApplicationUpdateObject.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ApplicationUpdateObject.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ApplicationUpdateObject.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ApplicationUpdateObject.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/AvailableCulture.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/AvailableCulture.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/AvailableCulture.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/AvailableCulture.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/AvailablePrebuiltEntityModel.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/AvailablePrebuiltEntityModel.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/AvailablePrebuiltEntityModel.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/AvailablePrebuiltEntityModel.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/BatchLabelExample.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/BatchLabelExample.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/BatchLabelExample.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/BatchLabelExample.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ChildEntity.java 
b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ChildEntity.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ChildEntity.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ChildEntity.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CloneOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CloneOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CloneOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CloneOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ClosedList.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ClosedList.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ClosedList.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ClosedList.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ClosedListEntityExtractor.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ClosedListEntityExtractor.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ClosedListEntityExtractor.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ClosedListEntityExtractor.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ClosedListModelCreateObject.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ClosedListModelCreateObject.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ClosedListModelCreateObject.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ClosedListModelCreateObject.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ClosedListModelPatchObject.java 
b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ClosedListModelPatchObject.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ClosedListModelPatchObject.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ClosedListModelPatchObject.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ClosedListModelUpdateObject.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ClosedListModelUpdateObject.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ClosedListModelUpdateObject.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ClosedListModelUpdateObject.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CollaboratorsArray.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CollaboratorsArray.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CollaboratorsArray.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CollaboratorsArray.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CompositeChildModelCreateObject.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CompositeChildModelCreateObject.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CompositeChildModelCreateObject.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CompositeChildModelCreateObject.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CompositeEntityExtractor.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CompositeEntityExtractor.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CompositeEntityExtractor.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CompositeEntityExtractor.java diff --git 
a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CompositeEntityModel.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CompositeEntityModel.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CompositeEntityModel.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CompositeEntityModel.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CreateClosedListEntityRoleOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CreateClosedListEntityRoleOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CreateClosedListEntityRoleOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CreateClosedListEntityRoleOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CreateCompositeEntityRoleOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CreateCompositeEntityRoleOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CreateCompositeEntityRoleOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CreateCompositeEntityRoleOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CreateCustomPrebuiltEntityRoleOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CreateCustomPrebuiltEntityRoleOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CreateCustomPrebuiltEntityRoleOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CreateCustomPrebuiltEntityRoleOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CreateEntityRoleOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CreateEntityRoleOptionalParameter.java similarity index 100% rename from 
cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CreateEntityRoleOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CreateEntityRoleOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CreateHierarchicalEntityRoleOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CreateHierarchicalEntityRoleOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CreateHierarchicalEntityRoleOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CreateHierarchicalEntityRoleOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CreatePatternAnyEntityRoleOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CreatePatternAnyEntityRoleOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CreatePatternAnyEntityRoleOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CreatePatternAnyEntityRoleOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CreatePrebuiltEntityRoleOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CreatePrebuiltEntityRoleOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CreatePrebuiltEntityRoleOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CreatePrebuiltEntityRoleOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CreateRegexEntityRoleOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CreateRegexEntityRoleOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CreateRegexEntityRoleOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CreateRegexEntityRoleOptionalParameter.java diff --git 
a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CustomPrebuiltModel.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CustomPrebuiltModel.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CustomPrebuiltModel.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/CustomPrebuiltModel.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/DeleteIntentOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/DeleteIntentOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/DeleteIntentOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/DeleteIntentOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/DeletePermissionsOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/DeletePermissionsOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/DeletePermissionsOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/DeletePermissionsOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/EndpointInfo.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/EndpointInfo.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/EndpointInfo.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/EndpointInfo.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/EnqueueTrainingResponse.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/EnqueueTrainingResponse.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/EnqueueTrainingResponse.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/EnqueueTrainingResponse.java 
diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/EntitiesSuggestionExample.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/EntitiesSuggestionExample.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/EntitiesSuggestionExample.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/EntitiesSuggestionExample.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/EntityExtractor.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/EntityExtractor.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/EntityExtractor.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/EntityExtractor.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/EntityLabel.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/EntityLabel.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/EntityLabel.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/EntityLabel.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/EntityLabelObject.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/EntityLabelObject.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/EntityLabelObject.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/EntityLabelObject.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/EntityModelInfo.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/EntityModelInfo.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/EntityModelInfo.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/EntityModelInfo.java diff --git 
a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/EntityPrediction.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/EntityPrediction.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/EntityPrediction.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/EntityPrediction.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/EntityRole.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/EntityRole.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/EntityRole.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/EntityRole.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/EntityRoleCreateObject.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/EntityRoleCreateObject.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/EntityRoleCreateObject.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/EntityRoleCreateObject.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/EntityRoleUpdateObject.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/EntityRoleUpdateObject.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/EntityRoleUpdateObject.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/EntityRoleUpdateObject.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ErrorResponse.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ErrorResponse.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ErrorResponse.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ErrorResponse.java diff --git 
a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ErrorResponseException.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ErrorResponseException.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ErrorResponseException.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ErrorResponseException.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ExampleLabelObject.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ExampleLabelObject.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ExampleLabelObject.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ExampleLabelObject.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ExamplesMethodOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ExamplesMethodOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ExamplesMethodOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ExamplesMethodOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ExplicitListItem.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ExplicitListItem.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ExplicitListItem.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ExplicitListItem.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ExplicitListItemCreateObject.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ExplicitListItemCreateObject.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ExplicitListItemCreateObject.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ExplicitListItemCreateObject.java diff --git 
a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ExplicitListItemUpdateObject.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ExplicitListItemUpdateObject.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ExplicitListItemUpdateObject.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ExplicitListItemUpdateObject.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/FeatureInfoObject.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/FeatureInfoObject.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/FeatureInfoObject.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/FeatureInfoObject.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/FeaturesResponseObject.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/FeaturesResponseObject.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/FeaturesResponseObject.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/FeaturesResponseObject.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/GetEntitySuggestionsOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/GetEntitySuggestionsOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/GetEntitySuggestionsOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/GetEntitySuggestionsOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/GetIntentPatternsOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/GetIntentPatternsOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/GetIntentPatternsOptionalParameter.java rename to 
sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/GetIntentPatternsOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/GetIntentSuggestionsOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/GetIntentSuggestionsOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/GetIntentSuggestionsOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/GetIntentSuggestionsOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/GetPatternAnyEntityInfosOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/GetPatternAnyEntityInfosOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/GetPatternAnyEntityInfosOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/GetPatternAnyEntityInfosOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/GetPatternsOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/GetPatternsOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/GetPatternsOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/GetPatternsOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/GetRegexEntityInfosOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/GetRegexEntityInfosOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/GetRegexEntityInfosOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/GetRegexEntityInfosOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/HierarchicalChildEntity.java 
b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/HierarchicalChildEntity.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/HierarchicalChildEntity.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/HierarchicalChildEntity.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/HierarchicalChildModelCreateObject.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/HierarchicalChildModelCreateObject.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/HierarchicalChildModelCreateObject.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/HierarchicalChildModelCreateObject.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/HierarchicalChildModelUpdateObject.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/HierarchicalChildModelUpdateObject.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/HierarchicalChildModelUpdateObject.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/HierarchicalChildModelUpdateObject.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/HierarchicalEntityExtractor.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/HierarchicalEntityExtractor.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/HierarchicalEntityExtractor.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/HierarchicalEntityExtractor.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/HierarchicalEntityModel.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/HierarchicalEntityModel.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/HierarchicalEntityModel.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/HierarchicalEntityModel.java diff --git 
a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/HierarchicalModel.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/HierarchicalModel.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/HierarchicalModel.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/HierarchicalModel.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ImportMethodAppsOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ImportMethodAppsOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ImportMethodAppsOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ImportMethodAppsOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ImportMethodVersionsOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ImportMethodVersionsOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ImportMethodVersionsOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ImportMethodVersionsOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/IntentClassifier.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/IntentClassifier.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/IntentClassifier.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/IntentClassifier.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/IntentPrediction.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/IntentPrediction.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/IntentPrediction.java rename to 
sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/IntentPrediction.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/IntentsSuggestionExample.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/IntentsSuggestionExample.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/IntentsSuggestionExample.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/IntentsSuggestionExample.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/JSONEntity.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/JSONEntity.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/JSONEntity.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/JSONEntity.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/JSONModelFeature.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/JSONModelFeature.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/JSONModelFeature.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/JSONModelFeature.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/JSONRegexFeature.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/JSONRegexFeature.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/JSONRegexFeature.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/JSONRegexFeature.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/JSONUtterance.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/JSONUtterance.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/JSONUtterance.java rename to 
sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/JSONUtterance.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/LabelExampleResponse.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/LabelExampleResponse.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/LabelExampleResponse.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/LabelExampleResponse.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/LabelTextObject.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/LabelTextObject.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/LabelTextObject.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/LabelTextObject.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/LabeledUtterance.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/LabeledUtterance.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/LabeledUtterance.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/LabeledUtterance.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ListAppsOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ListAppsOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ListAppsOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ListAppsOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ListClosedListsOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ListClosedListsOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ListClosedListsOptionalParameter.java rename to 
sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ListClosedListsOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ListCompositeEntitiesOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ListCompositeEntitiesOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ListCompositeEntitiesOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ListCompositeEntitiesOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ListEntitiesOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ListEntitiesOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ListEntitiesOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ListEntitiesOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ListExamplesOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ListExamplesOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ListExamplesOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ListExamplesOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ListFeaturesOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ListFeaturesOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ListFeaturesOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ListFeaturesOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ListHierarchicalEntitiesOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ListHierarchicalEntitiesOptionalParameter.java similarity index 100% rename 
from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ListHierarchicalEntitiesOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ListHierarchicalEntitiesOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ListIntentsOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ListIntentsOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ListIntentsOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ListIntentsOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ListModelsOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ListModelsOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ListModelsOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ListModelsOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ListPhraseListsOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ListPhraseListsOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ListPhraseListsOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ListPhraseListsOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ListPrebuiltsOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ListPrebuiltsOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ListPrebuiltsOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ListPrebuiltsOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ListVersionsOptionalParameter.java 
b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ListVersionsOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ListVersionsOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ListVersionsOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/LuisApp.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/LuisApp.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/LuisApp.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/LuisApp.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ModelCreateObject.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ModelCreateObject.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ModelCreateObject.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ModelCreateObject.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ModelInfo.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ModelInfo.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ModelInfo.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ModelInfo.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ModelInfoResponse.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ModelInfoResponse.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ModelInfoResponse.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ModelInfoResponse.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ModelTrainingDetails.java 
b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ModelTrainingDetails.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ModelTrainingDetails.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ModelTrainingDetails.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ModelTrainingInfo.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ModelTrainingInfo.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ModelTrainingInfo.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ModelTrainingInfo.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ModelUpdateObject.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ModelUpdateObject.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ModelUpdateObject.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ModelUpdateObject.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/OperationError.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/OperationError.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/OperationError.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/OperationError.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/OperationStatus.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/OperationStatus.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/OperationStatus.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/OperationStatus.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/OperationStatusType.java 
b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/OperationStatusType.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/OperationStatusType.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/OperationStatusType.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PatchClosedListOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PatchClosedListOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PatchClosedListOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PatchClosedListOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PatternAny.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PatternAny.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PatternAny.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PatternAny.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PatternAnyEntityExtractor.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PatternAnyEntityExtractor.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PatternAnyEntityExtractor.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PatternAnyEntityExtractor.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PatternAnyModelCreateObject.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PatternAnyModelCreateObject.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PatternAnyModelCreateObject.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PatternAnyModelCreateObject.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PatternAnyModelUpdateObject.java 
b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PatternAnyModelUpdateObject.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PatternAnyModelUpdateObject.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PatternAnyModelUpdateObject.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PatternCreateObject.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PatternCreateObject.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PatternCreateObject.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PatternCreateObject.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PatternFeatureInfo.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PatternFeatureInfo.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PatternFeatureInfo.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PatternFeatureInfo.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PatternRule.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PatternRule.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PatternRule.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PatternRule.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PatternRuleCreateObject.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PatternRuleCreateObject.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PatternRuleCreateObject.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PatternRuleCreateObject.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PatternRuleInfo.java 
b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PatternRuleInfo.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PatternRuleInfo.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PatternRuleInfo.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PatternRuleUpdateObject.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PatternRuleUpdateObject.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PatternRuleUpdateObject.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PatternRuleUpdateObject.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PatternUpdateObject.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PatternUpdateObject.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PatternUpdateObject.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PatternUpdateObject.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PersonalAssistantsResponse.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PersonalAssistantsResponse.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PersonalAssistantsResponse.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PersonalAssistantsResponse.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PhraseListFeatureInfo.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PhraseListFeatureInfo.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PhraseListFeatureInfo.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PhraseListFeatureInfo.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PhraselistCreateObject.java 
b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PhraselistCreateObject.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PhraselistCreateObject.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PhraselistCreateObject.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PhraselistUpdateObject.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PhraselistUpdateObject.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PhraselistUpdateObject.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PhraselistUpdateObject.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PrebuiltDomain.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PrebuiltDomain.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PrebuiltDomain.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PrebuiltDomain.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PrebuiltDomainCreateBaseObject.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PrebuiltDomainCreateBaseObject.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PrebuiltDomainCreateBaseObject.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PrebuiltDomainCreateBaseObject.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PrebuiltDomainCreateObject.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PrebuiltDomainCreateObject.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PrebuiltDomainCreateObject.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PrebuiltDomainCreateObject.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PrebuiltDomainItem.java 
b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PrebuiltDomainItem.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PrebuiltDomainItem.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PrebuiltDomainItem.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PrebuiltDomainModelCreateObject.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PrebuiltDomainModelCreateObject.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PrebuiltDomainModelCreateObject.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PrebuiltDomainModelCreateObject.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PrebuiltDomainObject.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PrebuiltDomainObject.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PrebuiltDomainObject.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PrebuiltDomainObject.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PrebuiltEntity.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PrebuiltEntity.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PrebuiltEntity.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PrebuiltEntity.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PrebuiltEntityExtractor.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PrebuiltEntityExtractor.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PrebuiltEntityExtractor.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PrebuiltEntityExtractor.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ProductionOrStagingEndpointInfo.java 
b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ProductionOrStagingEndpointInfo.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ProductionOrStagingEndpointInfo.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/ProductionOrStagingEndpointInfo.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PublishSettingUpdateObject.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PublishSettingUpdateObject.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PublishSettingUpdateObject.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PublishSettingUpdateObject.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PublishSettings.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PublishSettings.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PublishSettings.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/PublishSettings.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/RegexEntity.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/RegexEntity.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/RegexEntity.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/RegexEntity.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/RegexEntityExtractor.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/RegexEntityExtractor.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/RegexEntityExtractor.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/RegexEntityExtractor.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/RegexModelCreateObject.java 
b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/RegexModelCreateObject.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/RegexModelCreateObject.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/RegexModelCreateObject.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/RegexModelUpdateObject.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/RegexModelUpdateObject.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/RegexModelUpdateObject.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/RegexModelUpdateObject.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/SubClosedList.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/SubClosedList.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/SubClosedList.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/SubClosedList.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/SubClosedListResponse.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/SubClosedListResponse.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/SubClosedListResponse.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/SubClosedListResponse.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/TaskUpdateObject.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/TaskUpdateObject.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/TaskUpdateObject.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/TaskUpdateObject.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/TrainingStatus.java 
b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/TrainingStatus.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/TrainingStatus.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/TrainingStatus.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdateClosedListEntityRoleOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdateClosedListEntityRoleOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdateClosedListEntityRoleOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdateClosedListEntityRoleOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdateCompositeEntityRoleOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdateCompositeEntityRoleOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdateCompositeEntityRoleOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdateCompositeEntityRoleOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdateCustomPrebuiltEntityRoleOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdateCustomPrebuiltEntityRoleOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdateCustomPrebuiltEntityRoleOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdateCustomPrebuiltEntityRoleOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdateEntityOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdateEntityOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdateEntityOptionalParameter.java rename to 
sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdateEntityOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdateEntityRoleOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdateEntityRoleOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdateEntityRoleOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdateEntityRoleOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdateExplicitListItemOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdateExplicitListItemOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdateExplicitListItemOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdateExplicitListItemOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdateHierarchicalEntityChildOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdateHierarchicalEntityChildOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdateHierarchicalEntityChildOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdateHierarchicalEntityChildOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdateHierarchicalEntityRoleOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdateHierarchicalEntityRoleOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdateHierarchicalEntityRoleOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdateHierarchicalEntityRoleOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdateIntentOptionalParameter.java 
b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdateIntentOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdateIntentOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdateIntentOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdatePatternAnyEntityRoleOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdatePatternAnyEntityRoleOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdatePatternAnyEntityRoleOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdatePatternAnyEntityRoleOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdatePermissionsOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdatePermissionsOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdatePermissionsOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdatePermissionsOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdatePhraseListOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdatePhraseListOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdatePhraseListOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdatePhraseListOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdatePrebuiltEntityRoleOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdatePrebuiltEntityRoleOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdatePrebuiltEntityRoleOptionalParameter.java rename to 
sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdatePrebuiltEntityRoleOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdateRegexEntityRoleOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdateRegexEntityRoleOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdateRegexEntityRoleOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdateRegexEntityRoleOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdateSettingsOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdateSettingsOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdateSettingsOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdateSettingsOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdateVersionsOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdateVersionsOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdateVersionsOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UpdateVersionsOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UserAccessList.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UserAccessList.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UserAccessList.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UserAccessList.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UserCollaborator.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UserCollaborator.java similarity index 100% rename from 
cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UserCollaborator.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/UserCollaborator.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/VersionInfo.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/VersionInfo.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/VersionInfo.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/VersionInfo.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/WordListBaseUpdateObject.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/WordListBaseUpdateObject.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/WordListBaseUpdateObject.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/WordListBaseUpdateObject.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/WordListObject.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/WordListObject.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/WordListObject.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/WordListObject.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/package-info.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/package-info.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/package-info.java rename to sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/models/package-info.java diff --git a/cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/package-info.java b/sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/package-info.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/package-info.java rename to 
sdk/cognitiveservices/ms-azure-cs-luis-authoring/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/authoring/package-info.java
diff --git a/cognitiveservices/data-plane/language/luis/runtime/pom.xml b/sdk/cognitiveservices/ms-azure-cs-luis-runtime/pom.xml
old mode 100755
new mode 100644
similarity index 98%
rename from cognitiveservices/data-plane/language/luis/runtime/pom.xml
rename to sdk/cognitiveservices/ms-azure-cs-luis-runtime/pom.xml
index e8beb81425867..5975fc3328ad7
--- a/cognitiveservices/data-plane/language/luis/runtime/pom.xml
+++ b/sdk/cognitiveservices/ms-azure-cs-luis-runtime/pom.xml
@@ -9,7 +9,7 @@
     <groupId>com.microsoft.azure.cognitiveservices</groupId>
     <artifactId>azure-cognitiveservices-parent</artifactId>
     <version>1.0.2</version>
-    <relativePath>../../../pom.xml</relativePath>
+    <relativePath>../pom.xml</relativePath>
   </parent>
   <artifactId>azure-cognitiveservices-luis-runtime</artifactId>
   <version>1.0.2-beta</version>
diff --git a/cognitiveservices/data-plane/language/luis/runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/EndpointAPI.java b/sdk/cognitiveservices/ms-azure-cs-luis-runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/EndpointAPI.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/EndpointAPI.java rename to sdk/cognitiveservices/ms-azure-cs-luis-runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/EndpointAPI.java diff --git a/cognitiveservices/data-plane/language/luis/runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/LuisRuntimeAPI.java b/sdk/cognitiveservices/ms-azure-cs-luis-runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/LuisRuntimeAPI.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/LuisRuntimeAPI.java rename to sdk/cognitiveservices/ms-azure-cs-luis-runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/LuisRuntimeAPI.java diff --git a/cognitiveservices/data-plane/language/luis/runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/LuisRuntimeManager.java b/sdk/cognitiveservices/ms-azure-cs-luis-runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/LuisRuntimeManager.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/LuisRuntimeManager.java rename to sdk/cognitiveservices/ms-azure-cs-luis-runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/LuisRuntimeManager.java diff --git a/cognitiveservices/data-plane/language/luis/runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/Predictions.java b/sdk/cognitiveservices/ms-azure-cs-luis-runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/Predictions.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/luis/runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/Predictions.java rename to sdk/cognitiveservices/ms-azure-cs-luis-runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/Predictions.java diff --git a/cognitiveservices/data-plane/language/luis/runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/implementation/LuisRuntimeAPIImpl.java
b/sdk/cognitiveservices/ms-azure-cs-luis-runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/implementation/LuisRuntimeAPIImpl.java similarity index 100% rename from cognitiveservices/data-plane/language/luis/runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/implementation/LuisRuntimeAPIImpl.java rename to sdk/cognitiveservices/ms-azure-cs-luis-runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/implementation/LuisRuntimeAPIImpl.java diff --git a/cognitiveservices/data-plane/language/luis/runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/implementation/PredictionsImpl.java b/sdk/cognitiveservices/ms-azure-cs-luis-runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/implementation/PredictionsImpl.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/luis/runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/implementation/PredictionsImpl.java rename to sdk/cognitiveservices/ms-azure-cs-luis-runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/implementation/PredictionsImpl.java diff --git a/cognitiveservices/data-plane/language/luis/runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/implementation/package-info.java b/sdk/cognitiveservices/ms-azure-cs-luis-runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/implementation/package-info.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/luis/runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/implementation/package-info.java rename to sdk/cognitiveservices/ms-azure-cs-luis-runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/implementation/package-info.java diff --git a/cognitiveservices/data-plane/language/luis/runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/APIError.java b/sdk/cognitiveservices/ms-azure-cs-luis-runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/APIError.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/luis/runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/APIError.java rename to sdk/cognitiveservices/ms-azure-cs-luis-runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/APIError.java diff --git a/cognitiveservices/data-plane/language/luis/runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/APIErrorException.java b/sdk/cognitiveservices/ms-azure-cs-luis-runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/APIErrorException.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/luis/runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/APIErrorException.java rename to sdk/cognitiveservices/ms-azure-cs-luis-runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/APIErrorException.java diff --git a/cognitiveservices/data-plane/language/luis/runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/AzureRegions.java 
b/sdk/cognitiveservices/ms-azure-cs-luis-runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/AzureRegions.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/luis/runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/AzureRegions.java rename to sdk/cognitiveservices/ms-azure-cs-luis-runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/AzureRegions.java diff --git a/cognitiveservices/data-plane/language/luis/runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/CompositeChildModel.java b/sdk/cognitiveservices/ms-azure-cs-luis-runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/CompositeChildModel.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/luis/runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/CompositeChildModel.java rename to sdk/cognitiveservices/ms-azure-cs-luis-runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/CompositeChildModel.java diff --git a/cognitiveservices/data-plane/language/luis/runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/CompositeEntityModel.java b/sdk/cognitiveservices/ms-azure-cs-luis-runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/CompositeEntityModel.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/luis/runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/CompositeEntityModel.java rename to sdk/cognitiveservices/ms-azure-cs-luis-runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/CompositeEntityModel.java diff --git a/cognitiveservices/data-plane/language/luis/runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/EntityModel.java b/sdk/cognitiveservices/ms-azure-cs-luis-runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/EntityModel.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/luis/runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/EntityModel.java rename to sdk/cognitiveservices/ms-azure-cs-luis-runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/EntityModel.java diff --git a/cognitiveservices/data-plane/language/luis/runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/EntityWithResolution.java b/sdk/cognitiveservices/ms-azure-cs-luis-runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/EntityWithResolution.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/luis/runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/EntityWithResolution.java rename to sdk/cognitiveservices/ms-azure-cs-luis-runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/EntityWithResolution.java diff --git a/cognitiveservices/data-plane/language/luis/runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/EntityWithScore.java 
b/sdk/cognitiveservices/ms-azure-cs-luis-runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/EntityWithScore.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/luis/runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/EntityWithScore.java rename to sdk/cognitiveservices/ms-azure-cs-luis-runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/EntityWithScore.java diff --git a/cognitiveservices/data-plane/language/luis/runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/IntentModel.java b/sdk/cognitiveservices/ms-azure-cs-luis-runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/IntentModel.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/luis/runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/IntentModel.java rename to sdk/cognitiveservices/ms-azure-cs-luis-runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/IntentModel.java diff --git a/cognitiveservices/data-plane/language/luis/runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/LuisResult.java b/sdk/cognitiveservices/ms-azure-cs-luis-runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/LuisResult.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/luis/runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/LuisResult.java rename to sdk/cognitiveservices/ms-azure-cs-luis-runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/LuisResult.java diff --git a/cognitiveservices/data-plane/language/luis/runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/ResolveOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-luis-runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/ResolveOptionalParameter.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/luis/runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/ResolveOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-luis-runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/ResolveOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/luis/runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/Sentiment.java b/sdk/cognitiveservices/ms-azure-cs-luis-runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/Sentiment.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/luis/runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/Sentiment.java rename to sdk/cognitiveservices/ms-azure-cs-luis-runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/Sentiment.java diff --git a/cognitiveservices/data-plane/language/luis/runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/package-info.java 
b/sdk/cognitiveservices/ms-azure-cs-luis-runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/package-info.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/luis/runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/package-info.java rename to sdk/cognitiveservices/ms-azure-cs-luis-runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/models/package-info.java diff --git a/cognitiveservices/data-plane/language/luis/runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/package-info.java b/sdk/cognitiveservices/ms-azure-cs-luis-runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/package-info.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/luis/runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/package-info.java rename to sdk/cognitiveservices/ms-azure-cs-luis-runtime/src/main/java/com/microsoft/azure/cognitiveservices/language/luis/runtime/package-info.java
diff --git a/cognitiveservices/data-plane/search/bingnewssearch/pom.xml b/sdk/cognitiveservices/ms-azure-cs-newssearch/pom.xml
similarity index 98%
rename from cognitiveservices/data-plane/search/bingnewssearch/pom.xml
rename to sdk/cognitiveservices/ms-azure-cs-newssearch/pom.xml
index 97d457b0420a1..e876be271200c 100644
--- a/cognitiveservices/data-plane/search/bingnewssearch/pom.xml
+++ b/sdk/cognitiveservices/ms-azure-cs-newssearch/pom.xml
@@ -9,7 +9,7 @@
     <groupId>com.microsoft.azure.cognitiveservices</groupId>
     <artifactId>azure-cognitiveservices-parent</artifactId>
     <version>1.0.2</version>
-    <relativePath>../../pom.xml</relativePath>
+    <relativePath>../pom.xml</relativePath>
   </parent>
   <artifactId>azure-cognitiveservices-newssearch</artifactId>
   <version>1.0.2</version>
diff --git a/cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/BingNews.java b/sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/BingNews.java similarity index 100% rename from cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/BingNews.java rename to sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/BingNews.java diff --git a/cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/BingNewsSearchAPI.java b/sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/BingNewsSearchAPI.java similarity index 100% rename from cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/BingNewsSearchAPI.java rename to sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/BingNewsSearchAPI.java diff --git a/cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/BingNewsSearchManager.java b/sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/BingNewsSearchManager.java similarity index 100% rename from cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/BingNewsSearchManager.java rename to
sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/BingNewsSearchManager.java diff --git a/cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/implementation/BingNewsImpl.java b/sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/implementation/BingNewsImpl.java similarity index 100% rename from cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/implementation/BingNewsImpl.java rename to sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/implementation/BingNewsImpl.java diff --git a/cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/implementation/BingNewsSearchAPIImpl.java b/sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/implementation/BingNewsSearchAPIImpl.java similarity index 100% rename from cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/implementation/BingNewsSearchAPIImpl.java rename to sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/implementation/BingNewsSearchAPIImpl.java diff --git a/cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/implementation/package-info.java b/sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/implementation/package-info.java similarity index 100% rename from cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/implementation/package-info.java rename to sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/implementation/package-info.java diff --git a/cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/Answer.java b/sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/Answer.java similarity index 100% rename from cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/Answer.java rename to sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/Answer.java diff --git a/cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/Article.java b/sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/Article.java similarity index 100% rename from cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/Article.java rename to sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/Article.java diff --git 
a/cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/CategoryOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/CategoryOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/CategoryOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/CategoryOptionalParameter.java diff --git a/cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/CreativeWork.java b/sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/CreativeWork.java similarity index 100% rename from cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/CreativeWork.java rename to sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/CreativeWork.java diff --git a/cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/Error.java b/sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/Error.java similarity index 100% rename from cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/Error.java rename to sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/Error.java diff --git a/cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/ErrorCode.java b/sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/ErrorCode.java similarity index 100% rename from cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/ErrorCode.java rename to sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/ErrorCode.java diff --git a/cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/ErrorResponse.java b/sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/ErrorResponse.java similarity index 100% rename from cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/ErrorResponse.java rename to sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/ErrorResponse.java diff --git a/cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/ErrorResponseException.java b/sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/ErrorResponseException.java similarity index 100% rename from 
cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/ErrorResponseException.java rename to sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/ErrorResponseException.java diff --git a/cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/ErrorSubCode.java b/sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/ErrorSubCode.java similarity index 100% rename from cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/ErrorSubCode.java rename to sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/ErrorSubCode.java diff --git a/cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/Freshness.java b/sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/Freshness.java similarity index 100% rename from cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/Freshness.java rename to sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/Freshness.java diff --git a/cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/Identifiable.java b/sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/Identifiable.java similarity index 100% rename from cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/Identifiable.java rename to sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/Identifiable.java diff --git a/cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/ImageObject.java b/sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/ImageObject.java similarity index 100% rename from cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/ImageObject.java rename to sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/ImageObject.java diff --git a/cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/MediaObject.java b/sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/MediaObject.java similarity index 100% rename from cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/MediaObject.java rename to sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/MediaObject.java diff --git 
a/cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/NewsArticle.java b/sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/NewsArticle.java similarity index 100% rename from cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/NewsArticle.java rename to sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/NewsArticle.java diff --git a/cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/NewsModel.java b/sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/NewsModel.java similarity index 100% rename from cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/NewsModel.java rename to sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/NewsModel.java diff --git a/cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/NewsTopic.java b/sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/NewsTopic.java similarity index 100% rename from cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/NewsTopic.java rename to sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/NewsTopic.java diff --git a/cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/Organization.java b/sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/Organization.java similarity index 100% rename from cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/Organization.java rename to sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/Organization.java diff --git a/cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/Query.java b/sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/Query.java similarity index 100% rename from cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/Query.java rename to sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/Query.java diff --git a/cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/Response.java b/sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/Response.java similarity index 100% rename from 
cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/Response.java rename to sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/Response.java diff --git a/cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/ResponseBase.java b/sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/ResponseBase.java similarity index 100% rename from cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/ResponseBase.java rename to sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/ResponseBase.java diff --git a/cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/SafeSearch.java b/sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/SafeSearch.java similarity index 100% rename from cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/SafeSearch.java rename to sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/SafeSearch.java diff --git a/cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/SearchOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/SearchOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/SearchOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/SearchOptionalParameter.java diff --git a/cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/SearchResultsAnswer.java b/sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/SearchResultsAnswer.java similarity index 100% rename from cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/SearchResultsAnswer.java rename to sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/SearchResultsAnswer.java diff --git a/cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/TextFormat.java b/sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/TextFormat.java similarity index 100% rename from cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/TextFormat.java rename to sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/TextFormat.java diff --git 
a/cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/Thing.java b/sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/Thing.java similarity index 100% rename from cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/Thing.java rename to sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/Thing.java diff --git a/cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/TrendingOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/TrendingOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/TrendingOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/TrendingOptionalParameter.java diff --git a/cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/TrendingTopics.java b/sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/TrendingTopics.java similarity index 100% rename from cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/TrendingTopics.java rename to sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/TrendingTopics.java diff --git a/cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/VideoObject.java b/sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/VideoObject.java similarity index 100% rename from cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/VideoObject.java rename to sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/VideoObject.java diff --git a/cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/package-info.java b/sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/package-info.java similarity index 100% rename from cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/package-info.java rename to sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/models/package-info.java diff --git a/cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/package-info.java b/sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/package-info.java similarity index 100% rename from 
cognitiveservices/data-plane/search/bingnewssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/package-info.java rename to sdk/cognitiveservices/ms-azure-cs-newssearch/src/main/java/com/microsoft/azure/cognitiveservices/search/newssearch/package-info.java diff --git a/cognitiveservices/data-plane/language/bingspellcheck/pom.xml b/sdk/cognitiveservices/ms-azure-cs-spellcheck/pom.xml old mode 100755 new mode 100644 similarity index 98% rename from cognitiveservices/data-plane/language/bingspellcheck/pom.xml rename to sdk/cognitiveservices/ms-azure-cs-spellcheck/pom.xml index a175bc04e384f..3d2478a24caee --- a/cognitiveservices/data-plane/language/bingspellcheck/pom.xml +++ b/sdk/cognitiveservices/ms-azure-cs-spellcheck/pom.xml @@ -9,7 +9,7 @@ com.microsoft.azure.cognitiveservices azure-cognitiveservices-parent 1.0.2 - ../../pom.xml + ../pom.xml 1.0.2 azure-cognitiveservices-spellcheck diff --git a/cognitiveservices/data-plane/language/bingspellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/BingSpellCheckAPI.java b/sdk/cognitiveservices/ms-azure-cs-spellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/BingSpellCheckAPI.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/bingspellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/BingSpellCheckAPI.java rename to sdk/cognitiveservices/ms-azure-cs-spellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/BingSpellCheckAPI.java diff --git a/cognitiveservices/data-plane/language/bingspellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/BingSpellCheckManager.java b/sdk/cognitiveservices/ms-azure-cs-spellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/BingSpellCheckManager.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/bingspellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/BingSpellCheckManager.java rename to sdk/cognitiveservices/ms-azure-cs-spellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/BingSpellCheckManager.java diff --git a/cognitiveservices/data-plane/language/bingspellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/BingSpellCheckOperations.java b/sdk/cognitiveservices/ms-azure-cs-spellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/BingSpellCheckOperations.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/bingspellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/BingSpellCheckOperations.java rename to sdk/cognitiveservices/ms-azure-cs-spellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/BingSpellCheckOperations.java diff --git a/cognitiveservices/data-plane/language/bingspellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/implementation/BingSpellCheckAPIImpl.java b/sdk/cognitiveservices/ms-azure-cs-spellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/implementation/BingSpellCheckAPIImpl.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/bingspellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/implementation/BingSpellCheckAPIImpl.java rename to 
sdk/cognitiveservices/ms-azure-cs-spellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/implementation/BingSpellCheckAPIImpl.java diff --git a/cognitiveservices/data-plane/language/bingspellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/implementation/BingSpellCheckOperationsImpl.java b/sdk/cognitiveservices/ms-azure-cs-spellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/implementation/BingSpellCheckOperationsImpl.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/bingspellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/implementation/BingSpellCheckOperationsImpl.java rename to sdk/cognitiveservices/ms-azure-cs-spellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/implementation/BingSpellCheckOperationsImpl.java diff --git a/cognitiveservices/data-plane/language/bingspellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/implementation/package-info.java b/sdk/cognitiveservices/ms-azure-cs-spellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/implementation/package-info.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/bingspellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/implementation/package-info.java rename to sdk/cognitiveservices/ms-azure-cs-spellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/implementation/package-info.java diff --git a/cognitiveservices/data-plane/language/bingspellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/ActionType.java b/sdk/cognitiveservices/ms-azure-cs-spellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/ActionType.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/bingspellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/ActionType.java rename to sdk/cognitiveservices/ms-azure-cs-spellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/ActionType.java diff --git a/cognitiveservices/data-plane/language/bingspellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/Answer.java b/sdk/cognitiveservices/ms-azure-cs-spellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/Answer.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/bingspellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/Answer.java rename to sdk/cognitiveservices/ms-azure-cs-spellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/Answer.java diff --git a/cognitiveservices/data-plane/language/bingspellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/Error.java b/sdk/cognitiveservices/ms-azure-cs-spellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/Error.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/bingspellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/Error.java rename to 
sdk/cognitiveservices/ms-azure-cs-spellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/Error.java diff --git a/cognitiveservices/data-plane/language/bingspellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/ErrorCode.java b/sdk/cognitiveservices/ms-azure-cs-spellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/ErrorCode.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/bingspellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/ErrorCode.java rename to sdk/cognitiveservices/ms-azure-cs-spellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/ErrorCode.java diff --git a/cognitiveservices/data-plane/language/bingspellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/ErrorResponse.java b/sdk/cognitiveservices/ms-azure-cs-spellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/ErrorResponse.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/bingspellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/ErrorResponse.java rename to sdk/cognitiveservices/ms-azure-cs-spellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/ErrorResponse.java diff --git a/cognitiveservices/data-plane/language/bingspellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/ErrorResponseException.java b/sdk/cognitiveservices/ms-azure-cs-spellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/ErrorResponseException.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/bingspellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/ErrorResponseException.java rename to sdk/cognitiveservices/ms-azure-cs-spellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/ErrorResponseException.java diff --git a/cognitiveservices/data-plane/language/bingspellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/ErrorSubCode.java b/sdk/cognitiveservices/ms-azure-cs-spellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/ErrorSubCode.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/bingspellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/ErrorSubCode.java rename to sdk/cognitiveservices/ms-azure-cs-spellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/ErrorSubCode.java diff --git a/cognitiveservices/data-plane/language/bingspellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/ErrorType.java b/sdk/cognitiveservices/ms-azure-cs-spellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/ErrorType.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/bingspellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/ErrorType.java rename to sdk/cognitiveservices/ms-azure-cs-spellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/ErrorType.java diff --git 
a/cognitiveservices/data-plane/language/bingspellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/Identifiable.java b/sdk/cognitiveservices/ms-azure-cs-spellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/Identifiable.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/bingspellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/Identifiable.java rename to sdk/cognitiveservices/ms-azure-cs-spellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/Identifiable.java diff --git a/cognitiveservices/data-plane/language/bingspellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/Response.java b/sdk/cognitiveservices/ms-azure-cs-spellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/Response.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/bingspellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/Response.java rename to sdk/cognitiveservices/ms-azure-cs-spellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/Response.java diff --git a/cognitiveservices/data-plane/language/bingspellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/ResponseBase.java b/sdk/cognitiveservices/ms-azure-cs-spellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/ResponseBase.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/bingspellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/ResponseBase.java rename to sdk/cognitiveservices/ms-azure-cs-spellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/ResponseBase.java diff --git a/cognitiveservices/data-plane/language/bingspellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/SpellCheck.java b/sdk/cognitiveservices/ms-azure-cs-spellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/SpellCheck.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/bingspellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/SpellCheck.java rename to sdk/cognitiveservices/ms-azure-cs-spellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/SpellCheck.java diff --git a/cognitiveservices/data-plane/language/bingspellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/SpellCheckerOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-spellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/SpellCheckerOptionalParameter.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/bingspellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/SpellCheckerOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-spellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/SpellCheckerOptionalParameter.java diff --git 
a/cognitiveservices/data-plane/language/bingspellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/SpellingFlaggedToken.java b/sdk/cognitiveservices/ms-azure-cs-spellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/SpellingFlaggedToken.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/bingspellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/SpellingFlaggedToken.java rename to sdk/cognitiveservices/ms-azure-cs-spellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/SpellingFlaggedToken.java diff --git a/cognitiveservices/data-plane/language/bingspellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/SpellingTokenSuggestion.java b/sdk/cognitiveservices/ms-azure-cs-spellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/SpellingTokenSuggestion.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/bingspellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/SpellingTokenSuggestion.java rename to sdk/cognitiveservices/ms-azure-cs-spellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/SpellingTokenSuggestion.java diff --git a/cognitiveservices/data-plane/language/bingspellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/package-info.java b/sdk/cognitiveservices/ms-azure-cs-spellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/package-info.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/bingspellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/package-info.java rename to sdk/cognitiveservices/ms-azure-cs-spellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/models/package-info.java diff --git a/cognitiveservices/data-plane/language/bingspellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/package-info.java b/sdk/cognitiveservices/ms-azure-cs-spellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/package-info.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/bingspellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/package-info.java rename to sdk/cognitiveservices/ms-azure-cs-spellcheck/src/main/java/com/microsoft/azure/cognitiveservices/language/spellcheck/package-info.java diff --git a/cognitiveservices/data-plane/language/textanalytics/pom.xml b/sdk/cognitiveservices/ms-azure-cs-textanalytics/pom.xml old mode 100755 new mode 100644 similarity index 98% rename from cognitiveservices/data-plane/language/textanalytics/pom.xml rename to sdk/cognitiveservices/ms-azure-cs-textanalytics/pom.xml index 3f19296b15696..ed884313899dc --- a/cognitiveservices/data-plane/language/textanalytics/pom.xml +++ b/sdk/cognitiveservices/ms-azure-cs-textanalytics/pom.xml @@ -11,7 +11,7 @@ com.microsoft.azure.cognitiveservices azure-cognitiveservices-parent 1.0.2 - ../../pom.xml + ../pom.xml azure-cognitiveservices-textanalytics 1.0.2-beta diff --git a/cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/TextAnalytics.java 
b/sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/TextAnalytics.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/TextAnalytics.java rename to sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/TextAnalytics.java diff --git a/cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/TextAnalyticsAPI.java b/sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/TextAnalyticsAPI.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/TextAnalyticsAPI.java rename to sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/TextAnalyticsAPI.java diff --git a/cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/TextAnalyticsManager.java b/sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/TextAnalyticsManager.java similarity index 100% rename from cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/TextAnalyticsManager.java rename to sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/TextAnalyticsManager.java diff --git a/cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/implementation/TextAnalyticsAPIImpl.java b/sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/implementation/TextAnalyticsAPIImpl.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/implementation/TextAnalyticsAPIImpl.java rename to sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/implementation/TextAnalyticsAPIImpl.java diff --git a/cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/implementation/TextAnalyticsImpl.java b/sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/implementation/TextAnalyticsImpl.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/implementation/TextAnalyticsImpl.java rename to sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/implementation/TextAnalyticsImpl.java diff --git a/cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/implementation/package-info.java 
b/sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/implementation/package-info.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/implementation/package-info.java rename to sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/implementation/package-info.java diff --git a/cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/AzureRegions.java b/sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/AzureRegions.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/AzureRegions.java rename to sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/AzureRegions.java diff --git a/cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/BatchInput.java b/sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/BatchInput.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/BatchInput.java rename to sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/BatchInput.java diff --git a/cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/DetectLanguageOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/DetectLanguageOptionalParameter.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/DetectLanguageOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/DetectLanguageOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/DetectedLanguage.java b/sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/DetectedLanguage.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/DetectedLanguage.java rename to sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/DetectedLanguage.java diff --git 
a/cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/EntitiesBatchResult.java b/sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/EntitiesBatchResult.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/EntitiesBatchResult.java rename to sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/EntitiesBatchResult.java diff --git a/cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/EntitiesBatchResultItem.java b/sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/EntitiesBatchResultItem.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/EntitiesBatchResultItem.java rename to sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/EntitiesBatchResultItem.java diff --git a/cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/EntitiesOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/EntitiesOptionalParameter.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/EntitiesOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/EntitiesOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/EntityRecord.java b/sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/EntityRecord.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/EntityRecord.java rename to sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/EntityRecord.java diff --git a/cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/ErrorRecord.java b/sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/ErrorRecord.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/ErrorRecord.java rename to 
sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/ErrorRecord.java diff --git a/cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/ErrorResponse.java b/sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/ErrorResponse.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/ErrorResponse.java rename to sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/ErrorResponse.java diff --git a/cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/ErrorResponseException.java b/sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/ErrorResponseException.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/ErrorResponseException.java rename to sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/ErrorResponseException.java diff --git a/cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/Input.java b/sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/Input.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/Input.java rename to sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/Input.java diff --git a/cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/InternalError.java b/sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/InternalError.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/InternalError.java rename to sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/InternalError.java diff --git a/cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/KeyPhraseBatchResult.java b/sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/KeyPhraseBatchResult.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/KeyPhraseBatchResult.java rename to 
sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/KeyPhraseBatchResult.java diff --git a/cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/KeyPhraseBatchResultItem.java b/sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/KeyPhraseBatchResultItem.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/KeyPhraseBatchResultItem.java rename to sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/KeyPhraseBatchResultItem.java diff --git a/cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/KeyPhrasesOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/KeyPhrasesOptionalParameter.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/KeyPhrasesOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/KeyPhrasesOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/LanguageBatchResult.java b/sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/LanguageBatchResult.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/LanguageBatchResult.java rename to sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/LanguageBatchResult.java diff --git a/cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/LanguageBatchResultItem.java b/sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/LanguageBatchResultItem.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/LanguageBatchResultItem.java rename to sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/LanguageBatchResultItem.java diff --git a/cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/MatchRecord.java b/sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/MatchRecord.java old mode 100755 new mode 100644 similarity index 100% rename from 
cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/MatchRecord.java rename to sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/MatchRecord.java diff --git a/cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/MultiLanguageBatchInput.java b/sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/MultiLanguageBatchInput.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/MultiLanguageBatchInput.java rename to sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/MultiLanguageBatchInput.java diff --git a/cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/MultiLanguageInput.java b/sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/MultiLanguageInput.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/MultiLanguageInput.java rename to sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/MultiLanguageInput.java diff --git a/cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/SentimentBatchResult.java b/sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/SentimentBatchResult.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/SentimentBatchResult.java rename to sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/SentimentBatchResult.java diff --git a/cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/SentimentBatchResultItem.java b/sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/SentimentBatchResultItem.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/SentimentBatchResultItem.java rename to sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/SentimentBatchResultItem.java diff --git a/cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/SentimentOptionalParameter.java 
b/sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/SentimentOptionalParameter.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/SentimentOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/SentimentOptionalParameter.java diff --git a/cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/package-info.java b/sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/package-info.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/package-info.java rename to sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/models/package-info.java diff --git a/cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/package-info.java b/sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/package-info.java old mode 100755 new mode 100644 similarity index 100% rename from cognitiveservices/data-plane/language/textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/package-info.java rename to sdk/cognitiveservices/ms-azure-cs-textanalytics/src/main/java/com/microsoft/azure/cognitiveservices/language/textanalytics/package-info.java diff --git a/cognitiveservices/data-plane/search/bingvideosearch/pom.xml b/sdk/cognitiveservices/ms-azure-cs-videosearch/pom.xml similarity index 98% rename from cognitiveservices/data-plane/search/bingvideosearch/pom.xml rename to sdk/cognitiveservices/ms-azure-cs-videosearch/pom.xml index 149edcf09ea97..665291eabc7f1 100644 --- a/cognitiveservices/data-plane/search/bingvideosearch/pom.xml +++ b/sdk/cognitiveservices/ms-azure-cs-videosearch/pom.xml @@ -9,7 +9,7 @@ com.microsoft.azure.cognitiveservices azure-cognitiveservices-parent 1.0.2 - ../../pom.xml + ../pom.xml azure-cognitiveservices-videosearch 1.0.2 diff --git a/cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/BingVideoSearchAPI.java b/sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/BingVideoSearchAPI.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/BingVideoSearchAPI.java rename to sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/BingVideoSearchAPI.java diff --git a/cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/BingVideoSearchManager.java b/sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/BingVideoSearchManager.java similarity index 100% rename from 
cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/BingVideoSearchManager.java rename to sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/BingVideoSearchManager.java diff --git a/cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/BingVideos.java b/sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/BingVideos.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/BingVideos.java rename to sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/BingVideos.java diff --git a/cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/implementation/BingVideoSearchAPIImpl.java b/sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/implementation/BingVideoSearchAPIImpl.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/implementation/BingVideoSearchAPIImpl.java rename to sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/implementation/BingVideoSearchAPIImpl.java diff --git a/cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/implementation/BingVideosImpl.java b/sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/implementation/BingVideosImpl.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/implementation/BingVideosImpl.java rename to sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/implementation/BingVideosImpl.java diff --git a/cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/implementation/package-info.java b/sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/implementation/package-info.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/implementation/package-info.java rename to sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/implementation/package-info.java diff --git a/cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/Answer.java b/sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/Answer.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/Answer.java rename to 
sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/Answer.java diff --git a/cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/CreativeWork.java b/sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/CreativeWork.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/CreativeWork.java rename to sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/CreativeWork.java diff --git a/cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/DetailsOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/DetailsOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/DetailsOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/DetailsOptionalParameter.java diff --git a/cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/Error.java b/sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/Error.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/Error.java rename to sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/Error.java diff --git a/cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/ErrorCode.java b/sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/ErrorCode.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/ErrorCode.java rename to sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/ErrorCode.java diff --git a/cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/ErrorResponse.java b/sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/ErrorResponse.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/ErrorResponse.java rename to sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/ErrorResponse.java diff --git a/cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/ErrorResponseException.java 
b/sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/ErrorResponseException.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/ErrorResponseException.java rename to sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/ErrorResponseException.java diff --git a/cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/ErrorSubCode.java b/sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/ErrorSubCode.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/ErrorSubCode.java rename to sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/ErrorSubCode.java diff --git a/cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/Freshness.java b/sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/Freshness.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/Freshness.java rename to sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/Freshness.java diff --git a/cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/Identifiable.java b/sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/Identifiable.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/Identifiable.java rename to sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/Identifiable.java diff --git a/cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/ImageObject.java b/sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/ImageObject.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/ImageObject.java rename to sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/ImageObject.java diff --git a/cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/MediaObject.java b/sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/MediaObject.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/MediaObject.java rename to 
sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/MediaObject.java diff --git a/cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/PivotSuggestions.java b/sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/PivotSuggestions.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/PivotSuggestions.java rename to sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/PivotSuggestions.java diff --git a/cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/Query.java b/sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/Query.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/Query.java rename to sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/Query.java diff --git a/cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/QueryContext.java b/sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/QueryContext.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/QueryContext.java rename to sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/QueryContext.java diff --git a/cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/Response.java b/sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/Response.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/Response.java rename to sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/Response.java diff --git a/cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/ResponseBase.java b/sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/ResponseBase.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/ResponseBase.java rename to sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/ResponseBase.java diff --git a/cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/SafeSearch.java 
b/sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/SafeSearch.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/SafeSearch.java rename to sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/SafeSearch.java diff --git a/cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/SearchOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/SearchOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/SearchOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/SearchOptionalParameter.java diff --git a/cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/SearchResultsAnswer.java b/sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/SearchResultsAnswer.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/SearchResultsAnswer.java rename to sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/SearchResultsAnswer.java diff --git a/cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/TextFormat.java b/sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/TextFormat.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/TextFormat.java rename to sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/TextFormat.java diff --git a/cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/Thing.java b/sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/Thing.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/Thing.java rename to sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/Thing.java diff --git a/cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/TrendingOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/TrendingOptionalParameter.java similarity index 100% rename from 
cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/TrendingOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/TrendingOptionalParameter.java diff --git a/cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/TrendingVideos.java b/sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/TrendingVideos.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/TrendingVideos.java rename to sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/TrendingVideos.java diff --git a/cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/TrendingVideosCategory.java b/sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/TrendingVideosCategory.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/TrendingVideosCategory.java rename to sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/TrendingVideosCategory.java diff --git a/cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/TrendingVideosSubcategory.java b/sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/TrendingVideosSubcategory.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/TrendingVideosSubcategory.java rename to sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/TrendingVideosSubcategory.java diff --git a/cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/TrendingVideosTile.java b/sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/TrendingVideosTile.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/TrendingVideosTile.java rename to sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/TrendingVideosTile.java diff --git a/cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/VideoDetails.java b/sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/VideoDetails.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/VideoDetails.java rename to 
sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/VideoDetails.java diff --git a/cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/VideoInsightModule.java b/sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/VideoInsightModule.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/VideoInsightModule.java rename to sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/VideoInsightModule.java diff --git a/cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/VideoLength.java b/sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/VideoLength.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/VideoLength.java rename to sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/VideoLength.java diff --git a/cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/VideoObject.java b/sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/VideoObject.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/VideoObject.java rename to sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/VideoObject.java diff --git a/cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/VideoPricing.java b/sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/VideoPricing.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/VideoPricing.java rename to sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/VideoPricing.java diff --git a/cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/VideoQueryScenario.java b/sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/VideoQueryScenario.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/VideoQueryScenario.java rename to sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/VideoQueryScenario.java diff --git a/cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/VideoResolution.java 
b/sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/VideoResolution.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/VideoResolution.java rename to sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/VideoResolution.java diff --git a/cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/VideosModel.java b/sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/VideosModel.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/VideosModel.java rename to sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/VideosModel.java diff --git a/cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/VideosModule.java b/sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/VideosModule.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/VideosModule.java rename to sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/VideosModule.java diff --git a/cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/package-info.java b/sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/package-info.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/package-info.java rename to sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/models/package-info.java diff --git a/cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/package-info.java b/sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/package-info.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvideosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/package-info.java rename to sdk/cognitiveservices/ms-azure-cs-videosearch/src/main/java/com/microsoft/azure/cognitiveservices/search/videosearch/package-info.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/pom.xml b/sdk/cognitiveservices/ms-azure-cs-visualsearch/pom.xml similarity index 98% rename from cognitiveservices/data-plane/search/bingvisualsearch/pom.xml rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/pom.xml index bf6ba0b59e493..1ba947143e7ab 100644 --- a/cognitiveservices/data-plane/search/bingvisualsearch/pom.xml +++ b/sdk/cognitiveservices/ms-azure-cs-visualsearch/pom.xml @@ -9,7 +9,7 @@ com.microsoft.azure.cognitiveservices 
azure-cognitiveservices-parent 1.0.2 - ../../pom.xml + ../pom.xml azure-cognitiveservices-visualsearch 1.0.2-beta diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/BingImages.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/BingImages.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/BingImages.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/BingImages.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/BingVisualSearchAPI.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/BingVisualSearchAPI.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/BingVisualSearchAPI.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/BingVisualSearchAPI.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/BingVisualSearchManager.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/BingVisualSearchManager.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/BingVisualSearchManager.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/BingVisualSearchManager.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/implementation/BingImagesImpl.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/implementation/BingImagesImpl.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/implementation/BingImagesImpl.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/implementation/BingImagesImpl.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/implementation/BingVisualSearchAPIImpl.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/implementation/BingVisualSearchAPIImpl.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/implementation/BingVisualSearchAPIImpl.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/implementation/BingVisualSearchAPIImpl.java diff --git 
a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/implementation/package-info.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/implementation/package-info.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/implementation/package-info.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/implementation/package-info.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Action.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Action.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Action.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Action.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/AggregateOffer.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/AggregateOffer.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/AggregateOffer.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/AggregateOffer.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/AggregateRating.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/AggregateRating.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/AggregateRating.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/AggregateRating.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/CreativeWork.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/CreativeWork.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/CreativeWork.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/CreativeWork.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/CropArea.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/CropArea.java 
similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/CropArea.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/CropArea.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Currency.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Currency.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Currency.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Currency.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Error.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Error.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Error.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Error.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ErrorCode.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ErrorCode.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ErrorCode.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ErrorCode.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ErrorResponse.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ErrorResponse.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ErrorResponse.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ErrorResponse.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ErrorResponseException.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ErrorResponseException.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ErrorResponseException.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ErrorResponseException.java 
diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ErrorSubCode.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ErrorSubCode.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ErrorSubCode.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ErrorSubCode.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Filters.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Filters.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Filters.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Filters.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Identifiable.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Identifiable.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Identifiable.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Identifiable.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImageAction.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImageAction.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImageAction.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImageAction.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImageEntityAction.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImageEntityAction.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImageEntityAction.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImageEntityAction.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImageInfo.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImageInfo.java similarity index 
100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImageInfo.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImageInfo.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImageKnowledge.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImageKnowledge.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImageKnowledge.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImageKnowledge.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImageModuleAction.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImageModuleAction.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImageModuleAction.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImageModuleAction.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImageObject.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImageObject.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImageObject.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImageObject.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImageRecipesAction.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImageRecipesAction.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImageRecipesAction.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImageRecipesAction.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImageRelatedSearchesAction.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImageRelatedSearchesAction.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImageRelatedSearchesAction.java rename to 
sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImageRelatedSearchesAction.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImageShoppingSourcesAction.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImageShoppingSourcesAction.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImageShoppingSourcesAction.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImageShoppingSourcesAction.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImageTag.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImageTag.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImageTag.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImageTag.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImageTagRegion.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImageTagRegion.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImageTagRegion.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImageTagRegion.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImagesImageMetadata.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImagesImageMetadata.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImagesImageMetadata.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImagesImageMetadata.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImagesModule.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImagesModule.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImagesModule.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ImagesModule.java diff --git 
a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Intangible.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Intangible.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Intangible.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Intangible.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ItemAvailability.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ItemAvailability.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ItemAvailability.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ItemAvailability.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/KnowledgeRequest.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/KnowledgeRequest.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/KnowledgeRequest.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/KnowledgeRequest.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/MediaObject.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/MediaObject.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/MediaObject.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/MediaObject.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/NormalizedQuadrilateral.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/NormalizedQuadrilateral.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/NormalizedQuadrilateral.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/NormalizedQuadrilateral.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Offer.java 
b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Offer.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Offer.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Offer.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Organization.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Organization.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Organization.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Organization.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Person.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Person.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Person.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Person.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Point2D.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Point2D.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Point2D.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Point2D.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/PropertiesItem.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/PropertiesItem.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/PropertiesItem.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/PropertiesItem.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Query.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Query.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Query.java rename to 
sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Query.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Rating.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Rating.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Rating.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Rating.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Recipe.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Recipe.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Recipe.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Recipe.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/RecipesModule.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/RecipesModule.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/RecipesModule.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/RecipesModule.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/RelatedSearchesModule.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/RelatedSearchesModule.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/RelatedSearchesModule.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/RelatedSearchesModule.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Response.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Response.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Response.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Response.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ResponseBase.java 
b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ResponseBase.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ResponseBase.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/ResponseBase.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/StructuredValue.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/StructuredValue.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/StructuredValue.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/StructuredValue.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Thing.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Thing.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Thing.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/Thing.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/VisualSearchOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/VisualSearchOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/VisualSearchOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/VisualSearchOptionalParameter.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/VisualSearchRequest.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/VisualSearchRequest.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/VisualSearchRequest.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/VisualSearchRequest.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/package-info.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/package-info.java similarity index 100% rename from 
cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/package-info.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/models/package-info.java diff --git a/cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/package-info.java b/sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/package-info.java similarity index 100% rename from cognitiveservices/data-plane/search/bingvisualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/package-info.java rename to sdk/cognitiveservices/ms-azure-cs-visualsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/visualsearch/package-info.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/pom.xml b/sdk/cognitiveservices/ms-azure-cs-websearch/pom.xml similarity index 98% rename from cognitiveservices/data-plane/search/bingwebsearch/pom.xml rename to sdk/cognitiveservices/ms-azure-cs-websearch/pom.xml index 82b74ceb2e931..1fad4bab508b9 100644 --- a/cognitiveservices/data-plane/search/bingwebsearch/pom.xml +++ b/sdk/cognitiveservices/ms-azure-cs-websearch/pom.xml
@@ -9,7 +9,7 @@
 com.microsoft.azure.cognitiveservices
 azure-cognitiveservices-parent
 1.0.2
- ../../pom.xml
+ ../pom.xml
 azure-cognitiveservices-websearch
 1.0.2
diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/BingWebSearch.java b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/BingWebSearch.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/BingWebSearch.java rename to sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/BingWebSearch.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/BingWebSearchAPI.java b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/BingWebSearchAPI.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/BingWebSearchAPI.java rename to sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/BingWebSearchAPI.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/BingWebSearchManager.java b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/BingWebSearchManager.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/BingWebSearchManager.java rename to sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/BingWebSearchManager.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/implementation/BingWebSearchAPIImpl.java
b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/implementation/BingWebSearchAPIImpl.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/implementation/BingWebSearchAPIImpl.java rename to sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/implementation/BingWebSearchAPIImpl.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/implementation/BingWebSearchImpl.java b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/implementation/BingWebSearchImpl.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/implementation/BingWebSearchImpl.java rename to sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/implementation/BingWebSearchImpl.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/implementation/package-info.java b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/implementation/package-info.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/implementation/package-info.java rename to sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/implementation/package-info.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Answer.java b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Answer.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Answer.java rename to sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Answer.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/AnswerType.java b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/AnswerType.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/AnswerType.java rename to sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/AnswerType.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Article.java b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Article.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Article.java rename to 
sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Article.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Computation.java b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Computation.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Computation.java rename to sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Computation.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/CreativeWork.java b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/CreativeWork.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/CreativeWork.java rename to sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/CreativeWork.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Error.java b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Error.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Error.java rename to sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Error.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/ErrorCode.java b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/ErrorCode.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/ErrorCode.java rename to sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/ErrorCode.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/ErrorResponse.java b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/ErrorResponse.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/ErrorResponse.java rename to sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/ErrorResponse.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/ErrorResponseException.java b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/ErrorResponseException.java similarity index 100% rename 
from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/ErrorResponseException.java rename to sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/ErrorResponseException.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/ErrorSubCode.java b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/ErrorSubCode.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/ErrorSubCode.java rename to sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/ErrorSubCode.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Freshness.java b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Freshness.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Freshness.java rename to sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Freshness.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Identifiable.java b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Identifiable.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Identifiable.java rename to sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Identifiable.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/ImageObject.java b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/ImageObject.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/ImageObject.java rename to sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/ImageObject.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Images.java b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Images.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Images.java rename to sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Images.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Intangible.java 
b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Intangible.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Intangible.java rename to sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Intangible.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/MediaObject.java b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/MediaObject.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/MediaObject.java rename to sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/MediaObject.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/News.java b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/News.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/News.java rename to sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/News.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/NewsArticle.java b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/NewsArticle.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/NewsArticle.java rename to sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/NewsArticle.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Places.java b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Places.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Places.java rename to sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Places.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Query.java b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Query.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Query.java rename to sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Query.java diff --git 
a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/QueryContext.java b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/QueryContext.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/QueryContext.java rename to sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/QueryContext.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/RankingRankingGroup.java b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/RankingRankingGroup.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/RankingRankingGroup.java rename to sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/RankingRankingGroup.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/RankingRankingItem.java b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/RankingRankingItem.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/RankingRankingItem.java rename to sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/RankingRankingItem.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/RankingRankingResponse.java b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/RankingRankingResponse.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/RankingRankingResponse.java rename to sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/RankingRankingResponse.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/RelatedSearchesRelatedSearchAnswer.java b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/RelatedSearchesRelatedSearchAnswer.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/RelatedSearchesRelatedSearchAnswer.java rename to sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/RelatedSearchesRelatedSearchAnswer.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Response.java 
b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Response.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Response.java rename to sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Response.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/ResponseBase.java b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/ResponseBase.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/ResponseBase.java rename to sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/ResponseBase.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/SafeSearch.java b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/SafeSearch.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/SafeSearch.java rename to sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/SafeSearch.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/SearchOptionalParameter.java b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/SearchOptionalParameter.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/SearchOptionalParameter.java rename to sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/SearchOptionalParameter.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/SearchResponse.java b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/SearchResponse.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/SearchResponse.java rename to sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/SearchResponse.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/SearchResultsAnswer.java b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/SearchResultsAnswer.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/SearchResultsAnswer.java rename to 
sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/SearchResultsAnswer.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/SpellSuggestions.java b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/SpellSuggestions.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/SpellSuggestions.java rename to sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/SpellSuggestions.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/StructuredValue.java b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/StructuredValue.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/StructuredValue.java rename to sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/StructuredValue.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/TextFormat.java b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/TextFormat.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/TextFormat.java rename to sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/TextFormat.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Thing.java b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Thing.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Thing.java rename to sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Thing.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/TimeZone.java b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/TimeZone.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/TimeZone.java rename to sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/TimeZone.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/TimeZoneTimeZoneInformation.java 
b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/TimeZoneTimeZoneInformation.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/TimeZoneTimeZoneInformation.java rename to sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/TimeZoneTimeZoneInformation.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/VideoObject.java b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/VideoObject.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/VideoObject.java rename to sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/VideoObject.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Videos.java b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Videos.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Videos.java rename to sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/Videos.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/WebMetaTag.java b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/WebMetaTag.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/WebMetaTag.java rename to sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/WebMetaTag.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/WebPage.java b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/WebPage.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/WebPage.java rename to sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/WebPage.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/WebWebAnswer.java b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/WebWebAnswer.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/WebWebAnswer.java rename to 
sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/WebWebAnswer.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/WebWebGrouping.java b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/WebWebGrouping.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/WebWebGrouping.java rename to sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/WebWebGrouping.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/package-info.java b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/package-info.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/package-info.java rename to sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/models/package-info.java diff --git a/cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/package-info.java b/sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/package-info.java similarity index 100% rename from cognitiveservices/data-plane/search/bingwebsearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/package-info.java rename to sdk/cognitiveservices/ms-azure-cs-websearch/src/main/java/com/microsoft/azure/cognitiveservices/search/websearch/package-info.java diff --git a/sdk/cognitiveservices/pom.service.xml b/sdk/cognitiveservices/pom.service.xml index b05ef1bce0107..a65c5aa0bb467 100644 --- a/sdk/cognitiveservices/pom.service.xml +++ b/sdk/cognitiveservices/pom.service.xml
@@ -9,6 +9,23 @@
 pom
 1.0.0
-
+ ms-azure-cs-spellcheck
+ ms-azure-cs-textanalytics
+ ms-azure-cs-luis-runtime
+ ms-azure-cs-luis-authoring
+ ms-azure-cs-autosuggest
+ ms-azure-cs-customimagesearch
+ ms-azure-cs-customsearch
+ ms-azure-cs-entitysearch
+ ms-azure-cs-imagesearch
+ ms-azure-cs-newssearch
+ ms-azure-cs-videosearch
+ ms-azure-cs-visualsearch
+ ms-azure-cs-websearch
+ ms-azure-cs-computervision
+ ms-azure-cs-contentmoderator
+ ms-azure-cs-customvision-prediction
+ ms-azure-cs-customvision-training
+ ms-azure-cs-faceapi
diff --git a/cognitiveservices/data-plane/pom.xml b/sdk/cognitiveservices/pom.xml old mode 100755 new mode 100644 similarity index 92% rename from cognitiveservices/data-plane/pom.xml rename to sdk/cognitiveservices/pom.xml index 2e2e36b91d2c9..b563ba399f475 --- a/cognitiveservices/data-plane/pom.xml +++ b/sdk/cognitiveservices/pom.xml
@@ -296,23 +296,23 @@
- ./language/bingspellcheck
- ./language/luis/authoring
- ./language/luis/runtime
- ./language/textanalytics
- ./search/bingautosuggest
- ./search/bingcustomimagesearch
- ./search/bingcustomsearch
- ./search/bingentitysearch
- ./search/bingimagesearch
- ./search/bingnewssearch
- ./search/bingvideosearch
- ./search/bingvisualsearch
- ./search/bingwebsearch
- ./vision/computervision
- ./vision/contentmoderator
- ./vision/customvision/prediction
- ./vision/customvision/training
- ./vision/faceapi
+ ms-azure-cs-spellcheck
+ ms-azure-cs-luis-authoring
+ ms-azure-cs-luis-runtime
+ ms-azure-cs-textanalytics
+ ms-azure-cs-autosuggest
+ ms-azure-cs-customimagesearch
+ ms-azure-cs-customsearch
+ ms-azure-cs-entitysearch
+ ms-azure-cs-imagesearch
+ ms-azure-cs-newssearch
+ ms-azure-cs-videosearch
+ ms-azure-cs-visualsearch
+ ms-azure-cs-websearch
+ ms-azure-cs-computervision
+ ms-azure-cs-contentmoderator
+ ms-azure-cs-customvision-prediction
+ ms-azure-cs-customvision-training
+ ms-azure-cs-faceapi
\ No newline at end of file
diff --git a/core/.gitignore b/sdk/core/.gitignore similarity index 100% rename from core/.gitignore rename to sdk/core/.gitignore diff --git a/core/README.md b/sdk/core/README.md similarity index 100% rename from core/README.md rename to sdk/core/README.md diff --git a/core/azure-core-amqp/README.md b/sdk/core/azure-core-amqp/README.md similarity index 100% rename from core/azure-core-amqp/README.md rename to sdk/core/azure-core-amqp/README.md diff --git a/core/azure-core-amqp/pom.xml b/sdk/core/azure-core-amqp/pom.xml similarity index 100% rename from core/azure-core-amqp/pom.xml rename to sdk/core/azure-core-amqp/pom.xml diff --git a/core/azure-core-amqp/src/main/java/com/azure/core/amqp/AmqpConnection.java b/sdk/core/azure-core-amqp/src/main/java/com/azure/core/amqp/AmqpConnection.java similarity index 100% rename from core/azure-core-amqp/src/main/java/com/azure/core/amqp/AmqpConnection.java rename to sdk/core/azure-core-amqp/src/main/java/com/azure/core/amqp/AmqpConnection.java diff --git a/core/azure-core-amqp/src/main/java/com/azure/core/amqp/AmqpEndpointState.java b/sdk/core/azure-core-amqp/src/main/java/com/azure/core/amqp/AmqpEndpointState.java similarity index 100% rename from core/azure-core-amqp/src/main/java/com/azure/core/amqp/AmqpEndpointState.java rename to sdk/core/azure-core-amqp/src/main/java/com/azure/core/amqp/AmqpEndpointState.java diff --git a/core/azure-core-amqp/src/main/java/com/azure/core/amqp/AmqpExceptionHandler.java b/sdk/core/azure-core-amqp/src/main/java/com/azure/core/amqp/AmqpExceptionHandler.java similarity index 100% rename from core/azure-core-amqp/src/main/java/com/azure/core/amqp/AmqpExceptionHandler.java rename to sdk/core/azure-core-amqp/src/main/java/com/azure/core/amqp/AmqpExceptionHandler.java diff --git a/core/azure-core-amqp/src/main/java/com/azure/core/amqp/AmqpLink.java b/sdk/core/azure-core-amqp/src/main/java/com/azure/core/amqp/AmqpLink.java similarity index 100% rename from core/azure-core-amqp/src/main/java/com/azure/core/amqp/AmqpLink.java rename to sdk/core/azure-core-amqp/src/main/java/com/azure/core/amqp/AmqpLink.java diff --git a/core/azure-core-amqp/src/main/java/com/azure/core/amqp/AmqpSession.java b/sdk/core/azure-core-amqp/src/main/java/com/azure/core/amqp/AmqpSession.java similarity index 100% rename from core/azure-core-amqp/src/main/java/com/azure/core/amqp/AmqpSession.java rename to sdk/core/azure-core-amqp/src/main/java/com/azure/core/amqp/AmqpSession.java diff --git a/core/azure-core-amqp/src/main/java/com/azure/core/amqp/AmqpShutdownSignal.java b/sdk/core/azure-core-amqp/src/main/java/com/azure/core/amqp/AmqpShutdownSignal.java similarity index 100% rename from core/azure-core-amqp/src/main/java/com/azure/core/amqp/AmqpShutdownSignal.java rename to sdk/core/azure-core-amqp/src/main/java/com/azure/core/amqp/AmqpShutdownSignal.java diff --git a/core/azure-core-amqp/src/main/java/com/azure/core/amqp/CBSNode.java
b/sdk/core/azure-core-amqp/src/main/java/com/azure/core/amqp/CBSNode.java similarity index 100% rename from core/azure-core-amqp/src/main/java/com/azure/core/amqp/CBSNode.java rename to sdk/core/azure-core-amqp/src/main/java/com/azure/core/amqp/CBSNode.java diff --git a/core/azure-core-amqp/src/main/java/com/azure/core/amqp/EndpointStateNotifier.java b/sdk/core/azure-core-amqp/src/main/java/com/azure/core/amqp/EndpointStateNotifier.java similarity index 100% rename from core/azure-core-amqp/src/main/java/com/azure/core/amqp/EndpointStateNotifier.java rename to sdk/core/azure-core-amqp/src/main/java/com/azure/core/amqp/EndpointStateNotifier.java diff --git a/core/azure-core-amqp/src/main/java/com/azure/core/amqp/ExponentialRetry.java b/sdk/core/azure-core-amqp/src/main/java/com/azure/core/amqp/ExponentialRetry.java similarity index 100% rename from core/azure-core-amqp/src/main/java/com/azure/core/amqp/ExponentialRetry.java rename to sdk/core/azure-core-amqp/src/main/java/com/azure/core/amqp/ExponentialRetry.java diff --git a/core/azure-core-amqp/src/main/java/com/azure/core/amqp/MessageConstant.java b/sdk/core/azure-core-amqp/src/main/java/com/azure/core/amqp/MessageConstant.java similarity index 100% rename from core/azure-core-amqp/src/main/java/com/azure/core/amqp/MessageConstant.java rename to sdk/core/azure-core-amqp/src/main/java/com/azure/core/amqp/MessageConstant.java diff --git a/core/azure-core-amqp/src/main/java/com/azure/core/amqp/Retry.java b/sdk/core/azure-core-amqp/src/main/java/com/azure/core/amqp/Retry.java similarity index 100% rename from core/azure-core-amqp/src/main/java/com/azure/core/amqp/Retry.java rename to sdk/core/azure-core-amqp/src/main/java/com/azure/core/amqp/Retry.java diff --git a/core/azure-core-amqp/src/main/java/com/azure/core/amqp/TransportType.java b/sdk/core/azure-core-amqp/src/main/java/com/azure/core/amqp/TransportType.java similarity index 100% rename from core/azure-core-amqp/src/main/java/com/azure/core/amqp/TransportType.java rename to sdk/core/azure-core-amqp/src/main/java/com/azure/core/amqp/TransportType.java diff --git a/core/azure-core-amqp/src/main/java/com/azure/core/amqp/exception/AmqpException.java b/sdk/core/azure-core-amqp/src/main/java/com/azure/core/amqp/exception/AmqpException.java similarity index 100% rename from core/azure-core-amqp/src/main/java/com/azure/core/amqp/exception/AmqpException.java rename to sdk/core/azure-core-amqp/src/main/java/com/azure/core/amqp/exception/AmqpException.java diff --git a/core/azure-core-amqp/src/main/java/com/azure/core/amqp/exception/AmqpResponseCode.java b/sdk/core/azure-core-amqp/src/main/java/com/azure/core/amqp/exception/AmqpResponseCode.java similarity index 100% rename from core/azure-core-amqp/src/main/java/com/azure/core/amqp/exception/AmqpResponseCode.java rename to sdk/core/azure-core-amqp/src/main/java/com/azure/core/amqp/exception/AmqpResponseCode.java diff --git a/core/azure-core-amqp/src/main/java/com/azure/core/amqp/exception/ErrorCondition.java b/sdk/core/azure-core-amqp/src/main/java/com/azure/core/amqp/exception/ErrorCondition.java similarity index 100% rename from core/azure-core-amqp/src/main/java/com/azure/core/amqp/exception/ErrorCondition.java rename to sdk/core/azure-core-amqp/src/main/java/com/azure/core/amqp/exception/ErrorCondition.java diff --git a/core/azure-core-amqp/src/main/java/com/azure/core/amqp/exception/ErrorContext.java b/sdk/core/azure-core-amqp/src/main/java/com/azure/core/amqp/exception/ErrorContext.java similarity index 100% rename from 
core/azure-core-amqp/src/main/java/com/azure/core/amqp/exception/ErrorContext.java rename to sdk/core/azure-core-amqp/src/main/java/com/azure/core/amqp/exception/ErrorContext.java diff --git a/core/azure-core-amqp/src/main/java/com/azure/core/amqp/exception/ExceptionUtil.java b/sdk/core/azure-core-amqp/src/main/java/com/azure/core/amqp/exception/ExceptionUtil.java similarity index 100% rename from core/azure-core-amqp/src/main/java/com/azure/core/amqp/exception/ExceptionUtil.java rename to sdk/core/azure-core-amqp/src/main/java/com/azure/core/amqp/exception/ExceptionUtil.java diff --git a/core/azure-core-amqp/src/main/java/com/azure/core/amqp/exception/LinkErrorContext.java b/sdk/core/azure-core-amqp/src/main/java/com/azure/core/amqp/exception/LinkErrorContext.java similarity index 100% rename from core/azure-core-amqp/src/main/java/com/azure/core/amqp/exception/LinkErrorContext.java rename to sdk/core/azure-core-amqp/src/main/java/com/azure/core/amqp/exception/LinkErrorContext.java diff --git a/core/azure-core-amqp/src/main/java/com/azure/core/amqp/exception/OperationCancelledException.java b/sdk/core/azure-core-amqp/src/main/java/com/azure/core/amqp/exception/OperationCancelledException.java similarity index 100% rename from core/azure-core-amqp/src/main/java/com/azure/core/amqp/exception/OperationCancelledException.java rename to sdk/core/azure-core-amqp/src/main/java/com/azure/core/amqp/exception/OperationCancelledException.java diff --git a/core/azure-core-amqp/src/main/java/com/azure/core/amqp/exception/SessionErrorContext.java b/sdk/core/azure-core-amqp/src/main/java/com/azure/core/amqp/exception/SessionErrorContext.java similarity index 100% rename from core/azure-core-amqp/src/main/java/com/azure/core/amqp/exception/SessionErrorContext.java rename to sdk/core/azure-core-amqp/src/main/java/com/azure/core/amqp/exception/SessionErrorContext.java diff --git a/core/azure-core-amqp/src/main/java/com/azure/core/amqp/exception/package-info.java b/sdk/core/azure-core-amqp/src/main/java/com/azure/core/amqp/exception/package-info.java similarity index 100% rename from core/azure-core-amqp/src/main/java/com/azure/core/amqp/exception/package-info.java rename to sdk/core/azure-core-amqp/src/main/java/com/azure/core/amqp/exception/package-info.java diff --git a/core/azure-core-amqp/src/main/java/com/azure/core/amqp/package-info.java b/sdk/core/azure-core-amqp/src/main/java/com/azure/core/amqp/package-info.java similarity index 100% rename from core/azure-core-amqp/src/main/java/com/azure/core/amqp/package-info.java rename to sdk/core/azure-core-amqp/src/main/java/com/azure/core/amqp/package-info.java diff --git a/core/azure-core-amqp/src/test/java/com/azure/core/amqp/AmqpShutdownSignalTest.java b/sdk/core/azure-core-amqp/src/test/java/com/azure/core/amqp/AmqpShutdownSignalTest.java similarity index 100% rename from core/azure-core-amqp/src/test/java/com/azure/core/amqp/AmqpShutdownSignalTest.java rename to sdk/core/azure-core-amqp/src/test/java/com/azure/core/amqp/AmqpShutdownSignalTest.java diff --git a/core/azure-core-amqp/src/test/java/com/azure/core/amqp/ExponentialRetryTest.java b/sdk/core/azure-core-amqp/src/test/java/com/azure/core/amqp/ExponentialRetryTest.java similarity index 100% rename from core/azure-core-amqp/src/test/java/com/azure/core/amqp/ExponentialRetryTest.java rename to sdk/core/azure-core-amqp/src/test/java/com/azure/core/amqp/ExponentialRetryTest.java diff --git a/core/azure-core-amqp/src/test/java/com/azure/core/amqp/MessageConstantTest.java 
b/sdk/core/azure-core-amqp/src/test/java/com/azure/core/amqp/MessageConstantTest.java similarity index 100% rename from core/azure-core-amqp/src/test/java/com/azure/core/amqp/MessageConstantTest.java rename to sdk/core/azure-core-amqp/src/test/java/com/azure/core/amqp/MessageConstantTest.java diff --git a/core/azure-core-amqp/src/test/java/com/azure/core/amqp/RetryTest.java b/sdk/core/azure-core-amqp/src/test/java/com/azure/core/amqp/RetryTest.java similarity index 100% rename from core/azure-core-amqp/src/test/java/com/azure/core/amqp/RetryTest.java rename to sdk/core/azure-core-amqp/src/test/java/com/azure/core/amqp/RetryTest.java diff --git a/core/azure-core-amqp/src/test/java/com/azure/core/amqp/TransportTypeTest.java b/sdk/core/azure-core-amqp/src/test/java/com/azure/core/amqp/TransportTypeTest.java similarity index 100% rename from core/azure-core-amqp/src/test/java/com/azure/core/amqp/TransportTypeTest.java rename to sdk/core/azure-core-amqp/src/test/java/com/azure/core/amqp/TransportTypeTest.java diff --git a/core/azure-core-amqp/src/test/java/com/azure/core/amqp/exception/AmqpExceptionTest.java b/sdk/core/azure-core-amqp/src/test/java/com/azure/core/amqp/exception/AmqpExceptionTest.java similarity index 100% rename from core/azure-core-amqp/src/test/java/com/azure/core/amqp/exception/AmqpExceptionTest.java rename to sdk/core/azure-core-amqp/src/test/java/com/azure/core/amqp/exception/AmqpExceptionTest.java diff --git a/core/azure-core-amqp/src/test/java/com/azure/core/amqp/exception/AmqpResponseCodeTest.java b/sdk/core/azure-core-amqp/src/test/java/com/azure/core/amqp/exception/AmqpResponseCodeTest.java similarity index 100% rename from core/azure-core-amqp/src/test/java/com/azure/core/amqp/exception/AmqpResponseCodeTest.java rename to sdk/core/azure-core-amqp/src/test/java/com/azure/core/amqp/exception/AmqpResponseCodeTest.java diff --git a/core/azure-core-amqp/src/test/java/com/azure/core/amqp/exception/ErrorContextTest.java b/sdk/core/azure-core-amqp/src/test/java/com/azure/core/amqp/exception/ErrorContextTest.java similarity index 100% rename from core/azure-core-amqp/src/test/java/com/azure/core/amqp/exception/ErrorContextTest.java rename to sdk/core/azure-core-amqp/src/test/java/com/azure/core/amqp/exception/ErrorContextTest.java diff --git a/core/azure-core-amqp/src/test/java/com/azure/core/amqp/exception/ExceptionUtilTest.java b/sdk/core/azure-core-amqp/src/test/java/com/azure/core/amqp/exception/ExceptionUtilTest.java similarity index 100% rename from core/azure-core-amqp/src/test/java/com/azure/core/amqp/exception/ExceptionUtilTest.java rename to sdk/core/azure-core-amqp/src/test/java/com/azure/core/amqp/exception/ExceptionUtilTest.java diff --git a/core/azure-core-amqp/src/test/java/com/azure/core/amqp/exception/LinkErrorContextTest.java b/sdk/core/azure-core-amqp/src/test/java/com/azure/core/amqp/exception/LinkErrorContextTest.java similarity index 100% rename from core/azure-core-amqp/src/test/java/com/azure/core/amqp/exception/LinkErrorContextTest.java rename to sdk/core/azure-core-amqp/src/test/java/com/azure/core/amqp/exception/LinkErrorContextTest.java diff --git a/core/azure-core-amqp/src/test/java/com/azure/core/amqp/exception/OperationCancelledExceptionTest.java b/sdk/core/azure-core-amqp/src/test/java/com/azure/core/amqp/exception/OperationCancelledExceptionTest.java similarity index 100% rename from core/azure-core-amqp/src/test/java/com/azure/core/amqp/exception/OperationCancelledExceptionTest.java rename to 
sdk/core/azure-core-amqp/src/test/java/com/azure/core/amqp/exception/OperationCancelledExceptionTest.java diff --git a/core/azure-core-amqp/src/test/java/com/azure/core/amqp/exception/SessionErrorContextTest.java b/sdk/core/azure-core-amqp/src/test/java/com/azure/core/amqp/exception/SessionErrorContextTest.java similarity index 100% rename from core/azure-core-amqp/src/test/java/com/azure/core/amqp/exception/SessionErrorContextTest.java rename to sdk/core/azure-core-amqp/src/test/java/com/azure/core/amqp/exception/SessionErrorContextTest.java diff --git a/core/azure-core-management/pom.xml b/sdk/core/azure-core-management/pom.xml similarity index 100% rename from core/azure-core-management/pom.xml rename to sdk/core/azure-core-management/pom.xml diff --git a/core/azure-core-management/src/main/java/com/azure/core/management/AsyncOperationResource.java b/sdk/core/azure-core-management/src/main/java/com/azure/core/management/AsyncOperationResource.java similarity index 100% rename from core/azure-core-management/src/main/java/com/azure/core/management/AsyncOperationResource.java rename to sdk/core/azure-core-management/src/main/java/com/azure/core/management/AsyncOperationResource.java diff --git a/core/azure-core-management/src/main/java/com/azure/core/management/CloudError.java b/sdk/core/azure-core-management/src/main/java/com/azure/core/management/CloudError.java similarity index 100% rename from core/azure-core-management/src/main/java/com/azure/core/management/CloudError.java rename to sdk/core/azure-core-management/src/main/java/com/azure/core/management/CloudError.java diff --git a/core/azure-core-management/src/main/java/com/azure/core/management/CloudException.java b/sdk/core/azure-core-management/src/main/java/com/azure/core/management/CloudException.java similarity index 100% rename from core/azure-core-management/src/main/java/com/azure/core/management/CloudException.java rename to sdk/core/azure-core-management/src/main/java/com/azure/core/management/CloudException.java diff --git a/core/azure-core-management/src/main/java/com/azure/core/management/OperationState.java b/sdk/core/azure-core-management/src/main/java/com/azure/core/management/OperationState.java similarity index 100% rename from core/azure-core-management/src/main/java/com/azure/core/management/OperationState.java rename to sdk/core/azure-core-management/src/main/java/com/azure/core/management/OperationState.java diff --git a/core/azure-core-management/src/main/java/com/azure/core/management/Page.java b/sdk/core/azure-core-management/src/main/java/com/azure/core/management/Page.java similarity index 100% rename from core/azure-core-management/src/main/java/com/azure/core/management/Page.java rename to sdk/core/azure-core-management/src/main/java/com/azure/core/management/Page.java diff --git a/core/azure-core-management/src/main/java/com/azure/core/management/PagedList.java b/sdk/core/azure-core-management/src/main/java/com/azure/core/management/PagedList.java similarity index 100% rename from core/azure-core-management/src/main/java/com/azure/core/management/PagedList.java rename to sdk/core/azure-core-management/src/main/java/com/azure/core/management/PagedList.java diff --git a/core/azure-core-management/src/main/java/com/azure/core/management/Resource.java b/sdk/core/azure-core-management/src/main/java/com/azure/core/management/Resource.java similarity index 100% rename from core/azure-core-management/src/main/java/com/azure/core/management/Resource.java rename to 
sdk/core/azure-core-management/src/main/java/com/azure/core/management/Resource.java diff --git a/core/azure-core-management/src/main/java/com/azure/core/management/SubResource.java b/sdk/core/azure-core-management/src/main/java/com/azure/core/management/SubResource.java similarity index 100% rename from core/azure-core-management/src/main/java/com/azure/core/management/SubResource.java rename to sdk/core/azure-core-management/src/main/java/com/azure/core/management/SubResource.java diff --git a/core/azure-core-management/src/main/java/com/azure/core/management/annotations/AzureHost.java b/sdk/core/azure-core-management/src/main/java/com/azure/core/management/annotations/AzureHost.java similarity index 100% rename from core/azure-core-management/src/main/java/com/azure/core/management/annotations/AzureHost.java rename to sdk/core/azure-core-management/src/main/java/com/azure/core/management/annotations/AzureHost.java diff --git a/core/azure-core-management/src/main/java/com/azure/core/management/annotations/package-info.java b/sdk/core/azure-core-management/src/main/java/com/azure/core/management/annotations/package-info.java similarity index 100% rename from core/azure-core-management/src/main/java/com/azure/core/management/annotations/package-info.java rename to sdk/core/azure-core-management/src/main/java/com/azure/core/management/annotations/package-info.java diff --git a/core/azure-core-management/src/main/java/com/azure/core/management/implementation/AzureAsyncOperationPollStrategy.java b/sdk/core/azure-core-management/src/main/java/com/azure/core/management/implementation/AzureAsyncOperationPollStrategy.java similarity index 100% rename from core/azure-core-management/src/main/java/com/azure/core/management/implementation/AzureAsyncOperationPollStrategy.java rename to sdk/core/azure-core-management/src/main/java/com/azure/core/management/implementation/AzureAsyncOperationPollStrategy.java diff --git a/core/azure-core-management/src/main/java/com/azure/core/management/implementation/AzureProxy.java b/sdk/core/azure-core-management/src/main/java/com/azure/core/management/implementation/AzureProxy.java similarity index 100% rename from core/azure-core-management/src/main/java/com/azure/core/management/implementation/AzureProxy.java rename to sdk/core/azure-core-management/src/main/java/com/azure/core/management/implementation/AzureProxy.java diff --git a/core/azure-core-management/src/main/java/com/azure/core/management/implementation/CompletedPollStrategy.java b/sdk/core/azure-core-management/src/main/java/com/azure/core/management/implementation/CompletedPollStrategy.java similarity index 100% rename from core/azure-core-management/src/main/java/com/azure/core/management/implementation/CompletedPollStrategy.java rename to sdk/core/azure-core-management/src/main/java/com/azure/core/management/implementation/CompletedPollStrategy.java diff --git a/core/azure-core-management/src/main/java/com/azure/core/management/implementation/LocationPollStrategy.java b/sdk/core/azure-core-management/src/main/java/com/azure/core/management/implementation/LocationPollStrategy.java similarity index 100% rename from core/azure-core-management/src/main/java/com/azure/core/management/implementation/LocationPollStrategy.java rename to sdk/core/azure-core-management/src/main/java/com/azure/core/management/implementation/LocationPollStrategy.java diff --git a/core/azure-core-management/src/main/java/com/azure/core/management/implementation/OperationStatus.java 
b/sdk/core/azure-core-management/src/main/java/com/azure/core/management/implementation/OperationStatus.java similarity index 100% rename from core/azure-core-management/src/main/java/com/azure/core/management/implementation/OperationStatus.java rename to sdk/core/azure-core-management/src/main/java/com/azure/core/management/implementation/OperationStatus.java diff --git a/core/azure-core-management/src/main/java/com/azure/core/management/implementation/PollStrategy.java b/sdk/core/azure-core-management/src/main/java/com/azure/core/management/implementation/PollStrategy.java similarity index 100% rename from core/azure-core-management/src/main/java/com/azure/core/management/implementation/PollStrategy.java rename to sdk/core/azure-core-management/src/main/java/com/azure/core/management/implementation/PollStrategy.java diff --git a/core/azure-core-management/src/main/java/com/azure/core/management/implementation/ProvisioningStatePollStrategy.java b/sdk/core/azure-core-management/src/main/java/com/azure/core/management/implementation/ProvisioningStatePollStrategy.java similarity index 100% rename from core/azure-core-management/src/main/java/com/azure/core/management/implementation/ProvisioningStatePollStrategy.java rename to sdk/core/azure-core-management/src/main/java/com/azure/core/management/implementation/ProvisioningStatePollStrategy.java diff --git a/core/azure-core-management/src/main/java/com/azure/core/management/implementation/ResourceWithProvisioningState.java b/sdk/core/azure-core-management/src/main/java/com/azure/core/management/implementation/ResourceWithProvisioningState.java similarity index 100% rename from core/azure-core-management/src/main/java/com/azure/core/management/implementation/ResourceWithProvisioningState.java rename to sdk/core/azure-core-management/src/main/java/com/azure/core/management/implementation/ResourceWithProvisioningState.java diff --git a/core/azure-core-management/src/main/java/com/azure/core/management/package-info.java b/sdk/core/azure-core-management/src/main/java/com/azure/core/management/package-info.java similarity index 100% rename from core/azure-core-management/src/main/java/com/azure/core/management/package-info.java rename to sdk/core/azure-core-management/src/main/java/com/azure/core/management/package-info.java diff --git a/core/azure-core-management/src/main/java/com/azure/core/management/serializer/AzureJacksonAdapter.java b/sdk/core/azure-core-management/src/main/java/com/azure/core/management/serializer/AzureJacksonAdapter.java similarity index 100% rename from core/azure-core-management/src/main/java/com/azure/core/management/serializer/AzureJacksonAdapter.java rename to sdk/core/azure-core-management/src/main/java/com/azure/core/management/serializer/AzureJacksonAdapter.java diff --git a/core/azure-core-management/src/main/java/com/azure/core/management/serializer/CloudErrorDeserializer.java b/sdk/core/azure-core-management/src/main/java/com/azure/core/management/serializer/CloudErrorDeserializer.java similarity index 100% rename from core/azure-core-management/src/main/java/com/azure/core/management/serializer/CloudErrorDeserializer.java rename to sdk/core/azure-core-management/src/main/java/com/azure/core/management/serializer/CloudErrorDeserializer.java diff --git a/core/azure-core-management/src/main/java/com/azure/core/management/serializer/package-info.java b/sdk/core/azure-core-management/src/main/java/com/azure/core/management/serializer/package-info.java similarity index 100% rename from 
core/azure-core-management/src/main/java/com/azure/core/management/serializer/package-info.java rename to sdk/core/azure-core-management/src/main/java/com/azure/core/management/serializer/package-info.java diff --git a/core/azure-core-management/src/test/java/com/azure/core/management/AzureProxyToRestProxyTests.java b/sdk/core/azure-core-management/src/test/java/com/azure/core/management/AzureProxyToRestProxyTests.java similarity index 100% rename from core/azure-core-management/src/test/java/com/azure/core/management/AzureProxyToRestProxyTests.java rename to sdk/core/azure-core-management/src/test/java/com/azure/core/management/AzureProxyToRestProxyTests.java diff --git a/core/azure-core-management/src/test/java/com/azure/core/management/AzureProxyToRestProxyWithMockTests.java b/sdk/core/azure-core-management/src/test/java/com/azure/core/management/AzureProxyToRestProxyWithMockTests.java similarity index 100% rename from core/azure-core-management/src/test/java/com/azure/core/management/AzureProxyToRestProxyWithMockTests.java rename to sdk/core/azure-core-management/src/test/java/com/azure/core/management/AzureProxyToRestProxyWithMockTests.java diff --git a/core/azure-core-management/src/test/java/com/azure/core/management/AzureProxyToRestProxyWithNettyTests.java b/sdk/core/azure-core-management/src/test/java/com/azure/core/management/AzureProxyToRestProxyWithNettyTests.java similarity index 100% rename from core/azure-core-management/src/test/java/com/azure/core/management/AzureProxyToRestProxyWithNettyTests.java rename to sdk/core/azure-core-management/src/test/java/com/azure/core/management/AzureProxyToRestProxyWithNettyTests.java diff --git a/core/azure-core-management/src/test/java/com/azure/core/management/AzureTests.java b/sdk/core/azure-core-management/src/test/java/com/azure/core/management/AzureTests.java similarity index 100% rename from core/azure-core-management/src/test/java/com/azure/core/management/AzureTests.java rename to sdk/core/azure-core-management/src/test/java/com/azure/core/management/AzureTests.java diff --git a/core/azure-core-management/src/test/java/com/azure/core/management/HttpBinJSON.java b/sdk/core/azure-core-management/src/test/java/com/azure/core/management/HttpBinJSON.java similarity index 100% rename from core/azure-core-management/src/test/java/com/azure/core/management/HttpBinJSON.java rename to sdk/core/azure-core-management/src/test/java/com/azure/core/management/HttpBinJSON.java diff --git a/core/azure-core-management/src/test/java/com/azure/core/management/MockResource.java b/sdk/core/azure-core-management/src/test/java/com/azure/core/management/MockResource.java similarity index 100% rename from core/azure-core-management/src/test/java/com/azure/core/management/MockResource.java rename to sdk/core/azure-core-management/src/test/java/com/azure/core/management/MockResource.java diff --git a/core/azure-core-management/src/test/java/com/azure/core/management/MyAzureException.java b/sdk/core/azure-core-management/src/test/java/com/azure/core/management/MyAzureException.java similarity index 100% rename from core/azure-core-management/src/test/java/com/azure/core/management/MyAzureException.java rename to sdk/core/azure-core-management/src/test/java/com/azure/core/management/MyAzureException.java diff --git a/core/azure-core-management/src/test/java/com/azure/core/management/PagedListTests.java b/sdk/core/azure-core-management/src/test/java/com/azure/core/management/PagedListTests.java similarity index 100% rename from 
core/azure-core-management/src/test/java/com/azure/core/management/PagedListTests.java rename to sdk/core/azure-core-management/src/test/java/com/azure/core/management/PagedListTests.java diff --git a/core/azure-core-management/src/test/java/com/azure/core/management/http/MockAzureHttpClient.java b/sdk/core/azure-core-management/src/test/java/com/azure/core/management/http/MockAzureHttpClient.java similarity index 100% rename from core/azure-core-management/src/test/java/com/azure/core/management/http/MockAzureHttpClient.java rename to sdk/core/azure-core-management/src/test/java/com/azure/core/management/http/MockAzureHttpClient.java diff --git a/core/azure-core-management/src/test/java/com/azure/core/management/implementation/AzureProxyTests.java b/sdk/core/azure-core-management/src/test/java/com/azure/core/management/implementation/AzureProxyTests.java similarity index 100% rename from core/azure-core-management/src/test/java/com/azure/core/management/implementation/AzureProxyTests.java rename to sdk/core/azure-core-management/src/test/java/com/azure/core/management/implementation/AzureProxyTests.java diff --git a/core/azure-core-management/src/test/java/com/azure/core/management/implementation/Value.java b/sdk/core/azure-core-management/src/test/java/com/azure/core/management/implementation/Value.java similarity index 100% rename from core/azure-core-management/src/test/java/com/azure/core/management/implementation/Value.java rename to sdk/core/azure-core-management/src/test/java/com/azure/core/management/implementation/Value.java diff --git a/core/azure-core-management/src/test/java/com/azure/core/management/implementation/ValueTests.java b/sdk/core/azure-core-management/src/test/java/com/azure/core/management/implementation/ValueTests.java similarity index 100% rename from core/azure-core-management/src/test/java/com/azure/core/management/implementation/ValueTests.java rename to sdk/core/azure-core-management/src/test/java/com/azure/core/management/implementation/ValueTests.java diff --git a/core/azure-core-test/README.md b/sdk/core/azure-core-test/README.md similarity index 100% rename from core/azure-core-test/README.md rename to sdk/core/azure-core-test/README.md diff --git a/core/azure-core-test/pom.xml b/sdk/core/azure-core-test/pom.xml similarity index 100% rename from core/azure-core-test/pom.xml rename to sdk/core/azure-core-test/pom.xml diff --git a/core/azure-core-test/src/main/java/com/azure/core/test/InterceptorManager.java b/sdk/core/azure-core-test/src/main/java/com/azure/core/test/InterceptorManager.java similarity index 100% rename from core/azure-core-test/src/main/java/com/azure/core/test/InterceptorManager.java rename to sdk/core/azure-core-test/src/main/java/com/azure/core/test/InterceptorManager.java diff --git a/core/azure-core-test/src/main/java/com/azure/core/test/TestBase.java b/sdk/core/azure-core-test/src/main/java/com/azure/core/test/TestBase.java similarity index 100% rename from core/azure-core-test/src/main/java/com/azure/core/test/TestBase.java rename to sdk/core/azure-core-test/src/main/java/com/azure/core/test/TestBase.java diff --git a/core/azure-core-test/src/main/java/com/azure/core/test/TestMode.java b/sdk/core/azure-core-test/src/main/java/com/azure/core/test/TestMode.java similarity index 100% rename from core/azure-core-test/src/main/java/com/azure/core/test/TestMode.java rename to sdk/core/azure-core-test/src/main/java/com/azure/core/test/TestMode.java diff --git 
a/core/azure-core-test/src/main/java/com/azure/core/test/http/MockHttpResponse.java b/sdk/core/azure-core-test/src/main/java/com/azure/core/test/http/MockHttpResponse.java similarity index 100% rename from core/azure-core-test/src/main/java/com/azure/core/test/http/MockHttpResponse.java rename to sdk/core/azure-core-test/src/main/java/com/azure/core/test/http/MockHttpResponse.java
diff --git a/core/azure-core-test/src/main/java/com/azure/core/test/http/PlaybackClient.java b/sdk/core/azure-core-test/src/main/java/com/azure/core/test/http/PlaybackClient.java
similarity index 100%
rename from core/azure-core-test/src/main/java/com/azure/core/test/http/PlaybackClient.java
rename to sdk/core/azure-core-test/src/main/java/com/azure/core/test/http/PlaybackClient.java
diff --git a/core/azure-core-test/src/main/java/com/azure/core/test/http/package-info.java b/sdk/core/azure-core-test/src/main/java/com/azure/core/test/http/package-info.java
similarity index 62%
rename from core/azure-core-test/src/main/java/com/azure/core/test/http/package-info.java
rename to sdk/core/azure-core-test/src/main/java/com/azure/core/test/http/package-info.java
index eb3778c87cbc5..f98e81b7f4faa 100644
--- a/core/azure-core-test/src/main/java/com/azure/core/test/http/package-info.java
+++ b/sdk/core/azure-core-test/src/main/java/com/azure/core/test/http/package-info.java
@@ -2,6 +2,6 @@
 // Licensed under the MIT License.

 /**
- * Package contains classes to test HTTP communications in Azure client libraries.
+ * Package containing classes to test HTTP communications in Azure client libraries.
  */
 package com.azure.core.test.http;
diff --git a/core/azure-core-test/src/main/java/com/azure/core/test/models/NetworkCallRecord.java b/sdk/core/azure-core-test/src/main/java/com/azure/core/test/models/NetworkCallRecord.java
similarity index 100%
rename from core/azure-core-test/src/main/java/com/azure/core/test/models/NetworkCallRecord.java
rename to sdk/core/azure-core-test/src/main/java/com/azure/core/test/models/NetworkCallRecord.java
diff --git a/core/azure-core-test/src/main/java/com/azure/core/test/models/RecordedData.java b/sdk/core/azure-core-test/src/main/java/com/azure/core/test/models/RecordedData.java
similarity index 100%
rename from core/azure-core-test/src/main/java/com/azure/core/test/models/RecordedData.java
rename to sdk/core/azure-core-test/src/main/java/com/azure/core/test/models/RecordedData.java
diff --git a/core/azure-core-test/src/main/java/com/azure/core/test/models/package-info.java b/sdk/core/azure-core-test/src/main/java/com/azure/core/test/models/package-info.java
similarity index 67%
rename from core/azure-core-test/src/main/java/com/azure/core/test/models/package-info.java
rename to sdk/core/azure-core-test/src/main/java/com/azure/core/test/models/package-info.java
index b22156f6f6c01..060fb7b50dca9 100644
--- a/core/azure-core-test/src/main/java/com/azure/core/test/models/package-info.java
+++ b/sdk/core/azure-core-test/src/main/java/com/azure/core/test/models/package-info.java
@@ -1,6 +1,6 @@
 // Copyright (c) Microsoft Corporation. All rights reserved.
 // Licensed under the MIT License.
 /**
- * Package contains models used to test Azure client libraries.
+ * Package containing models used to test Azure client libraries.
  */
 package com.azure.core.test.models;
diff --git a/core/azure-core-test/src/main/java/com/azure/core/test/package-info.java b/sdk/core/azure-core-test/src/main/java/com/azure/core/test/package-info.java
similarity index 65%
rename from core/azure-core-test/src/main/java/com/azure/core/test/package-info.java
rename to sdk/core/azure-core-test/src/main/java/com/azure/core/test/package-info.java
index 82c5f9e75223e..1dd6f464254ff 100644
--- a/core/azure-core-test/src/main/java/com/azure/core/test/package-info.java
+++ b/sdk/core/azure-core-test/src/main/java/com/azure/core/test/package-info.java
@@ -1,6 +1,6 @@
 // Copyright (c) Microsoft Corporation. All rights reserved.
 // Licensed under the MIT License.
 /**
- * Package contains common test classes for Azure client libraries.
+ * Package containing common test classes for Azure client libraries.
  */
 package com.azure.core.test;
diff --git a/core/azure-core-test/src/main/java/com/azure/core/test/policy/RecordNetworkCallPolicy.java b/sdk/core/azure-core-test/src/main/java/com/azure/core/test/policy/RecordNetworkCallPolicy.java
similarity index 100%
rename from core/azure-core-test/src/main/java/com/azure/core/test/policy/RecordNetworkCallPolicy.java
rename to sdk/core/azure-core-test/src/main/java/com/azure/core/test/policy/RecordNetworkCallPolicy.java
diff --git a/core/azure-core-test/src/main/java/com/azure/core/test/policy/package-info.java b/sdk/core/azure-core-test/src/main/java/com/azure/core/test/policy/package-info.java
similarity index 58%
rename from core/azure-core-test/src/main/java/com/azure/core/test/policy/package-info.java
rename to sdk/core/azure-core-test/src/main/java/com/azure/core/test/policy/package-info.java
index 804d94478d955..28a5327ecbb69 100644
--- a/core/azure-core-test/src/main/java/com/azure/core/test/policy/package-info.java
+++ b/sdk/core/azure-core-test/src/main/java/com/azure/core/test/policy/package-info.java
@@ -1,7 +1,7 @@
 // Copyright (c) Microsoft Corporation. All rights reserved.
 // Licensed under the MIT License.
 /**
- * Package contains {@link com.azure.core.http.policy.HttpPipelinePolicy HttpPipelinePolicies} used to test Azure
+ * Package containing {@link com.azure.core.http.policy.HttpPipelinePolicy HttpPipelinePolicies} used to test Azure
  * client libraries.
  */
 package com.azure.core.test.policy;
diff --git a/core/azure-core-test/src/main/java/com/azure/core/test/utils/ResourceNamer.java b/sdk/core/azure-core-test/src/main/java/com/azure/core/test/utils/ResourceNamer.java
similarity index 100%
rename from core/azure-core-test/src/main/java/com/azure/core/test/utils/ResourceNamer.java
rename to sdk/core/azure-core-test/src/main/java/com/azure/core/test/utils/ResourceNamer.java
diff --git a/core/azure-core-test/src/main/java/com/azure/core/test/utils/TestResourceNamer.java b/sdk/core/azure-core-test/src/main/java/com/azure/core/test/utils/TestResourceNamer.java
similarity index 100%
rename from core/azure-core-test/src/main/java/com/azure/core/test/utils/TestResourceNamer.java
rename to sdk/core/azure-core-test/src/main/java/com/azure/core/test/utils/TestResourceNamer.java
diff --git a/core/azure-core-test/src/main/java/com/azure/core/test/utils/package-info.java b/sdk/core/azure-core-test/src/main/java/com/azure/core/test/utils/package-info.java
similarity index 63%
rename from core/azure-core-test/src/main/java/com/azure/core/test/utils/package-info.java
rename to sdk/core/azure-core-test/src/main/java/com/azure/core/test/utils/package-info.java
index 8b13830f047fd..f6b311ed0d560 100644
--- a/core/azure-core-test/src/main/java/com/azure/core/test/utils/package-info.java
+++ b/sdk/core/azure-core-test/src/main/java/com/azure/core/test/utils/package-info.java
@@ -1,6 +1,6 @@
 // Copyright (c) Microsoft Corporation. All rights reserved.
 // Licensed under the MIT License.
 /**
- * Package contains utility classes used for testing Azure client libraries.
+ * Package containing utility classes used for testing Azure client libraries.
  */
 package com.azure.core.test.utils;
diff --git a/core/azure-core/README.md b/sdk/core/azure-core/README.md
similarity index 100%
rename from core/azure-core/README.md
rename to sdk/core/azure-core/README.md
diff --git a/core/azure-core/pom.xml b/sdk/core/azure-core/pom.xml
similarity index 100%
rename from core/azure-core/pom.xml
rename to sdk/core/azure-core/pom.xml
diff --git a/core/azure-core/src/main/java/com/azure/core/AzureEnvironment.java b/sdk/core/azure-core/src/main/java/com/azure/core/AzureEnvironment.java
similarity index 100%
rename from core/azure-core/src/main/java/com/azure/core/AzureEnvironment.java
rename to sdk/core/azure-core/src/main/java/com/azure/core/AzureEnvironment.java
diff --git a/core/azure-core/src/main/java/com/azure/core/credentials/AccessToken.java b/sdk/core/azure-core/src/main/java/com/azure/core/credentials/AccessToken.java
similarity index 100%
rename from core/azure-core/src/main/java/com/azure/core/credentials/AccessToken.java
rename to sdk/core/azure-core/src/main/java/com/azure/core/credentials/AccessToken.java
diff --git a/core/azure-core/src/main/java/com/azure/core/credentials/BasicAuthenticationCredential.java b/sdk/core/azure-core/src/main/java/com/azure/core/credentials/BasicAuthenticationCredential.java
similarity index 100%
rename from core/azure-core/src/main/java/com/azure/core/credentials/BasicAuthenticationCredential.java
rename to sdk/core/azure-core/src/main/java/com/azure/core/credentials/BasicAuthenticationCredential.java
diff --git a/core/azure-core/src/main/java/com/azure/core/credentials/SimpleTokenCache.java b/sdk/core/azure-core/src/main/java/com/azure/core/credentials/SimpleTokenCache.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/credentials/SimpleTokenCache.java rename to
sdk/core/azure-core/src/main/java/com/azure/core/credentials/SimpleTokenCache.java diff --git a/core/azure-core/src/main/java/com/azure/core/credentials/TokenCredential.java b/sdk/core/azure-core/src/main/java/com/azure/core/credentials/TokenCredential.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/credentials/TokenCredential.java rename to sdk/core/azure-core/src/main/java/com/azure/core/credentials/TokenCredential.java diff --git a/core/azure-core/src/main/java/com/azure/core/credentials/package-info.java b/sdk/core/azure-core/src/main/java/com/azure/core/credentials/package-info.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/credentials/package-info.java rename to sdk/core/azure-core/src/main/java/com/azure/core/credentials/package-info.java diff --git a/core/azure-core/src/main/java/com/azure/core/exception/AzureException.java b/sdk/core/azure-core/src/main/java/com/azure/core/exception/AzureException.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/exception/AzureException.java rename to sdk/core/azure-core/src/main/java/com/azure/core/exception/AzureException.java diff --git a/core/azure-core/src/main/java/com/azure/core/exception/ClientAuthenticationException.java b/sdk/core/azure-core/src/main/java/com/azure/core/exception/ClientAuthenticationException.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/exception/ClientAuthenticationException.java rename to sdk/core/azure-core/src/main/java/com/azure/core/exception/ClientAuthenticationException.java diff --git a/core/azure-core/src/main/java/com/azure/core/exception/DecodeException.java b/sdk/core/azure-core/src/main/java/com/azure/core/exception/DecodeException.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/exception/DecodeException.java rename to sdk/core/azure-core/src/main/java/com/azure/core/exception/DecodeException.java diff --git a/core/azure-core/src/main/java/com/azure/core/exception/HttpRequestException.java b/sdk/core/azure-core/src/main/java/com/azure/core/exception/HttpRequestException.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/exception/HttpRequestException.java rename to sdk/core/azure-core/src/main/java/com/azure/core/exception/HttpRequestException.java diff --git a/core/azure-core/src/main/java/com/azure/core/exception/HttpResponseException.java b/sdk/core/azure-core/src/main/java/com/azure/core/exception/HttpResponseException.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/exception/HttpResponseException.java rename to sdk/core/azure-core/src/main/java/com/azure/core/exception/HttpResponseException.java diff --git a/core/azure-core/src/main/java/com/azure/core/exception/ResourceExistsException.java b/sdk/core/azure-core/src/main/java/com/azure/core/exception/ResourceExistsException.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/exception/ResourceExistsException.java rename to sdk/core/azure-core/src/main/java/com/azure/core/exception/ResourceExistsException.java diff --git a/core/azure-core/src/main/java/com/azure/core/exception/ResourceModifiedException.java b/sdk/core/azure-core/src/main/java/com/azure/core/exception/ResourceModifiedException.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/exception/ResourceModifiedException.java rename to 
sdk/core/azure-core/src/main/java/com/azure/core/exception/ResourceModifiedException.java diff --git a/core/azure-core/src/main/java/com/azure/core/exception/ResourceNotFoundException.java b/sdk/core/azure-core/src/main/java/com/azure/core/exception/ResourceNotFoundException.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/exception/ResourceNotFoundException.java rename to sdk/core/azure-core/src/main/java/com/azure/core/exception/ResourceNotFoundException.java diff --git a/core/azure-core/src/main/java/com/azure/core/exception/ServiceResponseException.java b/sdk/core/azure-core/src/main/java/com/azure/core/exception/ServiceResponseException.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/exception/ServiceResponseException.java rename to sdk/core/azure-core/src/main/java/com/azure/core/exception/ServiceResponseException.java diff --git a/core/azure-core/src/main/java/com/azure/core/exception/TooManyRedirectsException.java b/sdk/core/azure-core/src/main/java/com/azure/core/exception/TooManyRedirectsException.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/exception/TooManyRedirectsException.java rename to sdk/core/azure-core/src/main/java/com/azure/core/exception/TooManyRedirectsException.java diff --git a/core/azure-core/src/main/java/com/azure/core/exception/package-info.java b/sdk/core/azure-core/src/main/java/com/azure/core/exception/package-info.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/exception/package-info.java rename to sdk/core/azure-core/src/main/java/com/azure/core/exception/package-info.java diff --git a/core/azure-core/src/main/java/com/azure/core/http/HttpClient.java b/sdk/core/azure-core/src/main/java/com/azure/core/http/HttpClient.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/http/HttpClient.java rename to sdk/core/azure-core/src/main/java/com/azure/core/http/HttpClient.java diff --git a/core/azure-core/src/main/java/com/azure/core/http/HttpHeader.java b/sdk/core/azure-core/src/main/java/com/azure/core/http/HttpHeader.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/http/HttpHeader.java rename to sdk/core/azure-core/src/main/java/com/azure/core/http/HttpHeader.java diff --git a/core/azure-core/src/main/java/com/azure/core/http/HttpHeaders.java b/sdk/core/azure-core/src/main/java/com/azure/core/http/HttpHeaders.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/http/HttpHeaders.java rename to sdk/core/azure-core/src/main/java/com/azure/core/http/HttpHeaders.java diff --git a/core/azure-core/src/main/java/com/azure/core/http/HttpMethod.java b/sdk/core/azure-core/src/main/java/com/azure/core/http/HttpMethod.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/http/HttpMethod.java rename to sdk/core/azure-core/src/main/java/com/azure/core/http/HttpMethod.java diff --git a/core/azure-core/src/main/java/com/azure/core/http/HttpPipeline.java b/sdk/core/azure-core/src/main/java/com/azure/core/http/HttpPipeline.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/http/HttpPipeline.java rename to sdk/core/azure-core/src/main/java/com/azure/core/http/HttpPipeline.java diff --git a/core/azure-core/src/main/java/com/azure/core/http/HttpPipelineBuilder.java b/sdk/core/azure-core/src/main/java/com/azure/core/http/HttpPipelineBuilder.java similarity index 100% rename from 
core/azure-core/src/main/java/com/azure/core/http/HttpPipelineBuilder.java rename to sdk/core/azure-core/src/main/java/com/azure/core/http/HttpPipelineBuilder.java diff --git a/core/azure-core/src/main/java/com/azure/core/http/HttpPipelineCallContext.java b/sdk/core/azure-core/src/main/java/com/azure/core/http/HttpPipelineCallContext.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/http/HttpPipelineCallContext.java rename to sdk/core/azure-core/src/main/java/com/azure/core/http/HttpPipelineCallContext.java diff --git a/core/azure-core/src/main/java/com/azure/core/http/HttpPipelineNextPolicy.java b/sdk/core/azure-core/src/main/java/com/azure/core/http/HttpPipelineNextPolicy.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/http/HttpPipelineNextPolicy.java rename to sdk/core/azure-core/src/main/java/com/azure/core/http/HttpPipelineNextPolicy.java diff --git a/core/azure-core/src/main/java/com/azure/core/http/HttpRequest.java b/sdk/core/azure-core/src/main/java/com/azure/core/http/HttpRequest.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/http/HttpRequest.java rename to sdk/core/azure-core/src/main/java/com/azure/core/http/HttpRequest.java diff --git a/core/azure-core/src/main/java/com/azure/core/http/HttpResponse.java b/sdk/core/azure-core/src/main/java/com/azure/core/http/HttpResponse.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/http/HttpResponse.java rename to sdk/core/azure-core/src/main/java/com/azure/core/http/HttpResponse.java diff --git a/core/azure-core/src/main/java/com/azure/core/http/ProxyOptions.java b/sdk/core/azure-core/src/main/java/com/azure/core/http/ProxyOptions.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/http/ProxyOptions.java rename to sdk/core/azure-core/src/main/java/com/azure/core/http/ProxyOptions.java diff --git a/core/azure-core/src/main/java/com/azure/core/http/ReactorNettyClient.java b/sdk/core/azure-core/src/main/java/com/azure/core/http/ReactorNettyClient.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/http/ReactorNettyClient.java rename to sdk/core/azure-core/src/main/java/com/azure/core/http/ReactorNettyClient.java diff --git a/core/azure-core/src/main/java/com/azure/core/http/package-info.java b/sdk/core/azure-core/src/main/java/com/azure/core/http/package-info.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/http/package-info.java rename to sdk/core/azure-core/src/main/java/com/azure/core/http/package-info.java diff --git a/core/azure-core/src/main/java/com/azure/core/http/policy/AddDatePolicy.java b/sdk/core/azure-core/src/main/java/com/azure/core/http/policy/AddDatePolicy.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/http/policy/AddDatePolicy.java rename to sdk/core/azure-core/src/main/java/com/azure/core/http/policy/AddDatePolicy.java diff --git a/core/azure-core/src/main/java/com/azure/core/http/policy/AddHeadersPolicy.java b/sdk/core/azure-core/src/main/java/com/azure/core/http/policy/AddHeadersPolicy.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/http/policy/AddHeadersPolicy.java rename to sdk/core/azure-core/src/main/java/com/azure/core/http/policy/AddHeadersPolicy.java diff --git a/core/azure-core/src/main/java/com/azure/core/http/policy/BearerTokenAuthenticationPolicy.java 
b/sdk/core/azure-core/src/main/java/com/azure/core/http/policy/BearerTokenAuthenticationPolicy.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/http/policy/BearerTokenAuthenticationPolicy.java rename to sdk/core/azure-core/src/main/java/com/azure/core/http/policy/BearerTokenAuthenticationPolicy.java diff --git a/core/azure-core/src/main/java/com/azure/core/http/policy/CookiePolicy.java b/sdk/core/azure-core/src/main/java/com/azure/core/http/policy/CookiePolicy.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/http/policy/CookiePolicy.java rename to sdk/core/azure-core/src/main/java/com/azure/core/http/policy/CookiePolicy.java diff --git a/core/azure-core/src/main/java/com/azure/core/http/policy/HostPolicy.java b/sdk/core/azure-core/src/main/java/com/azure/core/http/policy/HostPolicy.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/http/policy/HostPolicy.java rename to sdk/core/azure-core/src/main/java/com/azure/core/http/policy/HostPolicy.java diff --git a/core/azure-core/src/main/java/com/azure/core/http/policy/HttpLogDetailLevel.java b/sdk/core/azure-core/src/main/java/com/azure/core/http/policy/HttpLogDetailLevel.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/http/policy/HttpLogDetailLevel.java rename to sdk/core/azure-core/src/main/java/com/azure/core/http/policy/HttpLogDetailLevel.java diff --git a/core/azure-core/src/main/java/com/azure/core/http/policy/HttpLoggingPolicy.java b/sdk/core/azure-core/src/main/java/com/azure/core/http/policy/HttpLoggingPolicy.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/http/policy/HttpLoggingPolicy.java rename to sdk/core/azure-core/src/main/java/com/azure/core/http/policy/HttpLoggingPolicy.java diff --git a/core/azure-core/src/main/java/com/azure/core/http/policy/HttpPipelinePolicy.java b/sdk/core/azure-core/src/main/java/com/azure/core/http/policy/HttpPipelinePolicy.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/http/policy/HttpPipelinePolicy.java rename to sdk/core/azure-core/src/main/java/com/azure/core/http/policy/HttpPipelinePolicy.java diff --git a/core/azure-core/src/main/java/com/azure/core/http/policy/PortPolicy.java b/sdk/core/azure-core/src/main/java/com/azure/core/http/policy/PortPolicy.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/http/policy/PortPolicy.java rename to sdk/core/azure-core/src/main/java/com/azure/core/http/policy/PortPolicy.java diff --git a/core/azure-core/src/main/java/com/azure/core/http/policy/ProtocolPolicy.java b/sdk/core/azure-core/src/main/java/com/azure/core/http/policy/ProtocolPolicy.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/http/policy/ProtocolPolicy.java rename to sdk/core/azure-core/src/main/java/com/azure/core/http/policy/ProtocolPolicy.java diff --git a/core/azure-core/src/main/java/com/azure/core/http/policy/ProxyAuthenticationPolicy.java b/sdk/core/azure-core/src/main/java/com/azure/core/http/policy/ProxyAuthenticationPolicy.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/http/policy/ProxyAuthenticationPolicy.java rename to sdk/core/azure-core/src/main/java/com/azure/core/http/policy/ProxyAuthenticationPolicy.java diff --git a/core/azure-core/src/main/java/com/azure/core/http/policy/RequestIdPolicy.java 
b/sdk/core/azure-core/src/main/java/com/azure/core/http/policy/RequestIdPolicy.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/http/policy/RequestIdPolicy.java rename to sdk/core/azure-core/src/main/java/com/azure/core/http/policy/RequestIdPolicy.java diff --git a/core/azure-core/src/main/java/com/azure/core/http/policy/RetryPolicy.java b/sdk/core/azure-core/src/main/java/com/azure/core/http/policy/RetryPolicy.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/http/policy/RetryPolicy.java rename to sdk/core/azure-core/src/main/java/com/azure/core/http/policy/RetryPolicy.java diff --git a/core/azure-core/src/main/java/com/azure/core/http/policy/TimeoutPolicy.java b/sdk/core/azure-core/src/main/java/com/azure/core/http/policy/TimeoutPolicy.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/http/policy/TimeoutPolicy.java rename to sdk/core/azure-core/src/main/java/com/azure/core/http/policy/TimeoutPolicy.java diff --git a/core/azure-core/src/main/java/com/azure/core/http/policy/UserAgentPolicy.java b/sdk/core/azure-core/src/main/java/com/azure/core/http/policy/UserAgentPolicy.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/http/policy/UserAgentPolicy.java rename to sdk/core/azure-core/src/main/java/com/azure/core/http/policy/UserAgentPolicy.java diff --git a/core/azure-core/src/main/java/com/azure/core/http/policy/package-info.java b/sdk/core/azure-core/src/main/java/com/azure/core/http/policy/package-info.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/http/policy/package-info.java rename to sdk/core/azure-core/src/main/java/com/azure/core/http/policy/package-info.java diff --git a/core/azure-core/src/main/java/com/azure/core/http/rest/Page.java b/sdk/core/azure-core/src/main/java/com/azure/core/http/rest/Page.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/http/rest/Page.java rename to sdk/core/azure-core/src/main/java/com/azure/core/http/rest/Page.java diff --git a/core/azure-core/src/main/java/com/azure/core/http/rest/PagedFlux.java b/sdk/core/azure-core/src/main/java/com/azure/core/http/rest/PagedFlux.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/http/rest/PagedFlux.java rename to sdk/core/azure-core/src/main/java/com/azure/core/http/rest/PagedFlux.java diff --git a/core/azure-core/src/main/java/com/azure/core/http/rest/PagedResponse.java b/sdk/core/azure-core/src/main/java/com/azure/core/http/rest/PagedResponse.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/http/rest/PagedResponse.java rename to sdk/core/azure-core/src/main/java/com/azure/core/http/rest/PagedResponse.java diff --git a/core/azure-core/src/main/java/com/azure/core/http/rest/Response.java b/sdk/core/azure-core/src/main/java/com/azure/core/http/rest/Response.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/http/rest/Response.java rename to sdk/core/azure-core/src/main/java/com/azure/core/http/rest/Response.java diff --git a/core/azure-core/src/main/java/com/azure/core/http/rest/ResponseBase.java b/sdk/core/azure-core/src/main/java/com/azure/core/http/rest/ResponseBase.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/http/rest/ResponseBase.java rename to sdk/core/azure-core/src/main/java/com/azure/core/http/rest/ResponseBase.java diff --git 
a/core/azure-core/src/main/java/com/azure/core/http/rest/SimpleResponse.java b/sdk/core/azure-core/src/main/java/com/azure/core/http/rest/SimpleResponse.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/http/rest/SimpleResponse.java rename to sdk/core/azure-core/src/main/java/com/azure/core/http/rest/SimpleResponse.java diff --git a/core/azure-core/src/main/java/com/azure/core/http/rest/StreamResponse.java b/sdk/core/azure-core/src/main/java/com/azure/core/http/rest/StreamResponse.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/http/rest/StreamResponse.java rename to sdk/core/azure-core/src/main/java/com/azure/core/http/rest/StreamResponse.java diff --git a/core/azure-core/src/main/java/com/azure/core/http/rest/VoidResponse.java b/sdk/core/azure-core/src/main/java/com/azure/core/http/rest/VoidResponse.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/http/rest/VoidResponse.java rename to sdk/core/azure-core/src/main/java/com/azure/core/http/rest/VoidResponse.java diff --git a/core/azure-core/src/main/java/com/azure/core/http/rest/package-info.java b/sdk/core/azure-core/src/main/java/com/azure/core/http/rest/package-info.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/http/rest/package-info.java rename to sdk/core/azure-core/src/main/java/com/azure/core/http/rest/package-info.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/Base64Url.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/Base64Url.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/Base64Url.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/Base64Url.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/CollectionFormat.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/CollectionFormat.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/CollectionFormat.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/CollectionFormat.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/DateTimeRfc1123.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/DateTimeRfc1123.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/DateTimeRfc1123.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/DateTimeRfc1123.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/EncodedParameter.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/EncodedParameter.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/EncodedParameter.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/EncodedParameter.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/OperationDescription.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/OperationDescription.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/OperationDescription.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/OperationDescription.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/PercentEscaper.java 
b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/PercentEscaper.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/PercentEscaper.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/PercentEscaper.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/RestProxy.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/RestProxy.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/RestProxy.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/RestProxy.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/Substitution.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/Substitution.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/Substitution.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/Substitution.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/SwaggerInterfaceParser.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/SwaggerInterfaceParser.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/SwaggerInterfaceParser.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/SwaggerInterfaceParser.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/SwaggerMethodParser.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/SwaggerMethodParser.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/SwaggerMethodParser.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/SwaggerMethodParser.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/UnexpectedExceptionInformation.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/UnexpectedExceptionInformation.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/UnexpectedExceptionInformation.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/UnexpectedExceptionInformation.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/UnixTime.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/UnixTime.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/UnixTime.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/UnixTime.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/UrlEscapers.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/UrlEscapers.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/UrlEscapers.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/UrlEscapers.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/Validator.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/Validator.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/Validator.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/Validator.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/annotation/Beta.java 
b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/Beta.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/annotation/Beta.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/Beta.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/annotation/BodyParam.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/BodyParam.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/annotation/BodyParam.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/BodyParam.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/annotation/Delete.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/Delete.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/annotation/Delete.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/Delete.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/annotation/ExpectedResponses.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/ExpectedResponses.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/annotation/ExpectedResponses.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/ExpectedResponses.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/annotation/Fluent.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/Fluent.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/annotation/Fluent.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/Fluent.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/annotation/FormParam.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/FormParam.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/annotation/FormParam.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/FormParam.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/annotation/Get.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/Get.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/annotation/Get.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/Get.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/annotation/Head.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/Head.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/annotation/Head.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/Head.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/annotation/HeaderCollection.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/HeaderCollection.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/annotation/HeaderCollection.java rename to 
sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/HeaderCollection.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/annotation/HeaderParam.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/HeaderParam.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/annotation/HeaderParam.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/HeaderParam.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/annotation/Headers.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/Headers.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/annotation/Headers.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/Headers.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/annotation/Host.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/Host.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/annotation/Host.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/Host.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/annotation/HostParam.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/HostParam.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/annotation/HostParam.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/HostParam.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/annotation/Immutable.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/Immutable.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/annotation/Immutable.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/Immutable.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/annotation/JsonFlatten.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/JsonFlatten.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/annotation/JsonFlatten.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/JsonFlatten.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/annotation/Patch.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/Patch.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/annotation/Patch.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/Patch.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/annotation/PathParam.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/PathParam.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/annotation/PathParam.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/PathParam.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/annotation/Post.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/Post.java similarity index 
100% rename from core/azure-core/src/main/java/com/azure/core/implementation/annotation/Post.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/Post.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/annotation/Put.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/Put.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/annotation/Put.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/Put.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/annotation/QueryParam.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/QueryParam.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/annotation/QueryParam.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/QueryParam.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/annotation/ResumeOperation.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/ResumeOperation.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/annotation/ResumeOperation.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/ResumeOperation.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/annotation/ReturnType.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/ReturnType.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/annotation/ReturnType.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/ReturnType.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/annotation/ReturnValueWireType.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/ReturnValueWireType.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/annotation/ReturnValueWireType.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/ReturnValueWireType.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/annotation/ServiceClient.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/ServiceClient.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/annotation/ServiceClient.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/ServiceClient.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/annotation/ServiceClientBuilder.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/ServiceClientBuilder.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/annotation/ServiceClientBuilder.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/ServiceClientBuilder.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/annotation/ServiceInterface.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/ServiceInterface.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/annotation/ServiceInterface.java rename to 
sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/ServiceInterface.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/annotation/ServiceMethod.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/ServiceMethod.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/annotation/ServiceMethod.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/ServiceMethod.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/annotation/SkipParentValidation.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/SkipParentValidation.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/annotation/SkipParentValidation.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/SkipParentValidation.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/annotation/UnexpectedResponseExceptionType.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/UnexpectedResponseExceptionType.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/annotation/UnexpectedResponseExceptionType.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/UnexpectedResponseExceptionType.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/annotation/UnexpectedResponseExceptionTypes.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/UnexpectedResponseExceptionTypes.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/annotation/UnexpectedResponseExceptionTypes.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/UnexpectedResponseExceptionTypes.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/annotation/package-info.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/package-info.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/annotation/package-info.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/annotation/package-info.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/exception/InvalidReturnTypeException.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/exception/InvalidReturnTypeException.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/exception/InvalidReturnTypeException.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/exception/InvalidReturnTypeException.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/exception/MissingRequiredAnnotationException.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/exception/MissingRequiredAnnotationException.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/exception/MissingRequiredAnnotationException.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/exception/MissingRequiredAnnotationException.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/exception/package-info.java 
b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/exception/package-info.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/exception/package-info.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/exception/package-info.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/http/BufferedHttpResponse.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/http/BufferedHttpResponse.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/http/BufferedHttpResponse.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/http/BufferedHttpResponse.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/http/ContentType.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/http/ContentType.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/http/ContentType.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/http/ContentType.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/http/PagedResponseBase.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/http/PagedResponseBase.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/http/PagedResponseBase.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/http/PagedResponseBase.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/http/UrlBuilder.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/http/UrlBuilder.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/http/UrlBuilder.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/http/UrlBuilder.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/http/UrlToken.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/http/UrlToken.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/http/UrlToken.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/http/UrlToken.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/http/UrlTokenType.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/http/UrlTokenType.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/http/UrlTokenType.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/http/UrlTokenType.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/http/UrlTokenizer.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/http/UrlTokenizer.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/http/UrlTokenizer.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/http/UrlTokenizer.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/http/UrlTokenizerState.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/http/UrlTokenizerState.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/http/UrlTokenizerState.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/http/UrlTokenizerState.java 
diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/http/package-info.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/http/package-info.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/http/package-info.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/http/package-info.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/http/policy/spi/AfterRetryPolicyProvider.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/http/policy/spi/AfterRetryPolicyProvider.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/http/policy/spi/AfterRetryPolicyProvider.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/http/policy/spi/AfterRetryPolicyProvider.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/http/policy/spi/BeforeRetryPolicyProvider.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/http/policy/spi/BeforeRetryPolicyProvider.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/http/policy/spi/BeforeRetryPolicyProvider.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/http/policy/spi/BeforeRetryPolicyProvider.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/http/policy/spi/HttpPolicyProviders.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/http/policy/spi/HttpPolicyProviders.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/http/policy/spi/HttpPolicyProviders.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/http/policy/spi/HttpPolicyProviders.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/http/policy/spi/PolicyProvider.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/http/policy/spi/PolicyProvider.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/http/policy/spi/PolicyProvider.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/http/policy/spi/PolicyProvider.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/http/policy/spi/package-info.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/http/policy/spi/package-info.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/http/policy/spi/package-info.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/http/policy/spi/package-info.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/package-info.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/package-info.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/package-info.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/package-info.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/serializer/HttpResponseBodyDecoder.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/serializer/HttpResponseBodyDecoder.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/serializer/HttpResponseBodyDecoder.java rename to 
sdk/core/azure-core/src/main/java/com/azure/core/implementation/serializer/HttpResponseBodyDecoder.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/serializer/HttpResponseDecodeData.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/serializer/HttpResponseDecodeData.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/serializer/HttpResponseDecodeData.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/serializer/HttpResponseDecodeData.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/serializer/HttpResponseDecoder.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/serializer/HttpResponseDecoder.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/serializer/HttpResponseDecoder.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/serializer/HttpResponseDecoder.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/serializer/HttpResponseHeaderDecoder.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/serializer/HttpResponseHeaderDecoder.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/serializer/HttpResponseHeaderDecoder.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/serializer/HttpResponseHeaderDecoder.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/serializer/ItemPage.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/serializer/ItemPage.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/serializer/ItemPage.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/serializer/ItemPage.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/serializer/MalformedValueException.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/serializer/MalformedValueException.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/serializer/MalformedValueException.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/serializer/MalformedValueException.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/serializer/SerializerAdapter.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/serializer/SerializerAdapter.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/serializer/SerializerAdapter.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/serializer/SerializerAdapter.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/serializer/SerializerEncoding.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/serializer/SerializerEncoding.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/serializer/SerializerEncoding.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/serializer/SerializerEncoding.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/serializer/jackson/AdditionalPropertiesDeserializer.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/serializer/jackson/AdditionalPropertiesDeserializer.java similarity index 100% rename from 
core/azure-core/src/main/java/com/azure/core/implementation/serializer/jackson/AdditionalPropertiesDeserializer.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/serializer/jackson/AdditionalPropertiesDeserializer.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/serializer/jackson/AdditionalPropertiesSerializer.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/serializer/jackson/AdditionalPropertiesSerializer.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/serializer/jackson/AdditionalPropertiesSerializer.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/serializer/jackson/AdditionalPropertiesSerializer.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/serializer/jackson/Base64UrlSerializer.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/serializer/jackson/Base64UrlSerializer.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/serializer/jackson/Base64UrlSerializer.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/serializer/jackson/Base64UrlSerializer.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/serializer/jackson/ByteArraySerializer.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/serializer/jackson/ByteArraySerializer.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/serializer/jackson/ByteArraySerializer.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/serializer/jackson/ByteArraySerializer.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/serializer/jackson/DateTimeRfc1123Serializer.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/serializer/jackson/DateTimeRfc1123Serializer.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/serializer/jackson/DateTimeRfc1123Serializer.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/serializer/jackson/DateTimeRfc1123Serializer.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/serializer/jackson/DateTimeSerializer.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/serializer/jackson/DateTimeSerializer.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/serializer/jackson/DateTimeSerializer.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/serializer/jackson/DateTimeSerializer.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/serializer/jackson/DurationSerializer.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/serializer/jackson/DurationSerializer.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/serializer/jackson/DurationSerializer.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/serializer/jackson/DurationSerializer.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/serializer/jackson/FlatteningDeserializer.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/serializer/jackson/FlatteningDeserializer.java similarity index 100% rename from 
core/azure-core/src/main/java/com/azure/core/implementation/serializer/jackson/FlatteningDeserializer.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/serializer/jackson/FlatteningDeserializer.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/serializer/jackson/FlatteningSerializer.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/serializer/jackson/FlatteningSerializer.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/serializer/jackson/FlatteningSerializer.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/serializer/jackson/FlatteningSerializer.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/serializer/jackson/JacksonAdapter.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/serializer/jackson/JacksonAdapter.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/serializer/jackson/JacksonAdapter.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/serializer/jackson/JacksonAdapter.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/serializer/jackson/package-info.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/serializer/jackson/package-info.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/serializer/jackson/package-info.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/serializer/jackson/package-info.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/serializer/package-info.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/serializer/package-info.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/serializer/package-info.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/serializer/package-info.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/tracing/Tracer.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/tracing/Tracer.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/tracing/Tracer.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/tracing/Tracer.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/tracing/TracerProxy.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/tracing/TracerProxy.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/tracing/TracerProxy.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/tracing/TracerProxy.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/util/Base64Util.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/util/Base64Util.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/util/Base64Util.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/util/Base64Util.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/util/FluxUtil.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/util/FluxUtil.java similarity index 87% rename from core/azure-core/src/main/java/com/azure/core/implementation/util/FluxUtil.java rename to 
sdk/core/azure-core/src/main/java/com/azure/core/implementation/util/FluxUtil.java index 1ec6329fdf95d..f8541f0983931 100644 --- a/core/azure-core/src/main/java/com/azure/core/implementation/util/FluxUtil.java +++ b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/util/FluxUtil.java @@ -3,11 +3,22 @@ package com.azure.core.implementation.util; +import com.azure.core.util.Context; import io.netty.buffer.ByteBuf; import io.netty.buffer.ByteBufAllocator; import io.netty.buffer.CompositeByteBuf; import io.netty.buffer.Unpooled; import io.netty.util.ReferenceCountUtil; +import java.io.IOException; +import java.lang.reflect.Type; +import java.nio.channels.AsynchronousFileChannel; +import java.nio.channels.CompletionHandler; +import java.util.Map; +import java.util.Map.Entry; +import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; +import java.util.concurrent.atomic.AtomicLongFieldUpdater; +import java.util.function.Function; +import java.util.stream.Collectors; import org.reactivestreams.Subscriber; import org.reactivestreams.Subscription; import reactor.core.CoreSubscriber; @@ -16,13 +27,6 @@ import reactor.core.publisher.MonoSink; import reactor.core.publisher.Operators; -import java.io.IOException; -import java.lang.reflect.Type; -import java.nio.channels.AsynchronousFileChannel; -import java.nio.channels.CompletionHandler; -import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; -import java.util.concurrent.atomic.AtomicLongFieldUpdater; - /** * Utility type exposing methods to deal with {@link Flux}. */ @@ -174,6 +178,64 @@ public static Mono bytebufStreamToFile(Flux content, Asynchronous return Mono.create(emitter -> content.subscribe(new ByteBufToFileSubscriber(outFile, position, emitter))); } + /** + * This method converts the incoming {@code subscriberContext} from {@link reactor.util.context.Context Reactor + * Context} to {@link Context Azure Context} and calls the given lambda function with this context and returns a + * single entity of type {@code T} + *

<p> + * If the reactor context is empty, {@link Context#NONE} will be used to call the lambda function + * </p> + *
+ * <p><strong>Code samples</strong></p>
+ * {@codesnippet com.azure.core.implementation.util.fluxutil.monocontext} + * + * @param serviceCall The lambda function that makes the service call into which azure context will be passed + * @param <T> The type of response returned from the service call + * @return The response from service call + */ + public static <T> Mono<T> monoContext(Function<Context, Mono<T>> serviceCall) { + return Mono.subscriberContext() + .map(FluxUtil::toAzureContext) + .flatMap(serviceCall); + } + + /** + * This method converts the incoming {@code subscriberContext} from {@link reactor.util.context.Context Reactor + * Context} to {@link Context Azure Context} and calls the given lambda function with this context and returns a + * collection of type {@code T} + * <p>
+ * If the reactor context is empty, {@link Context#NONE} will be used to call the lambda function + * </p> + *
+ * <p><strong>Code samples</strong></p>
+ * {@codesnippet com.azure.core.implementation.util.fluxutil.fluxcontext} + * + * @param serviceCall The lambda function that makes the service call into which the context will be passed + * @param The type of response returned from the service call + * @return The response from service call + */ + public static Flux fluxContext(Function> serviceCall) { + return Mono.subscriberContext() + .map(FluxUtil::toAzureContext) + .flatMapMany(serviceCall); + } + + /** + * Converts a reactor context to azure context. If the reactor context is {@code null} or empty, + * {@link Context#NONE} will be returned. + * + * @param context The reactor context + * @return The azure context + */ + private static Context toAzureContext(reactor.util.context.Context context) { + Map keyValues = context.stream() + .collect(Collectors.toMap(Entry::getKey, Entry::getValue)); + if (ImplUtils.isNullOrEmpty(keyValues)) { + return Context.NONE; + } + return Context.of(keyValues); + } + private static class ByteBufToFileSubscriber implements Subscriber { private ByteBufToFileSubscriber(AsynchronousFileChannel outFile, long position, MonoSink emitter) { this.outFile = outFile; diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/util/ImplUtils.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/util/ImplUtils.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/util/ImplUtils.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/util/ImplUtils.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/util/ScopeUtil.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/util/ScopeUtil.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/util/ScopeUtil.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/util/ScopeUtil.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/util/TypeUtil.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/util/TypeUtil.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/util/TypeUtil.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/util/TypeUtil.java diff --git a/core/azure-core/src/main/java/com/azure/core/implementation/util/package-info.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/util/package-info.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/implementation/util/package-info.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/util/package-info.java diff --git a/core/azure-core/src/main/java/com/azure/core/package-info.java b/sdk/core/azure-core/src/main/java/com/azure/core/package-info.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/package-info.java rename to sdk/core/azure-core/src/main/java/com/azure/core/package-info.java diff --git a/core/azure-core/src/main/java/com/azure/core/util/Context.java b/sdk/core/azure-core/src/main/java/com/azure/core/util/Context.java similarity index 73% rename from core/azure-core/src/main/java/com/azure/core/util/Context.java rename to sdk/core/azure-core/src/main/java/com/azure/core/util/Context.java index 9d3b1d83477d0..bb58ed2987377 100644 --- a/core/azure-core/src/main/java/com/azure/core/util/Context.java +++ b/sdk/core/azure-core/src/main/java/com/azure/core/util/Context.java @@ -4,6 +4,8 
@@ package com.azure.core.util; import com.azure.core.implementation.annotation.Immutable; +import com.azure.core.implementation.util.ImplUtils; +import java.util.Map; import java.util.Optional; /** @@ -63,6 +65,30 @@ public Context addData(Object key, Object value) { return new Context(this, key, value); } + /** + * Creates a new immutable {@link Context} object with all the keys and values provided by + * the input {@link Map} + * + * @param keyValues The input key value pairs that will be added to this context + * @return Context object containing all the key-value pairs in the input map + * @throws IllegalArgumentException If {@code keyValues} is {@code null} or empty + */ + public static Context of(Map keyValues) { + if (ImplUtils.isNullOrEmpty(keyValues)) { + throw new IllegalArgumentException("Key value map cannot be null or empty"); + } + + Context context = null; + for (Map.Entry entry : keyValues.entrySet()) { + if (context == null) { + context = new Context(entry.getKey(), entry.getValue()); + } else { + context = context.addData(entry.getKey(), entry.getValue()); + } + } + return context; + } + /** * Scans the linked-list of {@link Context} objects looking for one with the specified key. * Note that the first key found, i.e. the most recently added, will be returned. diff --git a/core/azure-core/src/main/java/com/azure/core/util/ExpandableStringEnum.java b/sdk/core/azure-core/src/main/java/com/azure/core/util/ExpandableStringEnum.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/util/ExpandableStringEnum.java rename to sdk/core/azure-core/src/main/java/com/azure/core/util/ExpandableStringEnum.java diff --git a/core/azure-core/src/main/java/com/azure/core/util/configuration/BaseConfigurations.java b/sdk/core/azure-core/src/main/java/com/azure/core/util/configuration/BaseConfigurations.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/util/configuration/BaseConfigurations.java rename to sdk/core/azure-core/src/main/java/com/azure/core/util/configuration/BaseConfigurations.java diff --git a/core/azure-core/src/main/java/com/azure/core/util/configuration/Configuration.java b/sdk/core/azure-core/src/main/java/com/azure/core/util/configuration/Configuration.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/util/configuration/Configuration.java rename to sdk/core/azure-core/src/main/java/com/azure/core/util/configuration/Configuration.java diff --git a/core/azure-core/src/main/java/com/azure/core/util/configuration/ConfigurationManager.java b/sdk/core/azure-core/src/main/java/com/azure/core/util/configuration/ConfigurationManager.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/util/configuration/ConfigurationManager.java rename to sdk/core/azure-core/src/main/java/com/azure/core/util/configuration/ConfigurationManager.java diff --git a/core/azure-core/src/main/java/com/azure/core/util/configuration/NoopConfiguration.java b/sdk/core/azure-core/src/main/java/com/azure/core/util/configuration/NoopConfiguration.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/util/configuration/NoopConfiguration.java rename to sdk/core/azure-core/src/main/java/com/azure/core/util/configuration/NoopConfiguration.java diff --git a/core/azure-core/src/main/java/com/azure/core/util/configuration/package-info.java b/sdk/core/azure-core/src/main/java/com/azure/core/util/configuration/package-info.java similarity index 100% rename from 
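[Editor's sketch, not part of the diff] The hunks above add FluxUtil.monoContext/fluxContext and Context.of(Map) to azure-core. A minimal sketch of how they fit together, assuming an invented class, method names, and a hypothetical "trace-id" key that do not exist in this change set: the caller drops values into the Reactor subscriber context, and the wrapped service call receives them as an azure-core Context (built internally via Context.of).

```java
import com.azure.core.implementation.util.FluxUtil;
import com.azure.core.util.Context;
import reactor.core.publisher.Mono;

public class ContextPropagationSketch {

    // Library side: FluxUtil.monoContext hands the caller's context to the service-call lambda.
    public Mono<String> getGreeting(String name) {
        return FluxUtil.monoContext(context -> callService(name, context));
    }

    // Hypothetical service call: reads an optional value from the propagated azure-core Context.
    private Mono<String> callService(String name, Context context) {
        Object traceId = context.getData("trace-id").orElse("<none>");
        return Mono.just("Hello, " + name + " (trace-id: " + traceId + ")");
    }

    public static void main(String[] args) {
        String greeting = new ContextPropagationSketch().getGreeting("world")
            // Caller side: values placed in the Reactor subscriber context are converted to an
            // azure-core Context before the service-call lambda above runs.
            .subscriberContext(reactor.util.context.Context.of("trace-id", "abc-123"))
            .block();
        System.out.println(greeting); // Hello, world (trace-id: abc-123)
    }
}
```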
core/azure-core/src/main/java/com/azure/core/util/configuration/package-info.java rename to sdk/core/azure-core/src/main/java/com/azure/core/util/configuration/package-info.java diff --git a/core/azure-core/src/main/java/com/azure/core/util/logging/ClientLogger.java b/sdk/core/azure-core/src/main/java/com/azure/core/util/logging/ClientLogger.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/util/logging/ClientLogger.java rename to sdk/core/azure-core/src/main/java/com/azure/core/util/logging/ClientLogger.java diff --git a/core/azure-core/src/main/java/com/azure/core/util/logging/package-info.java b/sdk/core/azure-core/src/main/java/com/azure/core/util/logging/package-info.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/util/logging/package-info.java rename to sdk/core/azure-core/src/main/java/com/azure/core/util/logging/package-info.java diff --git a/core/azure-core/src/main/java/com/azure/core/util/package-info.java b/sdk/core/azure-core/src/main/java/com/azure/core/util/package-info.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/util/package-info.java rename to sdk/core/azure-core/src/main/java/com/azure/core/util/package-info.java diff --git a/core/azure-core/src/main/java/com/azure/core/util/polling/PollResponse.java b/sdk/core/azure-core/src/main/java/com/azure/core/util/polling/PollResponse.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/util/polling/PollResponse.java rename to sdk/core/azure-core/src/main/java/com/azure/core/util/polling/PollResponse.java diff --git a/core/azure-core/src/main/java/com/azure/core/util/polling/Poller.java b/sdk/core/azure-core/src/main/java/com/azure/core/util/polling/Poller.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/util/polling/Poller.java rename to sdk/core/azure-core/src/main/java/com/azure/core/util/polling/Poller.java diff --git a/core/azure-core/src/main/java/com/azure/core/util/polling/package-info.java b/sdk/core/azure-core/src/main/java/com/azure/core/util/polling/package-info.java similarity index 100% rename from core/azure-core/src/main/java/com/azure/core/util/polling/package-info.java rename to sdk/core/azure-core/src/main/java/com/azure/core/util/polling/package-info.java diff --git a/core/azure-core/src/samples/java/com/azure/core/http/rest/PagedFluxJavaDocCodeSnippets.java b/sdk/core/azure-core/src/samples/java/com/azure/core/http/rest/PagedFluxJavaDocCodeSnippets.java similarity index 100% rename from core/azure-core/src/samples/java/com/azure/core/http/rest/PagedFluxJavaDocCodeSnippets.java rename to sdk/core/azure-core/src/samples/java/com/azure/core/http/rest/PagedFluxJavaDocCodeSnippets.java diff --git a/core/azure-core/src/samples/java/com/azure/core/implementation/util/ClientLoggerJavaDocCodeSnippets.java b/sdk/core/azure-core/src/samples/java/com/azure/core/implementation/util/ClientLoggerJavaDocCodeSnippets.java similarity index 100% rename from core/azure-core/src/samples/java/com/azure/core/implementation/util/ClientLoggerJavaDocCodeSnippets.java rename to sdk/core/azure-core/src/samples/java/com/azure/core/implementation/util/ClientLoggerJavaDocCodeSnippets.java diff --git a/sdk/core/azure-core/src/samples/java/com/azure/core/implementation/util/FluxUtilJavaDocCodeSnippets.java b/sdk/core/azure-core/src/samples/java/com/azure/core/implementation/util/FluxUtilJavaDocCodeSnippets.java new file mode 100644 index 0000000000000..341adf9e27f7d --- 
/dev/null +++ b/sdk/core/azure-core/src/samples/java/com/azure/core/implementation/util/FluxUtilJavaDocCodeSnippets.java @@ -0,0 +1,57 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.core.implementation.util; + +import com.azure.core.util.Context; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +/** + * Code snippets for {@link FluxUtil} + */ +public class FluxUtilJavaDocCodeSnippets { + + /** + * Code snippet for using {@link FluxUtil} with single item response + */ + public void codeSnippetForCallWithSingleResponse() { + // BEGIN: com.azure.core.implementation.util.fluxutil.monocontext + String prefix = "Hello, "; + Mono response = FluxUtil + .monoContext(context -> serviceCallReturnsSingle(prefix, context)); + // END: com.azure.core.implementation.util.fluxutil.monocontext + } + + /** + * Code snippet for using {@link FluxUtil} with collection response + */ + public void codeSnippetForCallWithCollectionResponse() { + // BEGIN: com.azure.core.implementation.util.fluxutil.fluxcontext + String prefix = "Hello, "; + Flux response = FluxUtil + .fluxContext(context -> serviceCallReturnsCollection(prefix, context)); + // END: com.azure.core.implementation.util.fluxutil.fluxcontext + } + + /** + * Implementation not provided + * @param prefix The prefix + * @param context Azure context + * @return {@link Flux#empty() empty} response + */ + private Flux serviceCallReturnsCollection(String prefix, Context context) { + return Flux.empty(); + } + + /** + * Implementation not provided + * @param prefix The prefix + * @param context Azure context + * @return {@link Mono#empty() empty} response + */ + private Mono serviceCallReturnsSingle(String prefix, Context context) { + return Mono.empty(); + } + +} diff --git a/core/azure-core/src/samples/java/com/azure/core/util/polling/PollResponseJavaDocCodeSnippets.java b/sdk/core/azure-core/src/samples/java/com/azure/core/util/polling/PollResponseJavaDocCodeSnippets.java similarity index 100% rename from core/azure-core/src/samples/java/com/azure/core/util/polling/PollResponseJavaDocCodeSnippets.java rename to sdk/core/azure-core/src/samples/java/com/azure/core/util/polling/PollResponseJavaDocCodeSnippets.java diff --git a/core/azure-core/src/samples/java/com/azure/core/util/polling/PollerJavaDocCodeSnippets.java b/sdk/core/azure-core/src/samples/java/com/azure/core/util/polling/PollerJavaDocCodeSnippets.java similarity index 100% rename from core/azure-core/src/samples/java/com/azure/core/util/polling/PollerJavaDocCodeSnippets.java rename to sdk/core/azure-core/src/samples/java/com/azure/core/util/polling/PollerJavaDocCodeSnippets.java diff --git a/core/azure-core/src/test/java/com/azure/core/ConfigurationTests.java b/sdk/core/azure-core/src/test/java/com/azure/core/ConfigurationTests.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/ConfigurationTests.java rename to sdk/core/azure-core/src/test/java/com/azure/core/ConfigurationTests.java diff --git a/core/azure-core/src/test/java/com/azure/core/MockServer.java b/sdk/core/azure-core/src/test/java/com/azure/core/MockServer.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/MockServer.java rename to sdk/core/azure-core/src/test/java/com/azure/core/MockServer.java diff --git a/core/azure-core/src/test/java/com/azure/core/MyOtherRestException.java b/sdk/core/azure-core/src/test/java/com/azure/core/MyOtherRestException.java similarity index 100% 
rename from core/azure-core/src/test/java/com/azure/core/MyOtherRestException.java rename to sdk/core/azure-core/src/test/java/com/azure/core/MyOtherRestException.java diff --git a/core/azure-core/src/test/java/com/azure/core/MyRestException.java b/sdk/core/azure-core/src/test/java/com/azure/core/MyRestException.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/MyRestException.java rename to sdk/core/azure-core/src/test/java/com/azure/core/MyRestException.java diff --git a/core/azure-core/src/test/java/com/azure/core/UserAgentTests.java b/sdk/core/azure-core/src/test/java/com/azure/core/UserAgentTests.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/UserAgentTests.java rename to sdk/core/azure-core/src/test/java/com/azure/core/UserAgentTests.java diff --git a/core/azure-core/src/test/java/com/azure/core/credentials/CredentialsTests.java b/sdk/core/azure-core/src/test/java/com/azure/core/credentials/CredentialsTests.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/credentials/CredentialsTests.java rename to sdk/core/azure-core/src/test/java/com/azure/core/credentials/CredentialsTests.java diff --git a/core/azure-core/src/test/java/com/azure/core/credentials/TokenCacheTests.java b/sdk/core/azure-core/src/test/java/com/azure/core/credentials/TokenCacheTests.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/credentials/TokenCacheTests.java rename to sdk/core/azure-core/src/test/java/com/azure/core/credentials/TokenCacheTests.java diff --git a/core/azure-core/src/test/java/com/azure/core/entities/AccessPolicy.java b/sdk/core/azure-core/src/test/java/com/azure/core/entities/AccessPolicy.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/entities/AccessPolicy.java rename to sdk/core/azure-core/src/test/java/com/azure/core/entities/AccessPolicy.java diff --git a/core/azure-core/src/test/java/com/azure/core/entities/HttpBinFormDataJSON.java b/sdk/core/azure-core/src/test/java/com/azure/core/entities/HttpBinFormDataJSON.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/entities/HttpBinFormDataJSON.java rename to sdk/core/azure-core/src/test/java/com/azure/core/entities/HttpBinFormDataJSON.java diff --git a/core/azure-core/src/test/java/com/azure/core/entities/HttpBinHeaders.java b/sdk/core/azure-core/src/test/java/com/azure/core/entities/HttpBinHeaders.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/entities/HttpBinHeaders.java rename to sdk/core/azure-core/src/test/java/com/azure/core/entities/HttpBinHeaders.java diff --git a/core/azure-core/src/test/java/com/azure/core/entities/HttpBinJSON.java b/sdk/core/azure-core/src/test/java/com/azure/core/entities/HttpBinJSON.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/entities/HttpBinJSON.java rename to sdk/core/azure-core/src/test/java/com/azure/core/entities/HttpBinJSON.java diff --git a/core/azure-core/src/test/java/com/azure/core/entities/SignedIdentifierInner.java b/sdk/core/azure-core/src/test/java/com/azure/core/entities/SignedIdentifierInner.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/entities/SignedIdentifierInner.java rename to sdk/core/azure-core/src/test/java/com/azure/core/entities/SignedIdentifierInner.java diff --git a/core/azure-core/src/test/java/com/azure/core/entities/SignedIdentifiersWrapper.java 
b/sdk/core/azure-core/src/test/java/com/azure/core/entities/SignedIdentifiersWrapper.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/entities/SignedIdentifiersWrapper.java rename to sdk/core/azure-core/src/test/java/com/azure/core/entities/SignedIdentifiersWrapper.java diff --git a/core/azure-core/src/test/java/com/azure/core/entities/Slide.java b/sdk/core/azure-core/src/test/java/com/azure/core/entities/Slide.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/entities/Slide.java rename to sdk/core/azure-core/src/test/java/com/azure/core/entities/Slide.java diff --git a/core/azure-core/src/test/java/com/azure/core/entities/Slideshow.java b/sdk/core/azure-core/src/test/java/com/azure/core/entities/Slideshow.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/entities/Slideshow.java rename to sdk/core/azure-core/src/test/java/com/azure/core/entities/Slideshow.java diff --git a/core/azure-core/src/test/java/com/azure/core/http/HttpHeaderTests.java b/sdk/core/azure-core/src/test/java/com/azure/core/http/HttpHeaderTests.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/http/HttpHeaderTests.java rename to sdk/core/azure-core/src/test/java/com/azure/core/http/HttpHeaderTests.java diff --git a/core/azure-core/src/test/java/com/azure/core/http/HttpHeadersTests.java b/sdk/core/azure-core/src/test/java/com/azure/core/http/HttpHeadersTests.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/http/HttpHeadersTests.java rename to sdk/core/azure-core/src/test/java/com/azure/core/http/HttpHeadersTests.java diff --git a/core/azure-core/src/test/java/com/azure/core/http/HttpMethodTests.java b/sdk/core/azure-core/src/test/java/com/azure/core/http/HttpMethodTests.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/http/HttpMethodTests.java rename to sdk/core/azure-core/src/test/java/com/azure/core/http/HttpMethodTests.java diff --git a/core/azure-core/src/test/java/com/azure/core/http/HttpPipelineTests.java b/sdk/core/azure-core/src/test/java/com/azure/core/http/HttpPipelineTests.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/http/HttpPipelineTests.java rename to sdk/core/azure-core/src/test/java/com/azure/core/http/HttpPipelineTests.java diff --git a/core/azure-core/src/test/java/com/azure/core/http/HttpRequestTests.java b/sdk/core/azure-core/src/test/java/com/azure/core/http/HttpRequestTests.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/http/HttpRequestTests.java rename to sdk/core/azure-core/src/test/java/com/azure/core/http/HttpRequestTests.java diff --git a/core/azure-core/src/test/java/com/azure/core/http/MockHttpClient.java b/sdk/core/azure-core/src/test/java/com/azure/core/http/MockHttpClient.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/http/MockHttpClient.java rename to sdk/core/azure-core/src/test/java/com/azure/core/http/MockHttpClient.java diff --git a/core/azure-core/src/test/java/com/azure/core/http/MockHttpResponse.java b/sdk/core/azure-core/src/test/java/com/azure/core/http/MockHttpResponse.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/http/MockHttpResponse.java rename to sdk/core/azure-core/src/test/java/com/azure/core/http/MockHttpResponse.java diff --git a/core/azure-core/src/test/java/com/azure/core/http/ReactorNettyClientTests.java 
b/sdk/core/azure-core/src/test/java/com/azure/core/http/ReactorNettyClientTests.java similarity index 99% rename from core/azure-core/src/test/java/com/azure/core/http/ReactorNettyClientTests.java rename to sdk/core/azure-core/src/test/java/com/azure/core/http/ReactorNettyClientTests.java index 4d486fc00cdf1..d5b182d585927 100644 --- a/core/azure-core/src/test/java/com/azure/core/http/ReactorNettyClientTests.java +++ b/sdk/core/azure-core/src/test/java/com/azure/core/http/ReactorNettyClientTests.java @@ -72,7 +72,6 @@ public void testFlowableResponseLongBodyAsByteArrayAsync() { checkBodyReceived(LONG_BODY, "/long"); } - @Test public void testMultipleSubscriptionsEmitsError() { HttpResponse response = getResponse("/short"); @@ -94,8 +93,6 @@ public void testDispose() throws InterruptedException { Assert.assertTrue(response.internConnection().isDisposed()); } - - @Test public void testCancel() { HttpResponse response = getResponse("/long"); @@ -217,6 +214,7 @@ public void testServerShutsDownSocketShouldPushErrorToContentFlowable() } } + @Ignore("This flakey test fails often on MacOS. https://github.com/Azure/azure-sdk-for-java/issues/4357.") @Test public void testConcurrentRequests() throws NoSuchAlgorithmException { long t = System.currentTimeMillis(); diff --git a/core/azure-core/src/test/java/com/azure/core/http/policy/HostPolicyTests.java b/sdk/core/azure-core/src/test/java/com/azure/core/http/policy/HostPolicyTests.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/http/policy/HostPolicyTests.java rename to sdk/core/azure-core/src/test/java/com/azure/core/http/policy/HostPolicyTests.java diff --git a/core/azure-core/src/test/java/com/azure/core/http/policy/ProtocolPolicyTests.java b/sdk/core/azure-core/src/test/java/com/azure/core/http/policy/ProtocolPolicyTests.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/http/policy/ProtocolPolicyTests.java rename to sdk/core/azure-core/src/test/java/com/azure/core/http/policy/ProtocolPolicyTests.java diff --git a/core/azure-core/src/test/java/com/azure/core/http/policy/ProxyAuthenticationPolicyTests.java b/sdk/core/azure-core/src/test/java/com/azure/core/http/policy/ProxyAuthenticationPolicyTests.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/http/policy/ProxyAuthenticationPolicyTests.java rename to sdk/core/azure-core/src/test/java/com/azure/core/http/policy/ProxyAuthenticationPolicyTests.java diff --git a/core/azure-core/src/test/java/com/azure/core/http/policy/RequestIdPolicyTests.java b/sdk/core/azure-core/src/test/java/com/azure/core/http/policy/RequestIdPolicyTests.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/http/policy/RequestIdPolicyTests.java rename to sdk/core/azure-core/src/test/java/com/azure/core/http/policy/RequestIdPolicyTests.java diff --git a/core/azure-core/src/test/java/com/azure/core/http/policy/RetryPolicyTests.java b/sdk/core/azure-core/src/test/java/com/azure/core/http/policy/RetryPolicyTests.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/http/policy/RetryPolicyTests.java rename to sdk/core/azure-core/src/test/java/com/azure/core/http/policy/RetryPolicyTests.java diff --git a/core/azure-core/src/test/java/com/azure/core/http/rest/PagedFluxTest.java b/sdk/core/azure-core/src/test/java/com/azure/core/http/rest/PagedFluxTest.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/http/rest/PagedFluxTest.java 
rename to sdk/core/azure-core/src/test/java/com/azure/core/http/rest/PagedFluxTest.java diff --git a/core/azure-core/src/test/java/com/azure/core/implementation/Base64UrlTests.java b/sdk/core/azure-core/src/test/java/com/azure/core/implementation/Base64UrlTests.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/implementation/Base64UrlTests.java rename to sdk/core/azure-core/src/test/java/com/azure/core/implementation/Base64UrlTests.java diff --git a/core/azure-core/src/test/java/com/azure/core/implementation/EncodedParameterTests.java b/sdk/core/azure-core/src/test/java/com/azure/core/implementation/EncodedParameterTests.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/implementation/EncodedParameterTests.java rename to sdk/core/azure-core/src/test/java/com/azure/core/implementation/EncodedParameterTests.java diff --git a/core/azure-core/src/test/java/com/azure/core/implementation/RestProxyStressTests.java b/sdk/core/azure-core/src/test/java/com/azure/core/implementation/RestProxyStressTests.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/implementation/RestProxyStressTests.java rename to sdk/core/azure-core/src/test/java/com/azure/core/implementation/RestProxyStressTests.java diff --git a/core/azure-core/src/test/java/com/azure/core/implementation/RestProxyTests.java b/sdk/core/azure-core/src/test/java/com/azure/core/implementation/RestProxyTests.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/implementation/RestProxyTests.java rename to sdk/core/azure-core/src/test/java/com/azure/core/implementation/RestProxyTests.java diff --git a/core/azure-core/src/test/java/com/azure/core/implementation/RestProxyWithHttpProxyNettyTests.java b/sdk/core/azure-core/src/test/java/com/azure/core/implementation/RestProxyWithHttpProxyNettyTests.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/implementation/RestProxyWithHttpProxyNettyTests.java rename to sdk/core/azure-core/src/test/java/com/azure/core/implementation/RestProxyWithHttpProxyNettyTests.java diff --git a/core/azure-core/src/test/java/com/azure/core/implementation/RestProxyWithMockTests.java b/sdk/core/azure-core/src/test/java/com/azure/core/implementation/RestProxyWithMockTests.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/implementation/RestProxyWithMockTests.java rename to sdk/core/azure-core/src/test/java/com/azure/core/implementation/RestProxyWithMockTests.java diff --git a/core/azure-core/src/test/java/com/azure/core/implementation/RestProxyWithNettyTests.java b/sdk/core/azure-core/src/test/java/com/azure/core/implementation/RestProxyWithNettyTests.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/implementation/RestProxyWithNettyTests.java rename to sdk/core/azure-core/src/test/java/com/azure/core/implementation/RestProxyWithNettyTests.java diff --git a/core/azure-core/src/test/java/com/azure/core/implementation/RestProxyXMLTests.java b/sdk/core/azure-core/src/test/java/com/azure/core/implementation/RestProxyXMLTests.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/implementation/RestProxyXMLTests.java rename to sdk/core/azure-core/src/test/java/com/azure/core/implementation/RestProxyXMLTests.java diff --git a/core/azure-core/src/test/java/com/azure/core/implementation/SubstitutionTests.java 
b/sdk/core/azure-core/src/test/java/com/azure/core/implementation/SubstitutionTests.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/implementation/SubstitutionTests.java rename to sdk/core/azure-core/src/test/java/com/azure/core/implementation/SubstitutionTests.java diff --git a/core/azure-core/src/test/java/com/azure/core/implementation/SwaggerInterfaceParserTests.java b/sdk/core/azure-core/src/test/java/com/azure/core/implementation/SwaggerInterfaceParserTests.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/implementation/SwaggerInterfaceParserTests.java rename to sdk/core/azure-core/src/test/java/com/azure/core/implementation/SwaggerInterfaceParserTests.java diff --git a/core/azure-core/src/test/java/com/azure/core/implementation/SwaggerMethodParserTests.java b/sdk/core/azure-core/src/test/java/com/azure/core/implementation/SwaggerMethodParserTests.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/implementation/SwaggerMethodParserTests.java rename to sdk/core/azure-core/src/test/java/com/azure/core/implementation/SwaggerMethodParserTests.java diff --git a/core/azure-core/src/test/java/com/azure/core/implementation/UrlEscaperTests.java b/sdk/core/azure-core/src/test/java/com/azure/core/implementation/UrlEscaperTests.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/implementation/UrlEscaperTests.java rename to sdk/core/azure-core/src/test/java/com/azure/core/implementation/UrlEscaperTests.java diff --git a/core/azure-core/src/test/java/com/azure/core/implementation/ValidatorTests.java b/sdk/core/azure-core/src/test/java/com/azure/core/implementation/ValidatorTests.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/implementation/ValidatorTests.java rename to sdk/core/azure-core/src/test/java/com/azure/core/implementation/ValidatorTests.java diff --git a/core/azure-core/src/test/java/com/azure/core/implementation/http/UrlBuilderTests.java b/sdk/core/azure-core/src/test/java/com/azure/core/implementation/http/UrlBuilderTests.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/implementation/http/UrlBuilderTests.java rename to sdk/core/azure-core/src/test/java/com/azure/core/implementation/http/UrlBuilderTests.java diff --git a/core/azure-core/src/test/java/com/azure/core/implementation/http/UrlTokenizerTests.java b/sdk/core/azure-core/src/test/java/com/azure/core/implementation/http/UrlTokenizerTests.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/implementation/http/UrlTokenizerTests.java rename to sdk/core/azure-core/src/test/java/com/azure/core/implementation/http/UrlTokenizerTests.java diff --git a/core/azure-core/src/test/java/com/azure/core/implementation/serializer/jackson/AdditionalPropertiesSerializerTests.java b/sdk/core/azure-core/src/test/java/com/azure/core/implementation/serializer/jackson/AdditionalPropertiesSerializerTests.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/implementation/serializer/jackson/AdditionalPropertiesSerializerTests.java rename to sdk/core/azure-core/src/test/java/com/azure/core/implementation/serializer/jackson/AdditionalPropertiesSerializerTests.java diff --git a/core/azure-core/src/test/java/com/azure/core/implementation/serializer/jackson/DateTimeSerializerTests.java 
b/sdk/core/azure-core/src/test/java/com/azure/core/implementation/serializer/jackson/DateTimeSerializerTests.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/implementation/serializer/jackson/DateTimeSerializerTests.java rename to sdk/core/azure-core/src/test/java/com/azure/core/implementation/serializer/jackson/DateTimeSerializerTests.java diff --git a/core/azure-core/src/test/java/com/azure/core/implementation/serializer/jackson/DurationSerializerTests.java b/sdk/core/azure-core/src/test/java/com/azure/core/implementation/serializer/jackson/DurationSerializerTests.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/implementation/serializer/jackson/DurationSerializerTests.java rename to sdk/core/azure-core/src/test/java/com/azure/core/implementation/serializer/jackson/DurationSerializerTests.java diff --git a/core/azure-core/src/test/java/com/azure/core/implementation/serializer/jackson/FlatteningSerializerTests.java b/sdk/core/azure-core/src/test/java/com/azure/core/implementation/serializer/jackson/FlatteningSerializerTests.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/implementation/serializer/jackson/FlatteningSerializerTests.java rename to sdk/core/azure-core/src/test/java/com/azure/core/implementation/serializer/jackson/FlatteningSerializerTests.java diff --git a/core/azure-core/src/test/java/com/azure/core/implementation/serializer/jackson/JacksonAdapterTests.java b/sdk/core/azure-core/src/test/java/com/azure/core/implementation/serializer/jackson/JacksonAdapterTests.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/implementation/serializer/jackson/JacksonAdapterTests.java rename to sdk/core/azure-core/src/test/java/com/azure/core/implementation/serializer/jackson/JacksonAdapterTests.java diff --git a/core/azure-core/src/test/java/com/azure/core/implementation/util/FluxUtilTests.java b/sdk/core/azure-core/src/test/java/com/azure/core/implementation/util/FluxUtilTests.java similarity index 68% rename from core/azure-core/src/test/java/com/azure/core/implementation/util/FluxUtilTests.java rename to sdk/core/azure-core/src/test/java/com/azure/core/implementation/util/FluxUtilTests.java index d357bd83c494c..76cd1feb04a5e 100644 --- a/core/azure-core/src/test/java/com/azure/core/implementation/util/FluxUtilTests.java +++ b/sdk/core/azure-core/src/test/java/com/azure/core/implementation/util/FluxUtilTests.java @@ -3,31 +3,46 @@ package com.azure.core.implementation.util; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import com.azure.core.http.HttpHeaders; +import com.azure.core.http.HttpMethod; +import com.azure.core.http.HttpRequest; +import com.azure.core.http.rest.PagedFlux; +import com.azure.core.http.rest.PagedResponse; +import com.azure.core.implementation.http.PagedResponseBase; +import com.azure.core.util.Context; import io.netty.buffer.ByteBuf; import io.netty.buffer.ByteBufUtil; import io.netty.buffer.Unpooled; +import io.netty.handler.codec.http.HttpResponseStatus; import io.netty.util.ReferenceCountUtil; -import org.junit.Ignore; -import org.junit.Test; -import reactor.core.Exceptions; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; -import reactor.test.StepVerifier; - import java.io.BufferedOutputStream; import java.io.ByteArrayOutputStream; import java.io.File; import java.io.FileOutputStream; import 
java.io.IOException; +import java.net.MalformedURLException; +import java.net.URL; import java.nio.channels.AsynchronousFileChannel; import java.nio.charset.StandardCharsets; import java.nio.file.StandardOpenOption; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; - -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import org.junit.Assert; +import org.junit.Ignore; +import org.junit.Test; +import reactor.core.Exceptions; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; +import reactor.test.StepVerifier; public class FluxUtilTests { @@ -262,6 +277,114 @@ public void testCollectByteBufStream() { 0, 0, 0, (byte) 255, 0, 0, 1, 0}, bytes); } + + @Test + public void testCallWithContextGetSingle() { + String response = getSingle("Hello, ") + .subscriberContext(reactor.util.context.Context.of("FirstName", "Foo", "LastName", "Bar")) + .block(); + Assert.assertEquals("Hello, Foo Bar", response); + } + + @Test + public void testCallWithContextGetCollection() { + List expectedLines = Arrays.asList("Hello,", "Foo", "Bar"); + List actualLines = new ArrayList<>(); + getCollection("Hello, ") + .subscriberContext(reactor.util.context.Context.of("FirstName", "Foo", "LastName", "Bar")) + .doOnNext(line -> actualLines.add(line)) + .subscribe(); + Assert.assertEquals(expectedLines, actualLines); + } + + @Test + public void testCallWithContextGetPagedCollection() throws Exception { + // Simulates the customer code that includes context + getPagedCollection() + .subscriberContext( + reactor.util.context.Context.of("Key1", "Val1", "Key2", "Val2")) + .doOnNext(System.out::println) + .subscribe(); + } + + private PagedFlux getPagedCollection() + throws Exception { + // Simulates the client library API + List> pagedResponses = getPagedResponses(4); + return new PagedFlux<>( + () -> FluxUtil.monoContext(context -> getFirstPage(pagedResponses, context)), + continuationToken -> FluxUtil + .monoContext(context -> getNextPage(continuationToken, pagedResponses, context))); + } + + private List> getPagedResponses(int noOfPages) + throws MalformedURLException { + HttpHeaders httpHeaders = new HttpHeaders().put("header1", "value1") + .put("header2", "value2"); + HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, new URL("http://localhost")); + String deserializedHeaders = "header1,value1,header2,value2"; + return IntStream.range(0, noOfPages) + .boxed() + .map(i -> createPagedResponse(httpRequest, httpHeaders, deserializedHeaders, i, noOfPages)) + .collect(Collectors.toList()); + } + + private Mono> getFirstPage(List> pagedResponses, + Context context) { + // Simulates the service side code which should get the context provided by customer code + Assert.assertEquals("Val1", context.getData("Key1").get()); + return pagedResponses.isEmpty() ? 
Mono.empty() : Mono.just(pagedResponses.get(0)); + } + + private Mono> getNextPage(String continuationToken, + List> pagedResponses, Context context) { + // Simulates the service side code which should get the context provided by customer code + Assert.assertEquals("Val2", context.getData("Key2").get()); + if (continuationToken == null || continuationToken.isEmpty()) { + return Mono.empty(); + } + return Mono.just(pagedResponses.get(Integer.valueOf(continuationToken))); + } + + private PagedResponseBase createPagedResponse(HttpRequest httpRequest, + HttpHeaders httpHeaders, String deserializedHeaders, int i, int noOfPages) { + return new PagedResponseBase<>(httpRequest, HttpResponseStatus.OK.code(), + httpHeaders, + getItems(i), + i < noOfPages - 1 ? String.valueOf(i + 1) : null, + deserializedHeaders); + } + + private List getItems(Integer i) { + return IntStream.range(i * 3, i * 3 + 3).boxed().collect(Collectors.toList()); + } + + + private Mono getSingle(String prefix) { + return FluxUtil.monoContext(context -> serviceCallSingle(prefix, context)); + } + + private Flux getCollection(String prefix) { + return FluxUtil + .fluxContext(context -> serviceCallCollection(prefix, context)); + } + + private Mono serviceCallSingle(String prefix, Context context) { + String msg = prefix + + context.getData("FirstName").orElse("Stranger") + + " " + + context.getData("LastName").orElse(""); + return Mono.just(msg); + } + + private Flux serviceCallCollection(String prefix, Context context) { + String msg = prefix + + context.getData("FirstName").orElse("Stranger") + + " " + + context.getData("LastName").orElse(""); + + return Flux.just(msg.split(" ")); + } // private static byte[] toBytes(ByteBuf bb) { byte[] bytes = new byte[bb.readableBytes()]; diff --git a/core/azure-core/src/test/java/com/azure/core/implementation/util/Foo.java b/sdk/core/azure-core/src/test/java/com/azure/core/implementation/util/Foo.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/implementation/util/Foo.java rename to sdk/core/azure-core/src/test/java/com/azure/core/implementation/util/Foo.java diff --git a/core/azure-core/src/test/java/com/azure/core/implementation/util/FooChild.java b/sdk/core/azure-core/src/test/java/com/azure/core/implementation/util/FooChild.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/implementation/util/FooChild.java rename to sdk/core/azure-core/src/test/java/com/azure/core/implementation/util/FooChild.java diff --git a/core/azure-core/src/test/java/com/azure/core/implementation/util/ImplUtilsTests.java b/sdk/core/azure-core/src/test/java/com/azure/core/implementation/util/ImplUtilsTests.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/implementation/util/ImplUtilsTests.java rename to sdk/core/azure-core/src/test/java/com/azure/core/implementation/util/ImplUtilsTests.java diff --git a/core/azure-core/src/test/java/com/azure/core/implementation/util/TypeUtilTests.java b/sdk/core/azure-core/src/test/java/com/azure/core/implementation/util/TypeUtilTests.java similarity index 100% rename from core/azure-core/src/test/java/com/azure/core/implementation/util/TypeUtilTests.java rename to sdk/core/azure-core/src/test/java/com/azure/core/implementation/util/TypeUtilTests.java diff --git a/core/azure-core/src/test/java/com/azure/core/util/polling/PollerTests.java b/sdk/core/azure-core/src/test/java/com/azure/core/util/polling/PollerTests.java similarity index 100% rename from 
core/azure-core/src/test/java/com/azure/core/util/polling/PollerTests.java rename to sdk/core/azure-core/src/test/java/com/azure/core/util/polling/PollerTests.java diff --git a/core/azure-core/src/test/resources/GetContainerACLs.xml b/sdk/core/azure-core/src/test/resources/GetContainerACLs.xml similarity index 100% rename from core/azure-core/src/test/resources/GetContainerACLs.xml rename to sdk/core/azure-core/src/test/resources/GetContainerACLs.xml diff --git a/core/azure-core/src/test/resources/GetXMLWithAttributes.xml b/sdk/core/azure-core/src/test/resources/GetXMLWithAttributes.xml similarity index 100% rename from core/azure-core/src/test/resources/GetXMLWithAttributes.xml rename to sdk/core/azure-core/src/test/resources/GetXMLWithAttributes.xml diff --git a/core/azure-core/src/test/resources/upload.txt b/sdk/core/azure-core/src/test/resources/upload.txt similarity index 100% rename from core/azure-core/src/test/resources/upload.txt rename to sdk/core/azure-core/src/test/resources/upload.txt diff --git a/sdk/core/ci.yml b/sdk/core/ci.yml new file mode 100644 index 0000000000000..a4eb3bc813cf7 --- /dev/null +++ b/sdk/core/ci.yml @@ -0,0 +1,23 @@ +# DO NOT EDIT THIS FILE +# This file is generated automatically and any changes will be lost. + +trigger: + branches: + include: + - master + paths: + include: + - sdk/core/ + +pr: + branches: + include: + - master + paths: + include: + - sdk/core/ + +jobs: + - template: ../../eng/pipelines/templates/jobs/archetype-sdk-client.yml + parameters: + ServiceDirectory: core \ No newline at end of file diff --git a/sdk/core/pom.service.xml b/sdk/core/pom.service.xml new file mode 100644 index 0000000000000..10c012fa679b8 --- /dev/null +++ b/sdk/core/pom.service.xml @@ -0,0 +1,17 @@ + + + 4.0.0 + com.azure + azure-core-service + pom + 1.0.0 + + azure-core + azure-core-amqp + azure-core-management + azure-core-test + + diff --git a/core/pom.xml b/sdk/core/pom.xml similarity index 97% rename from core/pom.xml rename to sdk/core/pom.xml index 8a99642ce90cc..033b535580980 100644 --- a/core/pom.xml +++ b/sdk/core/pom.xml @@ -10,7 +10,7 @@ com.azure azure-client-sdk-parent 1.1.0 - ../pom.client.xml + ../../pom.client.xml com.azure diff --git a/sdk/cosmos/CODEOWNERS b/sdk/cosmos/CODEOWNERS new file mode 100644 index 0000000000000..26da20e9f2cf2 --- /dev/null +++ b/sdk/cosmos/CODEOWNERS @@ -0,0 +1,10 @@ +# CODEOWNERS is a GitHub standard to specify who is automatically assigned pull requests to review. +# This helps to prevent pull requests from languishing without review. +# GitHub can also be configured to require review from code owners before a pull request can be merged. + +# Further reading is available from the following two URLs: +# https://blog.github.com/2017-07-06-introducing-code-owners/ +# https://help.github.com/articles/about-codeowners/ + +# Default owner for repo +* @moderakh @christopheranderson @kushagraThapar diff --git a/sdk/cosmos/LICENSE b/sdk/cosmos/LICENSE new file mode 100644 index 0000000000000..21071075c2459 --- /dev/null +++ b/sdk/cosmos/LICENSE @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. All rights reserved. 
+ + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE diff --git a/sdk/cosmos/README.md b/sdk/cosmos/README.md new file mode 100644 index 0000000000000..b6ec762cc2cd3 --- /dev/null +++ b/sdk/cosmos/README.md @@ -0,0 +1,323 @@ +# Java SDK for SQL API of Azure Cosmos DB + +[![Maven Central](https://img.shields.io/maven-central/v/com.microsoft.azure/azure-cosmosdb.svg)](https://search.maven.org/artifact/com.microsoft.azure/azure-cosmosdb/2.4.3/jar) +[![Build Status](https://api.travis-ci.org/Azure/azure-cosmosdb-java.svg?branch=master)](https://travis-ci.org/Azure/azure-cosmosdb-java) +[![Known Vulnerabilities](https://snyk.io/test/github/Azure/azure-cosmosdb-java/badge.svg?targetFile=sdk%2Fpom.xml)](https://snyk.io/test/github/Azure/azure-cosmosdb-java?targetFile=sdk%2Fpom.xml) + + + + + +- [Consuming the official Microsoft Azure Cosmos DB Java SDK](#consuming-the-official-microsoft-azure-cosmos-db-java-sdk) +- [Prerequisites](#prerequisites) +- [API Documentation](#api-documentation) +- [Usage Code Sample](#usage-code-sample) +- [Guide for Prod](#guide-for-prod) +- [Future, CompletableFuture, and ListenableFuture](#future-completablefuture-and-listenablefuture) +- [Checking out the Source Code](#checking-out-the-source-code) +- [FAQ](#faq) +- [Release changes](#release-changes) +- [Contribution and Feedback](#contribution-and-feedback) +- [License](#license) + + + +## Consuming the official Microsoft Azure Cosmos DB Java SDK + +This project provides a SDK library in Java for interacting with [SQL API](https://docs.microsoft.com/en-us/azure/cosmos-db/sql-api-sql-query) of [Azure Cosmos DB +Database Service](https://azure.microsoft.com/en-us/services/cosmos-db/). This project also includes samples, tools, and utilities. + +Jar dependency binary information for maven and gradle can be found here at [maven](https://mvnrepository.com/artifact/com.microsoft.azure/azure-cosmosdb/2.4.3). 
+
+For example, using maven, you can add the following dependency to your maven pom file:
+
+```xml
+<dependency>
+  <groupId>com.microsoft.azure</groupId>
+  <artifactId>azure-cosmosdb</artifactId>
+  <version>2.4.3</version>
+</dependency>
+```
+
+Useful links:
+
+- [Sample Get Started APP](https://github.com/Azure-Samples/azure-cosmos-db-sql-api-async-java-getting-started)
+- [Introduction to Resource Model of Azure Cosmos DB Service](https://docs.microsoft.com/en-us/azure/cosmos-db/sql-api-resources)
+- [Introduction to SQL API of Azure Cosmos DB Service](https://docs.microsoft.com/en-us/azure/cosmos-db/sql-api-sql-query)
+- [SDK JavaDoc API](https://azure.github.io/azure-cosmosdb-java/2.4.0/com/microsoft/azure/cosmosdb/rx/AsyncDocumentClient.html)
+- [RxJava Observable JavaDoc API](http://reactivex.io/RxJava/1.x/javadoc/rx/Observable.html)
+- [SDK FAQ](faq/)
+
+## Prerequisites
+
+- Java Development Kit 8
+- An active Azure account. If you don't have one, you can sign up for a [free account](https://azure.microsoft.com/free/). Alternatively, you can use the [Azure Cosmos DB Emulator](https://azure.microsoft.com/documentation/articles/documentdb-nosql-local-emulator) for development and testing. Because the emulator's HTTPS certificate is self-signed, you need to import it into the Java trusted certificate store as [explained here](https://docs.microsoft.com/en-us/azure/cosmos-db/local-emulator-export-ssl-certificates).
+- (Optional) SLF4J is a logging facade.
+- (Optional) An [SLF4J binding](http://www.slf4j.org/manual.html) is used to associate a specific logging framework with SLF4J.
+- (Optional) Maven
+
+SLF4J is only needed if you plan to use logging; in that case, also download an SLF4J binding, which links the SLF4J API with the logging implementation of your choice. See the [SLF4J user manual](http://www.slf4j.org/manual.html) for more information.
+
+## API Documentation
+
+Javadoc is available [here](https://azure.github.io/azure-cosmosdb-java/2.4.0/com/microsoft/azure/cosmosdb/rx/AsyncDocumentClient.html).
+
+The SDK provides a Reactive Extension (Rx) Observable-based async API. You can read more about RxJava and [Observable APIs here](http://reactivex.io/RxJava/1.x/javadoc/rx/Observable.html).
+
+## Usage Code Sample
+
+Code sample for creating a document:
+
+```java
+import com.azure.data.cosmos.rx.*;
+import com.azure.data.cosmos.*;
+
+ConnectionPolicy policy = new ConnectionPolicy();
+policy.setConnectionMode(ConnectionMode.Direct);
+
+AsyncDocumentClient asyncClient = new AsyncDocumentClient.Builder()
+        .withServiceEndpoint(HOST)
+        .withMasterKeyOrResourceToken(MASTER_KEY)
+        .withConnectionPolicy(policy)
+        .withConsistencyLevel(ConsistencyLevel.Eventual)
+        .build();
+
+Document doc = new Document(String.format("{ 'id': 'doc%d', 'counter': '%d'}", 1, 1));
+
+Observable<ResourceResponse<Document>> createDocumentObservable =
+    asyncClient.createDocument(collectionLink, doc, null, false);
+
+createDocumentObservable
+    .single()           // we know there will be one response
+    .subscribe(
+        documentResourceResponse -> {
+            System.out.println(documentResourceResponse.getRequestCharge());
+        },
+        error -> {
+            System.err.println("an error happened: " + error.getMessage());
+        });
+```
+
+We have a get started sample app available [here](https://github.com/Azure-Samples/azure-cosmos-db-sql-api-async-java-getting-started).
+
+We also have more examples in the form of standalone unit tests in the [examples project](examples/src/test/java/com/microsoft/azure/cosmosdb/rx/examples).
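+
+As a companion to the create sample above, here is a minimal sketch of reading that document back by its link. It assumes the `asyncClient` and `collectionLink` from the sample above and that the container's partition key value for `doc1` is its id; adjust both to your own data model. Depending on the SDK preview in use, the same call may return a Reactor `Flux` instead of an rx `Observable`.
+
+```java
+// Sketch only: the document link and the partition key value below are assumptions
+// based on the create sample above; adjust them to your container's partition key.
+RequestOptions readOptions = new RequestOptions();
+readOptions.setPartitionKey(new PartitionKey("doc1"));
+
+Observable<ResourceResponse<Document>> readDocumentObservable =
+    asyncClient.readDocument(collectionLink + "/docs/doc1", readOptions);
+
+readDocumentObservable.subscribe(
+    readResponse -> {
+        // request charge for the point read
+        System.out.println(readResponse.getRequestCharge());
+    },
+    error -> {
+        System.err.println("an error happened: " + error.getMessage());
+    });
+```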
+
+## Guide for Prod
+
+To achieve better performance and higher throughput, there are a few tips that are helpful to follow:
+
+### Use Appropriate Scheduler (Avoid stealing Eventloop IO Netty threads)
+
+The SDK uses [netty](https://netty.io/) for non-blocking IO. It uses a fixed number of IO netty event loop threads (as many as your machine has CPU cores) for executing IO operations.
+
+The Observable returned by the API emits its result on one of those shared IO event loop netty threads, so it is important not to block them. Doing CPU-intensive work or a blocking operation on an IO event loop netty thread may cause a deadlock or significantly reduce SDK throughput.
+
+For example, the following code executes CPU-intensive work on the event loop IO netty thread:
+
+```java
+Observable<ResourceResponse<Document>> createDocObs = asyncDocumentClient.createDocument(
+    collectionLink, document, null, true);
+
+createDocObs.subscribe(
+    resourceResponse -> {
+        // this is executed on an eventloop IO netty thread.
+        // the eventloop thread is shared and is meant to return back quickly.
+        //
+        // DON'T do this on an eventloop IO netty thread.
+        veryCpuIntensiveWork();
+    });
+```
+
+If you want to do CPU-intensive work on the result after it is received, avoid doing it on the event loop IO netty thread. Instead, provide your own Scheduler, which supplies its own threads for running your work:
+
+```java
+import rx.schedulers.Schedulers;
+
+Observable<ResourceResponse<Document>> createDocObs = asyncDocumentClient.createDocument(
+    collectionLink, document, null, true);
+
+createDocObs.subscribeOn(Schedulers.computation())
+    .subscribe(
+        resourceResponse -> {
+            // this is executed on threads provided by Schedulers.computation().
+            // Schedulers.computation() should be used only when the work is CPU intensive
+            // and you are not doing blocking IO, thread sleep, etc. on this thread against other resources.
+            veryCpuIntensiveWork();
+        });
+```
+
+Based on the type of your work, use the appropriate existing RxJava Scheduler. Please read about
+[`Schedulers`](http://reactivex.io/RxJava/1.x/javadoc/rx/schedulers/Schedulers.html).
+
+### Disable netty's logging
+
+Netty library logging is very chatty and needs to be turned off (suppressing it in the logging configuration may not be enough) to avoid additional CPU costs. If you are not in debugging mode, disable netty's logging altogether. For example, if you are using log4j, add the following line to your codebase to remove the additional CPU costs incurred by `org.apache.log4j.Category.callAppenders()` from netty:
+
+```java
+org.apache.log4j.Logger.getLogger("io.netty").setLevel(org.apache.log4j.Level.OFF);
+```
+
+### OS Open files Resource Limit
+
+Some Linux systems (like Red Hat) have an upper limit on the number of open files, and therefore on the total number of connections. Run the following to view the current limits:
+
+```bash
+ulimit -a
+```
+
+The number of open files (nofile) needs to be large enough to have room for your configured connection pool size and other files opened by the OS. It can be modified to allow for a larger connection pool size.
+
+Open the limits.conf file:
+
+```bash
+vim /etc/security/limits.conf
+```
+
+Add/modify the following lines:
+
+```
+* - nofile 100000
+```
+
+### Use native SSL implementation for netty
+
+Netty can use OpenSSL directly for its SSL implementation stack to achieve better performance.
+In the absence of this configuration, netty falls back to Java's default SSL implementation.
+
+On Ubuntu:
+
+```bash
+sudo apt-get install openssl
+sudo apt-get install libapr1
+```
+
+and add the following dependency to your project maven dependencies:
+
+```xml
+<dependency>
+  <groupId>io.netty</groupId>
+  <artifactId>netty-tcnative</artifactId>
+  <version>2.0.20.Final</version>
+  <classifier>linux-x86_64</classifier>
+</dependency>
+```
+
+For other platforms (Red Hat, Windows, Mac, etc.), please refer to these instructions: https://netty.io/wiki/forked-tomcat-native.html
+
+### Common Perf Tips
+
+There is a set of common perf tips written for our sync SDK; the majority of them also apply to the async SDK. It is available [here](https://docs.microsoft.com/en-us/azure/cosmos-db/performance-tips-java).
+
+## Future, CompletableFuture, and ListenableFuture
+
+The SDK provides a Reactive Extension (Rx) [Observable](http://reactivex.io/RxJava/1.x/javadoc/rx/Observable.html)-based async API.
+
+The Rx API has advantages over Future-based APIs, but if you wish to use `Future` you can translate Observables to Java native Futures:
+
+```java
+// You can convert an Observable to a ListenableFuture.
+// ListenableFuture (part of the google guava library) is a popular extension
+// of Java's Future which allows registering listener callbacks:
+// https://github.com/google/guava/wiki/ListenableFutureExplained
+
+import rx.observable.ListenableFutureObservable;
+
+Observable<ResourceResponse<Document>> createDocObservable = asyncClient.createDocument(
+    collectionLink, document, null, false);
+
+// NOTE: if you are going to do CPU intensive work
+// on the result thread consider changing the scheduler; see the Use Appropriate Scheduler
+// (Avoid stealing Eventloop IO Netty threads) section
+ListenableFuture<ResourceResponse<Document>> listenableFuture =
+    ListenableFutureObservable.to(createDocObservable);
+
+ResourceResponse<Document> rrd = listenableFuture.get();
+```
+
+For this to work you will need the [RxJava Guava library dependency](https://mvnrepository.com/artifact/io.reactivex/rxjava-guava/1.0.3). More information is available here: https://github.com/ReactiveX/RxJavaGuava.
+
+You can see more details on how to convert Observables to Futures here:
+https://dzone.com/articles/converting-between
+
+## Checking out the Source Code
+
+The SDK is open source and is available here: [sdk](sdk/).
+
+Clone the repo:
+
+```bash
+git clone https://github.com/Azure/azure-cosmosdb-java.git
+cd azure-cosmosdb-java
+```
+
+### How to Build from Command Line
+
+- Run the following maven command to build:
+
+```bash
+mvn clean package -DskipTests
+```
+
+### How to generate directory structure for publishing
+
+- Run the following maven command to collect the jars needed for publishing:
+
+```bash
+mvn antrun:run -N
+```
+
+Note: the `-N` is required to ensure this command is only run in the parent pom.
+
+Afterwards, you can upload the contents of `./target/collectedArtifactsForRelease` for publishing.
+
+#### Running Tests from Command Line
+
+Running tests requires Azure Cosmos DB endpoint credentials:
+
+```bash
+mvn test -DACCOUNT_HOST="https://REPLACE_ME_WITH_YOURS.documents.azure.com:443/" -DACCOUNT_KEY="REPLACE_ME_WITH_YOURS"
+```
+
+### Import into Intellij or Eclipse
+
+- Load the main parent project pom file in Intellij/Eclipse (that should automatically load the examples).
+- For running the samples you need a proper Azure Cosmos DB endpoint. The endpoints are picked up from [TestConfigurations.java](examples/src/test/java/com/microsoft/azure/cosmosdb/rx/examples/TestConfigurations.java). There is a similar endpoint config file for the sdk tests [here](sdk/src/test/java/com/microsoft/azure/cosmosdb/rx/TestConfigurations.java).
+- You can pass your endpoint credentials as VM Arguments in Eclipse JUnit Run Config: + +```bash + -DACCOUNT_HOST="https://REPLACE_ME.documents.azure.com:443/" -DACCOUNT_KEY="REPLACE_ME" +``` + +- or you can simply put your endpoint credentials in TestConfigurations.java +- The SDK tests are written using TestNG framework, if you use Eclipse you may have to + add TestNG plugin to your eclipse IDE as explained [here](http://testng.org/doc/eclipse.html). + Intellij has builtin support for TestNG. +- Now you can run the tests in your Intellij/Eclipse IDE. + +## FAQ + +We have a frequently asked questions which is maintained [here](faq/). + +## Release changes + +Release changelog is available [here](changelog/). + +## Contribution and Feedback + +This is an open source project and we welcome contributions. + +If you would like to become an active contributor to this project please follow the instructions provided in [Azure Projects Contribution Guidelines](http://azure.github.io/guidelines/). + +We have [travis build CI](https://travis-ci.org/Azure/azure-cosmosdb-java) which should pass for any PR. + +If you encounter any bugs with the SDK please file an [issue](https://github.com/Azure/azure-cosmosdb-java/issues) in the Issues section of the project. + +## License + +MIT License +Copyright (c) 2018 Copyright (c) Microsoft Corporation diff --git a/sdk/cosmos/benchmark/README.md b/sdk/cosmos/benchmark/README.md new file mode 100644 index 0000000000000..4f7aadcc7796e --- /dev/null +++ b/sdk/cosmos/benchmark/README.md @@ -0,0 +1,81 @@ +# Benchmark tool + +## Build the benchmarking tool + +```bash +git clone https://github.com/Azure/azure-cosmosdb-java.git +cd azure-cosmosdb-java + +mvn clean package -DskipTests +``` + +and then the package will be generated. 
+ +## Run the WriteLatency workload + +```bash +java -jar benchmark/target/azure-cosmosdb-benchmark-2.4.1-SNAPSHOT-jar-with-dependencies.jar \ + -serviceEndpoint $endpoint -masterKey $masterkey \ + -databaseId $dbname -collectionId $colname \ + -consistencyLevel Eventual -concurrency 10 -numberOfOperations 1000000 \ + -operation WriteLatency -connectionMode Direct +``` + +## Sample Report: + +``` +2/13/19 9:32:39 PM ============================================================= + +-- Meters ---------------------------------------------------------------------- +#Successful Operations + count = 89934 + mean rate = 1798.56 events/second + 1-minute rate = 1718.45 events/second + 5-minute rate = 1630.17 events/second + 15-minute rate = 1610.01 events/second +#Unsuccessful Operations + count = 0 + mean rate = 0.00 events/second + 1-minute rate = 0.00 events/second + 5-minute rate = 0.00 events/second + 15-minute rate = 0.00 events/second + +-- Timers ---------------------------------------------------------------------- +Latency + count = 89938 + mean rate = 1798.64 calls/second + 1-minute rate = 1718.65 calls/second + 5-minute rate = 1630.37 calls/second + 15-minute rate = 1610.21 calls/second + min = 3.97 milliseconds + max = 22.81 milliseconds + mean = 5.37 milliseconds + stddev = 0.96 milliseconds + median = 5.26 milliseconds + 75% <= 5.70 milliseconds + 95% <= 6.40 milliseconds + 98% <= 6.93 milliseconds + 99% <= 7.51 milliseconds + 99.9% <= 17.37 milliseconds +``` + +## Other Currently Supported Workloads + +* ReadLatency, +* WriteLatency, +* ReadThroughput, +* WriteThroughput, +* QueryCross, +* QuerySingle, +* QuerySingleMany, +* QueryParallel, +* QueryOrderby, +* QueryAggregate, +* QueryAggregateTopOrderby, +* QueryTopOrderby, +* Mixed +* ReadMyWrites + + +You can provide ``--help`` to the tool to see the list of other work loads (read, etc) and other options. 
+ diff --git a/sdk/cosmos/benchmark/pom.xml b/sdk/cosmos/benchmark/pom.xml new file mode 100644 index 0000000000000..491a48a51ee6b --- /dev/null +++ b/sdk/cosmos/benchmark/pom.xml @@ -0,0 +1,188 @@ + + + + 4.0.0 + + com.microsoft.azure + azure-cosmos-parent + 3.0.0 + + + azure-cosmos-benchmark + Async SDK for SQL API of Azure Cosmos DB Service - Benchmarking tool + Benchmarking tool for Async SDK for SQL API of Azure Cosmos DB Service + + + UTF-8 + + + + + org.codehaus.mojo + exec-maven-plugin + 1.2.1 + + com.azure.data.cosmos.benchmark.Main + + + + maven-assembly-plugin + 2.2 + + + jar-with-dependencies + + + + com.azure.data.cosmos.benchmark.Main + + + + + + make-assembly + package + + single + + + + + + org.apache.maven.plugins + maven-compiler-plugin + 3.6.0 + + 1.8 + 1.8 + + + + org.apache.maven.plugins + maven-eclipse-plugin + 2.8 + + + + org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.8 + + + + + + org.apache.maven.plugins + maven-antrun-plugin + 1.8 + false + + + none + default-cli + + + + true + + + + + + + com.microsoft.azure + azure-cosmos + + + com.beust + jcommander + ${jcommander.version} + + + com.google.guava + guava + ${guava.version} + + + io.dropwizard.metrics + metrics-core + ${metrics.version} + + + io.dropwizard.metrics + metrics-jvm + ${metrics.version} + + + io.dropwizard.metrics + metrics-graphite + ${metrics.version} + + + io.netty + netty-tcnative + ${netty-tcnative.version} + linux-x86_64 + + + log4j + log4j + ${log4j.version} + + + org.apache.commons + commons-lang3 + ${commons-lang3.version} + + + org.slf4j + slf4j-api + ${slf4j.version} + + + org.slf4j + slf4j-log4j12 + ${slf4j.version} + + + org.assertj + assertj-core + ${assertj.version} + test + + + org.hamcrest + hamcrest-all + ${hamcrest.version} + test + + + org.testng + testng + ${testng.version} + test + + + diff --git a/sdk/cosmos/benchmark/src/main/java/com/azure/data/cosmos/benchmark/AsyncBenchmark.java b/sdk/cosmos/benchmark/src/main/java/com/azure/data/cosmos/benchmark/AsyncBenchmark.java new file mode 100644 index 0000000000000..f93c9a0b53a27 --- /dev/null +++ b/sdk/cosmos/benchmark/src/main/java/com/azure/data/cosmos/benchmark/AsyncBenchmark.java @@ -0,0 +1,258 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.benchmark; + +import com.azure.data.cosmos.internal.AsyncDocumentClient; +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.internal.Database; +import com.azure.data.cosmos.internal.Document; +import com.azure.data.cosmos.internal.DocumentCollection; +import com.azure.data.cosmos.benchmark.Configuration.Operation; +import com.azure.data.cosmos.internal.ResourceResponse; +import com.codahale.metrics.ConsoleReporter; +import com.codahale.metrics.Meter; +import com.codahale.metrics.MetricFilter; +import com.codahale.metrics.MetricRegistry; +import com.codahale.metrics.ScheduledReporter; +import com.codahale.metrics.Timer; +import com.codahale.metrics.graphite.Graphite; +import com.codahale.metrics.graphite.GraphiteReporter; +import com.codahale.metrics.jvm.CachedThreadStatesGaugeSet; +import com.codahale.metrics.jvm.GarbageCollectorMetricSet; +import com.codahale.metrics.jvm.MemoryUsageGaugeSet; +import org.apache.commons.lang3.RandomStringUtils; +import org.reactivestreams.Subscription; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.BaseSubscriber; +import reactor.core.publisher.Flux; + +import java.net.InetSocketAddress; +import java.time.Duration; +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.Semaphore; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; + +abstract class AsyncBenchmark { + private final MetricRegistry metricsRegistry = new MetricRegistry(); + private final ScheduledReporter reporter; + private final String nameCollectionLink; + + private Meter successMeter; + private Meter failureMeter; + + final Logger logger; + final AsyncDocumentClient client; + final DocumentCollection collection; + final String partitionKey; + final Configuration configuration; + final List docsToRead; + final Semaphore concurrencyControlSemaphore; + Timer latency; + + AsyncBenchmark(Configuration cfg) { + client = new AsyncDocumentClient.Builder() + .withServiceEndpoint(cfg.getServiceEndpoint()) + .withMasterKeyOrResourceToken(cfg.getMasterKey()) + .withConnectionPolicy(cfg.getConnectionPolicy()) + .withConsistencyLevel(cfg.getConsistencyLevel()) + .build(); + + logger = LoggerFactory.getLogger(this.getClass()); + + Database database = DocDBUtils.getDatabase(client, cfg.getDatabaseId()); + collection = DocDBUtils.getCollection(client, database.selfLink(), cfg.getCollectionId()); + nameCollectionLink = String.format("dbs/%s/colls/%s", database.id(), collection.id()); + partitionKey = collection.getPartitionKey().paths().iterator().next().split("/")[1]; + concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency()); + configuration = cfg; + + ArrayList> createDocumentObservables = new ArrayList<>(); + + if (configuration.getOperationType() != Operation.WriteLatency + && configuration.getOperationType() != Operation.WriteThroughput + && configuration.getOperationType() != Operation.ReadMyWrites) { + String dataFieldValue = RandomStringUtils.randomAlphabetic(cfg.getDocumentDataFieldSize()); + for (int i = 0; i < cfg.getNumberOfPreCreatedDocuments(); i++) { + String uuid = UUID.randomUUID().toString(); + Document newDoc = new Document(); + newDoc.id(uuid); + BridgeInternal.setProperty(newDoc, partitionKey, uuid); + BridgeInternal.setProperty(newDoc, "dataField1", dataFieldValue); + BridgeInternal.setProperty(newDoc, "dataField2", dataFieldValue); + BridgeInternal.setProperty(newDoc, "dataField3", 
dataFieldValue); + BridgeInternal.setProperty(newDoc, "dataField4", dataFieldValue); + BridgeInternal.setProperty(newDoc, "dataField5", dataFieldValue); + Flux obs = client.createDocument(collection.selfLink(), newDoc, null, false) + .map(ResourceResponse::getResource); + createDocumentObservables.add(obs); + } + } + + docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block(); + init(); + + if (configuration.isEnableJvmStats()) { + metricsRegistry.register("gc", new GarbageCollectorMetricSet()); + metricsRegistry.register("threads", new CachedThreadStatesGaugeSet(10, TimeUnit.SECONDS)); + metricsRegistry.register("memory", new MemoryUsageGaugeSet()); + } + + if (configuration.getGraphiteEndpoint() != null) { + final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort())); + reporter = GraphiteReporter.forRegistry(metricsRegistry) + .prefixedWith(configuration.getOperationType().name()) + .convertRatesTo(TimeUnit.SECONDS) + .convertDurationsTo(TimeUnit.MILLISECONDS) + .filter(MetricFilter.ALL) + .build(graphite); + } else { + reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS) + .convertDurationsTo(TimeUnit.MILLISECONDS).build(); + } + } + + protected void init() { + } + + void shutdown() { + client.close(); + } + + protected void onSuccess() { + } + + protected void onError(Throwable throwable) { + } + + protected String getCollectionLink() { + if (configuration.isUseNameLink()) { + return this.nameCollectionLink; + } else { + return collection.selfLink(); + } + } + + protected String getDocumentLink(Document doc) { + if (configuration.isUseNameLink()) { + return this.nameCollectionLink + "/docs/" + doc.id(); + } else { + return doc.selfLink(); + } + } + + protected abstract void performWorkload(BaseSubscriber baseSubscriber, long i) throws Exception; + + private boolean shouldContinue(long startTimeMillis, long iterationCount) { + Duration maxDurationTime = configuration.getMaxRunningTimeDuration(); + int maxNumberOfOperations = configuration.getNumberOfOperations(); + if (maxDurationTime == null) { + return iterationCount < maxNumberOfOperations; + } + + if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) { + return false; + } + + if (maxNumberOfOperations < 0) { + return true; + } + + return iterationCount < maxNumberOfOperations; + } + + void run() throws Exception { + + successMeter = metricsRegistry.meter("#Successful Operations"); + failureMeter = metricsRegistry.meter("#Unsuccessful Operations"); + if (configuration.getOperationType() == Operation.ReadLatency + || configuration.getOperationType() == Operation.WriteLatency) + latency = metricsRegistry.timer("Latency"); + + reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS); + + long startTime = System.currentTimeMillis(); + + AtomicLong count = new AtomicLong(0); + long i; + for ( i = 0; shouldContinue(startTime, i); i++) { + + BaseSubscriber baseSubscriber = new BaseSubscriber() { + @Override + protected void hookOnSubscribe(Subscription subscription) { + super.hookOnSubscribe(subscription); + } + + @Override + protected void hookOnNext(T value) { + + } + + @Override + protected void hookOnComplete() { + successMeter.mark(); + concurrencyControlSemaphore.release(); + AsyncBenchmark.this.onSuccess(); + + synchronized (count) { + count.incrementAndGet(); + count.notify(); + } + } + + @Override + protected void hookOnError(Throwable 
throwable) { + failureMeter.mark(); + logger.error("Encountered failure {} on thread {}" , + throwable.getMessage(), Thread.currentThread().getName(), throwable); + concurrencyControlSemaphore.release(); + AsyncBenchmark.this.onError(throwable); + + synchronized (count) { + count.incrementAndGet(); + count.notify(); + } + } + }; + + performWorkload(baseSubscriber, i); + } + + synchronized (count) { + while (count.get() < i) { + count.wait(); + } + } + + long endTime = System.currentTimeMillis(); + logger.info("[{}] operations performed in [{}] seconds.", + configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000)); + + reporter.report(); + reporter.close(); + } +} diff --git a/sdk/cosmos/benchmark/src/main/java/com/azure/data/cosmos/benchmark/AsyncMixedBenchmark.java b/sdk/cosmos/benchmark/src/main/java/com/azure/data/cosmos/benchmark/AsyncMixedBenchmark.java new file mode 100644 index 0000000000000..618d80556beee --- /dev/null +++ b/sdk/cosmos/benchmark/src/main/java/com/azure/data/cosmos/benchmark/AsyncMixedBenchmark.java @@ -0,0 +1,92 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.benchmark; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.internal.Document; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.PartitionKey; +import com.azure.data.cosmos.internal.RequestOptions; +import com.azure.data.cosmos.internal.ResourceResponse; +import org.apache.commons.lang3.RandomStringUtils; +import reactor.core.publisher.BaseSubscriber; +import reactor.core.publisher.Flux; +import reactor.core.scheduler.Schedulers; + +import java.util.Random; +import java.util.UUID; + +class AsyncMixedBenchmark extends AsyncBenchmark { + + private final String uuid; + private final String dataFieldValue; + private final Random r; + + AsyncMixedBenchmark(Configuration cfg) { + super(cfg); + uuid = UUID.randomUUID().toString(); + dataFieldValue = RandomStringUtils.randomAlphabetic(configuration.getDocumentDataFieldSize()); + r = new Random(); + } + + @Override + protected void performWorkload(BaseSubscriber documentBaseSubscriber, long i) throws InterruptedException { + Flux obs; + if (i % 10 == 0 && i % 100 != 0) { + + String idString = uuid + i; + Document newDoc = new Document(); + newDoc.id(idString); + BridgeInternal.setProperty(newDoc, partitionKey, idString); + BridgeInternal.setProperty(newDoc, "dataField1", dataFieldValue); + BridgeInternal.setProperty(newDoc, "dataField2", dataFieldValue); + BridgeInternal.setProperty(newDoc, "dataField3", dataFieldValue); + BridgeInternal.setProperty(newDoc, "dataField4", dataFieldValue); + BridgeInternal.setProperty(newDoc, "dataField5", dataFieldValue); + obs = client.createDocument(getCollectionLink(), newDoc, null, false).map(ResourceResponse::getResource); + + } else if (i % 100 == 0) { + + FeedOptions options = new FeedOptions(); + options.maxItemCount(10); + options.enableCrossPartitionQuery(true); + + String sqlQuery = "Select top 100 * from c order by c._ts"; + obs = client.queryDocuments(getCollectionLink(), sqlQuery, options) + .map(frp -> frp.results().get(0)); + } else { + + int index = r.nextInt(1000); + + RequestOptions options = new RequestOptions(); + options.setPartitionKey(new PartitionKey(docsToRead.get(index).id())); + + obs = client.readDocument(getDocumentLink(docsToRead.get(index)), options).map(ResourceResponse::getResource); + } + + concurrencyControlSemaphore.acquire(); + + obs.subscribeOn(Schedulers.parallel()).subscribe(documentBaseSubscriber); + } +} diff --git a/sdk/cosmos/benchmark/src/main/java/com/azure/data/cosmos/benchmark/AsyncQueryBenchmark.java b/sdk/cosmos/benchmark/src/main/java/com/azure/data/cosmos/benchmark/AsyncQueryBenchmark.java new file mode 100644 index 0000000000000..ee592ad291eeb --- /dev/null +++ b/sdk/cosmos/benchmark/src/main/java/com/azure/data/cosmos/benchmark/AsyncQueryBenchmark.java @@ -0,0 +1,110 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.benchmark; + +import com.azure.data.cosmos.internal.Document; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.PartitionKey; +import reactor.core.publisher.BaseSubscriber; +import reactor.core.publisher.Flux; +import reactor.core.scheduler.Schedulers; + +import java.util.Random; + +class AsyncQueryBenchmark extends AsyncBenchmark> { + + private int pageCount = 0; + + AsyncQueryBenchmark(Configuration cfg) { + super(cfg); + } + + @Override + protected void onSuccess() { + pageCount++; + if (pageCount % 10000 == 0) { + if (pageCount == 0) { + return; + } + logger.info("total pages so far: {}", pageCount); + } + } + + @Override + protected void performWorkload(BaseSubscriber> baseSubscriber, long i) throws InterruptedException { + + Flux> obs; + Random r = new Random(); + FeedOptions options = new FeedOptions(); + + if (configuration.getOperationType() == Configuration.Operation.QueryCross) { + + int index = r.nextInt(1000); + options.enableCrossPartitionQuery(true); + String sqlQuery = "Select * from c where c._rid = \"" + docsToRead.get(index).resourceId() + "\""; + obs = client.queryDocuments(getCollectionLink(), sqlQuery, options); + } else if (configuration.getOperationType() == Configuration.Operation.QuerySingle) { + + int index = r.nextInt(1000); + String pk = docsToRead.get(index).getString("pk"); + options.partitionKey(new PartitionKey(pk)); + String sqlQuery = "Select * from c where c.pk = \"" + pk + "\""; + obs = client.queryDocuments(getCollectionLink(), sqlQuery, options); + } else if (configuration.getOperationType() == Configuration.Operation.QueryParallel) { + + options.maxItemCount(10); + options.enableCrossPartitionQuery(true); + String sqlQuery = "Select * from c"; + obs = client.queryDocuments(getCollectionLink(), sqlQuery, options); + } else if (configuration.getOperationType() == Configuration.Operation.QueryOrderby) { + + options.maxItemCount(10); + options.enableCrossPartitionQuery(true); + String sqlQuery = "Select * from c order by c._ts"; + obs = client.queryDocuments(getCollectionLink(), sqlQuery, options); + } else if (configuration.getOperationType() == Configuration.Operation.QueryAggregate) { + + options.maxItemCount(10); + options.enableCrossPartitionQuery(true); + String sqlQuery = "Select value max(c._ts) from c"; + obs = client.queryDocuments(getCollectionLink(), sqlQuery, options); + } else if (configuration.getOperationType() == Configuration.Operation.QueryAggregateTopOrderby) { + + options.enableCrossPartitionQuery(true); + String sqlQuery = "Select top 1 value count(c) from c order by c._ts"; + obs = client.queryDocuments(getCollectionLink(), sqlQuery, options); + } else if (configuration.getOperationType() == Configuration.Operation.QueryTopOrderby) { + + options.enableCrossPartitionQuery(true); + String sqlQuery = "Select top 1000 * from c order by c._ts"; + obs = client.queryDocuments(getCollectionLink(), sqlQuery, options); + } else { + throw new 
IllegalArgumentException("Unsupported Operation: " + configuration.getOperationType()); + } + concurrencyControlSemaphore.acquire(); + + obs.subscribeOn(Schedulers.parallel()).subscribe(baseSubscriber); + } +} diff --git a/sdk/cosmos/benchmark/src/main/java/com/azure/data/cosmos/benchmark/AsyncQuerySinglePartitionMultiple.java b/sdk/cosmos/benchmark/src/main/java/com/azure/data/cosmos/benchmark/AsyncQuerySinglePartitionMultiple.java new file mode 100644 index 0000000000000..a41280c5afe28 --- /dev/null +++ b/sdk/cosmos/benchmark/src/main/java/com/azure/data/cosmos/benchmark/AsyncQuerySinglePartitionMultiple.java @@ -0,0 +1,66 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.benchmark; + +import com.azure.data.cosmos.internal.Document; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.PartitionKey; +import reactor.core.publisher.BaseSubscriber; +import reactor.core.publisher.Flux; +import reactor.core.scheduler.Schedulers; + +class AsyncQuerySinglePartitionMultiple extends AsyncBenchmark> { + + private static final String SQL_QUERY = "Select * from c where c.pk = \"pk\""; + private FeedOptions options; + private int pageCount = 0; + + AsyncQuerySinglePartitionMultiple(Configuration cfg) { + super(cfg); + options = new FeedOptions(); + options.partitionKey(new PartitionKey("pk")); + options.maxItemCount(10); + } + + @Override + protected void onSuccess() { + pageCount++; + if (pageCount % 10000 == 0) { + if (pageCount == 0) { + return; + } + logger.info("total pages so far: {}", pageCount); + } + } + + @Override + protected void performWorkload(BaseSubscriber> baseSubscriber, long i) throws InterruptedException { + Flux> obs = client.queryDocuments(getCollectionLink(), SQL_QUERY, options); + + concurrencyControlSemaphore.acquire(); + + obs.subscribeOn(Schedulers.parallel()).subscribe(baseSubscriber); + } +} diff --git a/sdk/cosmos/benchmark/src/main/java/com/azure/data/cosmos/benchmark/AsyncReadBenchmark.java b/sdk/cosmos/benchmark/src/main/java/com/azure/data/cosmos/benchmark/AsyncReadBenchmark.java new file mode 100644 index 0000000000000..0e8172ec35287 --- /dev/null +++ b/sdk/cosmos/benchmark/src/main/java/com/azure/data/cosmos/benchmark/AsyncReadBenchmark.java @@ -0,0 +1,91 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.benchmark; + +import com.azure.data.cosmos.internal.Document; +import com.azure.data.cosmos.PartitionKey; +import com.azure.data.cosmos.internal.RequestOptions; +import com.azure.data.cosmos.internal.ResourceResponse; +import com.codahale.metrics.Timer; +import org.reactivestreams.Subscription; +import reactor.core.publisher.BaseSubscriber; +import reactor.core.publisher.Flux; +import reactor.core.scheduler.Schedulers; + +class AsyncReadBenchmark extends AsyncBenchmark> { + + class LatencySubscriber extends BaseSubscriber { + + Timer.Context context; + BaseSubscriber> baseSubscriber; + + LatencySubscriber(BaseSubscriber> baseSubscriber) { + this.baseSubscriber = baseSubscriber; + } + + @Override + protected void hookOnSubscribe(Subscription subscription) { + super.hookOnSubscribe(subscription); + } + + @Override + protected void hookOnNext(T value) { + } + + @Override + protected void hookOnComplete() { + context.stop(); + baseSubscriber.onComplete(); + } + + @Override + protected void hookOnError(Throwable throwable) { + context.stop(); + baseSubscriber.onError(throwable); + } + } + + AsyncReadBenchmark(Configuration cfg) { + super(cfg); + } + + @Override + protected void performWorkload(BaseSubscriber> baseSubscriber, long i) throws InterruptedException { + int index = (int) (i % docsToRead.size()); + RequestOptions options = new RequestOptions(); + options.setPartitionKey(new PartitionKey(docsToRead.get(index).id())); + + Flux> obs = client.readDocument(getDocumentLink(docsToRead.get(index)), options); + + concurrencyControlSemaphore.acquire(); + + if (configuration.getOperationType() == Configuration.Operation.ReadThroughput) { + obs.subscribeOn(Schedulers.parallel()).subscribe(baseSubscriber); + } else { + LatencySubscriber> latencySubscriber = new LatencySubscriber<>(baseSubscriber); + latencySubscriber.context = latency.time(); + obs.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber); + } + } +} diff --git a/sdk/cosmos/benchmark/src/main/java/com/azure/data/cosmos/benchmark/AsyncWriteBenchmark.java b/sdk/cosmos/benchmark/src/main/java/com/azure/data/cosmos/benchmark/AsyncWriteBenchmark.java new file mode 100644 index 0000000000000..54af2e988b8e2 --- /dev/null +++ b/sdk/cosmos/benchmark/src/main/java/com/azure/data/cosmos/benchmark/AsyncWriteBenchmark.java @@ -0,0 +1,105 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.benchmark; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.internal.Document; +import com.azure.data.cosmos.internal.ResourceResponse; +import com.codahale.metrics.Timer; +import org.apache.commons.lang3.RandomStringUtils; +import org.reactivestreams.Subscription; +import reactor.core.publisher.BaseSubscriber; +import reactor.core.publisher.Flux; +import reactor.core.scheduler.Schedulers; + +import java.util.UUID; + +class AsyncWriteBenchmark extends AsyncBenchmark> { + + private final String uuid; + private final String dataFieldValue; + + class LatencySubscriber extends BaseSubscriber { + + Timer.Context context; + BaseSubscriber> baseSubscriber; + + LatencySubscriber(BaseSubscriber> baseSubscriber) { + this.baseSubscriber = baseSubscriber; + } + + @Override + protected void hookOnSubscribe(Subscription subscription) { + super.hookOnSubscribe(subscription); + } + + @Override + protected void hookOnNext(T value) { + } + + @Override + protected void hookOnComplete() { + context.stop(); + baseSubscriber.onComplete(); + } + + @Override + protected void hookOnError(Throwable throwable) { + context.stop(); + baseSubscriber.onError(throwable); + } + } + + AsyncWriteBenchmark(Configuration cfg) { + super(cfg); + uuid = UUID.randomUUID().toString(); + dataFieldValue = RandomStringUtils.randomAlphabetic(configuration.getDocumentDataFieldSize()); + } + + @Override + protected void performWorkload(BaseSubscriber> baseSubscriber, long i) throws InterruptedException { + + String idString = uuid + i; + Document newDoc = new Document(); + newDoc.id(idString); + BridgeInternal.setProperty(newDoc, partitionKey, idString); + BridgeInternal.setProperty(newDoc, "dataField1", dataFieldValue); + BridgeInternal.setProperty(newDoc, "dataField2", dataFieldValue); + BridgeInternal.setProperty(newDoc, "dataField3", dataFieldValue); + BridgeInternal.setProperty(newDoc, "dataField4", dataFieldValue); + BridgeInternal.setProperty(newDoc, "dataField5", dataFieldValue); + Flux> obs = client.createDocument(getCollectionLink(), newDoc, null, + false); + + concurrencyControlSemaphore.acquire(); + + if (configuration.getOperationType() == Configuration.Operation.WriteThroughput) { + obs.subscribeOn(Schedulers.parallel()).subscribe(baseSubscriber); + } else { + LatencySubscriber> latencySubscriber = new LatencySubscriber<>(baseSubscriber); + latencySubscriber.context = latency.time(); + obs.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber); + } + } +} diff --git a/sdk/cosmos/benchmark/src/main/java/com/azure/data/cosmos/benchmark/Configuration.java b/sdk/cosmos/benchmark/src/main/java/com/azure/data/cosmos/benchmark/Configuration.java new file mode 100644 index 0000000000000..0a54017429e28 --- /dev/null +++ b/sdk/cosmos/benchmark/src/main/java/com/azure/data/cosmos/benchmark/Configuration.java @@ -0,0 +1,329 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or 
substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.benchmark; + +import com.azure.data.cosmos.ConnectionMode; +import com.azure.data.cosmos.ConnectionPolicy; +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.benchmark.Configuration.Operation.OperationTypeConverter; +import com.beust.jcommander.IStringConverter; +import com.beust.jcommander.Parameter; +import com.beust.jcommander.ParameterException; +import com.google.common.base.Strings; +import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.builder.ToStringBuilder; +import org.apache.commons.lang3.builder.ToStringStyle; + +import java.time.Duration; +import java.util.Arrays; + +class Configuration { + private final static int GRAPHITE_SERVER_DEFAULT_PORT = 2003; + + @Parameter(names = "-serviceEndpoint", description = "Service Endpoint") + private String serviceEndpoint; + + @Parameter(names = "-masterKey", description = "Master Key") + private String masterKey; + + @Parameter(names = "-databaseId", description = "Database ID") + private String databaseId; + + @Parameter(names = "-collectionId", description = "Collection ID") + private String collectionId; + + @Parameter(names = "-useNameLink", description = "Use name Link") + private boolean useNameLink = false; + + @Parameter(names = "-documentDataFieldSize", description = "Length of a document data field in characters (16-bit)") + private int documentDataFieldSize = 20; + + @Parameter(names = "-maxConnectionPoolSize", description = "Max Connection Pool Size") + private Integer maxConnectionPoolSize = 1000; + + @Parameter(names = "-consistencyLevel", description = "Consistency Level", converter = ConsistencyLevelConverter.class) + private ConsistencyLevel consistencyLevel = ConsistencyLevel.SESSION; + + @Parameter(names = "-connectionMode", description = "Connection Mode") + private ConnectionMode connectionMode = ConnectionMode.DIRECT; + + @Parameter(names = "-graphiteEndpoint", description = "Graphite endpoint") + private String graphiteEndpoint; + + @Parameter(names = "-enableJvmStats", description = "Enables JVM Stats") + private boolean enableJvmStats; + + @Parameter(names = "-operation", description = "Type of Workload:\n" + + "\tReadThroughput- run a READ workload that prints only throughput *\n" + + "\tWriteThroughput - run a Write workload that prints only throughput\n" + + "\tReadLatency - run a READ workload that prints both throughput and latency *\n" + + "\tWriteLatency - run a Write workload that prints both throughput and latency\n" + + "\tQueryCross - run a 'Select * from c where c._rid = SOME_RID' workload that prints throughput\n" + + "\tQuerySingle - run a 'Select * from c where c.pk = SOME_PK' workload that prints throughput\n" + + "\tQuerySingleMany - run a 'Select * from c where c.pk = \"pk\"' workload that prints throughput\n" + + "\tQueryParallel - run a 'Select * from c' workload that prints throughput\n" + + "\tQueryOrderby - run a 'Select * from c order by c._ts' workload that prints throughput\n" + + 
"\tQueryAggregate - run a 'Select value max(c._ts) from c' workload that prints throughput\n" + + "\tQueryAggregateTopOrderby - run a 'Select top 1 value count(c) from c order by c._ts' workload that prints throughput\n" + + "\tQueryTopOrderby - run a 'Select top 1000 * from c order by c._ts' workload that prints throughput\n" + + "\tMixed - runa workload of 90 reads, 9 writes and 1 QueryTopOrderby per 100 operations *\n" + + "\tReadMyWrites - run a workflow of writes followed by reads and queries attempting to read the write.*\n" + + "\n\t* writes 10k documents initially, which are used in the reads", converter = OperationTypeConverter.class) + private Operation operation = Operation.WriteThroughput; + + @Parameter(names = "-concurrency", description = "Degree of Concurrency in Inserting Documents." + + " If this value is not specified, the max connection pool size will be used as the concurrency level.") + private Integer concurrency; + + @Parameter(names = "-numberOfOperations", description = "Total NUMBER Of Documents To Insert") + private int numberOfOperations = 100000; + + static class DurationConverter implements IStringConverter { + @Override + public Duration convert(String value) { + if (value == null) { + return null; + } + + return Duration.parse(value); + } + } + + @Parameter(names = "-maxRunningTimeDuration", description = "Max Running Time Duration", converter = DurationConverter.class) + private Duration maxRunningTimeDuration; + + @Parameter(names = "-printingInterval", description = "Interval of time after which Metrics should be printed (seconds)") + private int printingInterval = 10; + + @Parameter(names = "-numberOfPreCreatedDocuments", description = "Total NUMBER Of Documents To pre create for a read workload to use") + private int numberOfPreCreatedDocuments = 1000; + + @Parameter(names = {"-h", "-help", "--help"}, description = "Help", help = true) + private boolean help = false; + + enum Operation { + ReadThroughput, + WriteThroughput, + ReadLatency, + WriteLatency, + QueryCross, + QuerySingle, + QuerySingleMany, + QueryParallel, + QueryOrderby, + QueryAggregate, + QueryAggregateTopOrderby, + QueryTopOrderby, + Mixed, + ReadMyWrites; + + static Operation fromString(String code) { + + for (Operation output : Operation.values()) { + if (output.toString().equalsIgnoreCase(code)) { + return output; + } + } + + return null; + } + + static class OperationTypeConverter implements IStringConverter { + + /* + * (non-Javadoc) + * + * @see com.beust.jcommander.IStringConverter#convert(java.lang.STRING) + */ + @Override + public Operation convert(String value) { + Operation ret = fromString(value); + if (ret == null) { + throw new ParameterException("Value " + value + " can not be converted to ClientType. " + + "Available values are: " + Arrays.toString(Operation.values())); + } + return ret; + } + } + } + + private static ConsistencyLevel fromString(String code) { + for (ConsistencyLevel output : ConsistencyLevel.values()) { + if (output.toString().equalsIgnoreCase(code)) { + return output; + } + } + return null; + } + + static class ConsistencyLevelConverter implements IStringConverter { + + /* + * (non-Javadoc) + * + * @see com.beust.jcommander.IStringConverter#convert(java.lang.STRING) + */ + @Override + public ConsistencyLevel convert(String value) { + ConsistencyLevel ret = fromString(value); + if (ret == null) { + throw new ParameterException("Value " + value + " can not be converted to ClientType. 
" + + "Available values are: " + Arrays.toString(Operation.values())); + } + return ret; + } + } + + Duration getMaxRunningTimeDuration() { + return maxRunningTimeDuration; + } + + Operation getOperationType() { + return operation; + } + + int getNumberOfOperations() { + return numberOfOperations; + } + + String getServiceEndpoint() { + return serviceEndpoint; + } + + String getMasterKey() { + return masterKey; + } + + boolean isHelp() { + return help; + } + + int getDocumentDataFieldSize() { + return documentDataFieldSize; + } + + ConnectionPolicy getConnectionPolicy() { + ConnectionPolicy policy = new ConnectionPolicy(); + policy.connectionMode(connectionMode); + policy.maxPoolSize(maxConnectionPoolSize); + return policy; + } + + ConsistencyLevel getConsistencyLevel() { + return consistencyLevel; + } + + String getDatabaseId() { + return databaseId; + } + + String getCollectionId() { + return collectionId; + } + + int getNumberOfPreCreatedDocuments() { + return numberOfPreCreatedDocuments; + } + + int getPrintingInterval() { + return printingInterval; + } + + int getConcurrency() { + if (this.concurrency != null) { + return concurrency; + } else { + return this.maxConnectionPoolSize; + } + } + + boolean isUseNameLink() { + return useNameLink; + } + + public boolean isEnableJvmStats() { + return enableJvmStats; + } + + public String getGraphiteEndpoint() { + if (graphiteEndpoint == null) { + return null; + } + + return StringUtils.substringBeforeLast(graphiteEndpoint, ":"); + } + + public int getGraphiteEndpointPort() { + if (graphiteEndpoint == null) { + return -1; + } + + String portAsString = Strings.emptyToNull(StringUtils.substringAfterLast(graphiteEndpoint, ":")); + if (portAsString == null) { + return GRAPHITE_SERVER_DEFAULT_PORT; + } else { + return Integer.parseInt(portAsString); + } + } + + public String toString() { + return ToStringBuilder.reflectionToString(this, ToStringStyle.MULTI_LINE_STYLE); + } + + void tryGetValuesFromSystem() { + serviceEndpoint = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("SERVICE_END_POINT")), + serviceEndpoint); + + masterKey = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("MASTER_KEY")), masterKey); + + databaseId = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("DATABASE_ID")), databaseId); + + collectionId = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("COLLECTION_ID")), + collectionId); + + documentDataFieldSize = Integer.parseInt( + StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("DOCUMENT_DATA_FIELD_SIZE")), + Integer.toString(documentDataFieldSize))); + + maxConnectionPoolSize = Integer.parseInt( + StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("MAX_CONNECTION_POOL_SIZE")), + Integer.toString(maxConnectionPoolSize))); + + ConsistencyLevelConverter consistencyLevelConverter = new ConsistencyLevelConverter(); + consistencyLevel = consistencyLevelConverter.convert(StringUtils + .defaultString(Strings.emptyToNull(System.getenv().get("CONSISTENCY_LEVEL")), consistencyLevel.name())); + + OperationTypeConverter operationTypeConverter = new OperationTypeConverter(); + operation = operationTypeConverter.convert( + StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("OPERATION")), operation.name())); + + String concurrencyValue = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("CONCURRENCY")), + concurrency == null ? null : Integer.toString(concurrency)); + concurrency = concurrencyValue == null ? 
null : Integer.parseInt(concurrencyValue); + + String numberOfOperationsValue = StringUtils.defaultString( + Strings.emptyToNull(System.getenv().get("NUMBER_OF_OPERATIONS")), Integer.toString(numberOfOperations)); + numberOfOperations = Integer.parseInt(numberOfOperationsValue); + } +} diff --git a/sdk/cosmos/benchmark/src/main/java/com/azure/data/cosmos/benchmark/DocDBUtils.java b/sdk/cosmos/benchmark/src/main/java/com/azure/data/cosmos/benchmark/DocDBUtils.java new file mode 100644 index 0000000000000..534603bd67031 --- /dev/null +++ b/sdk/cosmos/benchmark/src/main/java/com/azure/data/cosmos/benchmark/DocDBUtils.java @@ -0,0 +1,65 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
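/*
 * Editor's note (illustrative sketch, not part of the change): the Configuration class above is a
 * plain JCommander argument holder. A hypothetical same-package launcher could populate it the way
 * Main does -- environment variables first, explicit flags second. The class name and the endpoint,
 * key, database and collection values below are placeholders.
 */
package com.azure.data.cosmos.benchmark;

import com.beust.jcommander.JCommander;

class ConfigurationParsingSketch {
    public static void main(String[] args) {
        String[] sampleArgs = {
            "-serviceEndpoint", "https://<your-account>.documents.azure.com:443/",  // placeholder
            "-masterKey", "<your-master-key>",                                      // placeholder
            "-databaseId", "benchmarkDb",                                           // placeholder
            "-collectionId", "benchmarkColl",                                       // placeholder
            "-operation", "ReadMyWrites",
            "-connectionMode", "DIRECT",
            "-concurrency", "2",
            "-numberOfOperations", "1000"
        };

        Configuration cfg = new Configuration();
        cfg.tryGetValuesFromSystem();    // picks up SERVICE_END_POINT, MASTER_KEY, ... if set in the environment
        new JCommander(cfg, sampleArgs); // explicit command-line flags then override those values

        System.out.println("operation   : " + cfg.getOperationType());
        System.out.println("consistency : " + cfg.getConsistencyLevel());
        System.out.println("concurrency : " + cfg.getConcurrency());
    }
}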
+ */ + +package com.azure.data.cosmos.benchmark; + +import com.azure.data.cosmos.internal.AsyncDocumentClient; +import com.azure.data.cosmos.internal.Database; +import com.azure.data.cosmos.internal.DocumentCollection; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.SqlParameter; +import com.azure.data.cosmos.SqlParameterList; +import com.azure.data.cosmos.SqlQuerySpec; + +class DocDBUtils { + + private DocDBUtils() { + } + + static Database getDatabase(AsyncDocumentClient client, String databaseId) { + FeedResponse feedResponsePages = client + .queryDatabases(new SqlQuerySpec("SELECT * FROM root r WHERE r.id=@id", + new SqlParameterList(new SqlParameter("@id", databaseId))), null) + .single().block(); + + if (feedResponsePages.results().isEmpty()) { + throw new RuntimeException("cannot find datatbase " + databaseId); + } + return feedResponsePages.results().get(0); + } + + static DocumentCollection getCollection(AsyncDocumentClient client, String databaseLink, + String collectionId) { + FeedResponse feedResponsePages = client + .queryCollections(databaseLink, + new SqlQuerySpec("SELECT * FROM root r WHERE r.id=@id", + new SqlParameterList(new SqlParameter("@id", collectionId))), + null) + .single().block(); + + if (feedResponsePages.results().isEmpty()) { + throw new RuntimeException("cannot find collection " + collectionId); + } + return feedResponsePages.results().get(0); + } +} diff --git a/sdk/cosmos/benchmark/src/main/java/com/azure/data/cosmos/benchmark/Main.java b/sdk/cosmos/benchmark/src/main/java/com/azure/data/cosmos/benchmark/Main.java new file mode 100644 index 0000000000000..edd3969c89f6e --- /dev/null +++ b/sdk/cosmos/benchmark/src/main/java/com/azure/data/cosmos/benchmark/Main.java @@ -0,0 +1,98 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
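/*
 * Editor's note (illustrative sketch, not part of the change): the two DocDBUtils helpers above
 * resolve a Database and DocumentCollection by id with a parameterized query. A hypothetical
 * same-package caller might use them like this before starting a benchmark; the endpoint, key and
 * resource ids are placeholders, and the ConnectionPolicy mirrors the one built in the test
 * utilities later in this change.
 */
package com.azure.data.cosmos.benchmark;

import com.azure.data.cosmos.ConnectionPolicy;
import com.azure.data.cosmos.internal.AsyncDocumentClient;
import com.azure.data.cosmos.internal.Database;
import com.azure.data.cosmos.internal.DocumentCollection;

class DocDBUtilsUsageSketch {
    public static void main(String[] args) {
        AsyncDocumentClient client = new AsyncDocumentClient.Builder()
                .withServiceEndpoint("https://<your-account>.documents.azure.com:443/") // placeholder
                .withMasterKeyOrResourceToken("<your-master-key>")                      // placeholder
                .withConnectionPolicy(new ConnectionPolicy())
                .build();
        try {
            Database database = DocDBUtils.getDatabase(client, "benchmarkDb");          // placeholder ids
            DocumentCollection collection =
                    DocDBUtils.getCollection(client, "dbs/" + database.id(), "benchmarkColl");
            System.out.println("resolved collection: " + collection.id());
        } finally {
            client.close();
        }
    }
}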
+ */ + +package com.azure.data.cosmos.benchmark; + +import com.beust.jcommander.JCommander; +import com.beust.jcommander.ParameterException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class Main { + + private final static Logger LOGGER = LoggerFactory.getLogger(Main.class); + + public static void main(String[] args) throws Exception { + org.apache.log4j.Logger.getLogger("io.netty").setLevel(org.apache.log4j.Level.OFF); + + try { + LOGGER.debug("Parsing the arguments ..."); + Configuration cfg = new Configuration(); + cfg.tryGetValuesFromSystem(); + + JCommander jcommander = new JCommander(cfg, args); + if (cfg.isHelp()) { + // prints out the usage help + jcommander.usage(); + return; + } + + AsyncBenchmark benchmark; + switch (cfg.getOperationType()) { + case WriteThroughput: + case WriteLatency: + benchmark = new AsyncWriteBenchmark(cfg); + break; + + case ReadThroughput: + case ReadLatency: + benchmark = new AsyncReadBenchmark(cfg); + break; + + case QueryCross: + case QuerySingle: + case QueryParallel: + case QueryOrderby: + case QueryAggregate: + case QueryTopOrderby: + case QueryAggregateTopOrderby: + benchmark = new AsyncQueryBenchmark(cfg); + break; + + case Mixed: + benchmark = new AsyncMixedBenchmark(cfg); + break; + + case QuerySingleMany: + benchmark = new AsyncQuerySinglePartitionMultiple(cfg); + break; + + case ReadMyWrites: + benchmark = new ReadMyWriteWorkflow(cfg); + break; + + default: + throw new RuntimeException(cfg.getOperationType() + " is not supported"); + } + + benchmark.run(); + benchmark.shutdown(); + + } catch (ParameterException e) { + // if any error in parsing the cmd-line options print out the usage help + System.err.println("INVALID Usage: " + e.getMessage()); + System.err.println("Try '-help' for more information."); + throw e; + } + } +} diff --git a/sdk/cosmos/benchmark/src/main/java/com/azure/data/cosmos/benchmark/ReadMyWriteWorkflow.java b/sdk/cosmos/benchmark/src/main/java/com/azure/data/cosmos/benchmark/ReadMyWriteWorkflow.java new file mode 100644 index 0000000000000..233f177e3bb66 --- /dev/null +++ b/sdk/cosmos/benchmark/src/main/java/com/azure/data/cosmos/benchmark/ReadMyWriteWorkflow.java @@ -0,0 +1,419 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
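/*
 * Editor's note (illustrative sketch, not part of the change): Main above parses the flags into a
 * Configuration, maps the -operation value onto one of the Async*Benchmark implementations and runs
 * it to completion. It can also be driven programmatically with a flag string, as the workflow tests
 * in this change do; the class name and every flag value below are placeholders.
 */
package com.azure.data.cosmos.benchmark;

import org.apache.commons.lang3.StringUtils;

class MainLauncherSketch {
    public static void main(String[] args) throws Exception {
        String cmd = "-serviceEndpoint https://<your-account>.documents.azure.com:443/"   // placeholder
                + " -masterKey <your-master-key>"                                         // placeholder
                + " -databaseId benchmarkDb -collectionId benchmarkColl"                  // placeholder
                + " -consistencyLevel SESSION -concurrency 2 -numberOfOperations 1000"
                + " -operation WriteThroughput -connectionMode DIRECT";

        // Split on whitespace, exactly as the CLI tests do, and hand the tokens to Main.
        Main.main(StringUtils.split(cmd));
    }
}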
+ */ + +package com.azure.data.cosmos.benchmark; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.internal.Document; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.NotFoundException; +import com.azure.data.cosmos.PartitionKey; +import com.azure.data.cosmos.SqlParameter; +import com.azure.data.cosmos.SqlParameterList; +import com.azure.data.cosmos.SqlQuerySpec; +import com.azure.data.cosmos.internal.RequestOptions; +import com.azure.data.cosmos.internal.ResourceResponse; +import com.azure.data.cosmos.internal.Utils; +import org.apache.commons.lang3.RandomUtils; +import reactor.core.publisher.BaseSubscriber; +import reactor.core.publisher.Flux; +import reactor.core.scheduler.Schedulers; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.stream.Collectors; + +/** + * This workflow is intended for session and above consistency levels. + *

+ * This workflow first will create some documents in cosmosdb and will store them all in its local cache. + * Then at each step will randomly will try to do a write, read its own write, or query for its own write. + */ +class ReadMyWriteWorkflow extends AsyncBenchmark { + private final static String QUERY_FIELD_NAME = "prop"; + private final static String ORDER_BY_FIELD_NAME = "_ts"; + private final static int MAX_TOP_QUERY_COUNT = 2000; + + private ConcurrentHashMap cache; + private int cacheSize; + + ReadMyWriteWorkflow(Configuration cfg) { + super(cfg); + } + + @Override + protected void init() { + this.cacheSize = configuration.getNumberOfPreCreatedDocuments(); + this.cache = new ConcurrentHashMap<>(); + this.populateCache(); + } + + @Override + protected void performWorkload(BaseSubscriber baseSubscriber, long i) throws Exception { + Flux obs; + boolean readyMyWrite = RandomUtils.nextBoolean(); + if (readyMyWrite) { + // will do a write and immediately upon success will either + // do a point read + // or single partition query + // or cross partition query to find the write. + int j = Math.toIntExact(Math.floorMod(i, 3)); + switch (j) { + case 0: + // write a random document to cosmodb and update the cache. + // then try to read the document which just was written + obs = writeDocument() + .flatMap(this::readDocument); + break; + case 1: + // write a random document to cosmodb and update the cache. + // then try to query for the document which just was written + obs = writeDocument() + .flatMap(d -> singlePartitionQuery(d) + .switchIfEmpty(Flux.error(new NotFoundException( + "couldn't find my write in a single partition query!")))); + break; + case 2: + // write a random document to cosmodb and update the cache. + // then try to query for the document which just was written + obs = writeDocument() + .flatMap(d -> xPartitionQuery(generateQuery(d)) + .switchIfEmpty(Flux.error(new NotFoundException( + "couldn't find my write in a cross partition query!")))); + break; + default: + assert false; + throw new IllegalStateException(); + } + } else { + // will either do + // a write + // a point read for a in memory cached document + // or single partition query for a in memory cached document + // or cross partition query for a in memory cached document + int j = Math.toIntExact(Math.floorMod(i, 4)); + switch (j) { + case 0: + // write a random document to cosmosdb and update the cache + obs = writeDocument(); + break; + case 1: + // randomly choose a document from the cache and do a single point read + obs = readDocument(cache.get(cacheKey())); + break; + case 2: + // randomly choose a document from the cache and do a single partition query + obs = singlePartitionQuery(cache.get(cacheKey())) + .switchIfEmpty(Flux.error(new NotFoundException( + "couldn't find my cached write in a single partition query!"))); + break; + case 3: + // randomly choose a document from the cache and do a cross partition query + obs = xPartitionQuery(generateRandomQuery()) + .switchIfEmpty(Flux.error(new NotFoundException( + "couldn't find my cached write in a cross partition query!"))); + break; + default: + assert false; + throw new IllegalStateException(); + } + } + + concurrencyControlSemaphore.acquire(); + + obs.subscribeOn(Schedulers.parallel()).subscribe(baseSubscriber); + } + + private void populateCache() { + ArrayList> list = new ArrayList<>(); + for (int i = 0; i < cacheSize; i++) { + Flux observable = writeDocument(i); + list.add(observable); + } + + logger.info("PRE-populating {} documents ....", 
cacheSize); + Flux.merge(Flux.fromIterable(list), configuration.getConcurrency()).then().block(); + logger.info("Finished pre-populating {} documents", cacheSize); + } + + /** + * Writes a random document to cosmosdb and store it in a random location in the cache. + * + * @return Observable of document + */ + private Flux writeDocument() { + return writeDocument(null); + } + + /** + * Writes a random document to cosmosdb and store it in the slot i-th in the cache. + * + * @return Observable of document + */ + private Flux writeDocument(Integer i) { + String idString = Utils.randomUUID().toString(); + String randomVal = Utils.randomUUID().toString(); + Document document = new Document(); + document.id(idString); + BridgeInternal.setProperty(document, partitionKey, idString); + BridgeInternal.setProperty(document, QUERY_FIELD_NAME, randomVal); + BridgeInternal.setProperty(document, "dataField1", randomVal); + BridgeInternal.setProperty(document, "dataField2", randomVal); + BridgeInternal.setProperty(document, "dataField3", randomVal); + BridgeInternal.setProperty(document, "dataField4", randomVal); + + Integer key = i == null ? cacheKey() : i; + return client.createDocument(getCollectionLink(), document, null, false) + .doOnNext(r -> cache.put(key, r.getResource())) + .map(ResourceResponse::getResource); + } + + /** + * given a document tries to read it from cosmosdb + * + * @param d document to be read + * @return Observable of document + */ + private Flux readDocument(Document d) { + RequestOptions options = new RequestOptions(); + options.setPartitionKey(new PartitionKey(d.getString(partitionKey))); + + return client.readDocument(getDocumentLink(d), options) + .map(ResourceResponse::getResource); + } + + /** + * Generates a random query + * + * @return a randomly generated query + */ + private SqlQuerySpec generateRandomQuery() { + int docCount = RandomUtils.nextInt(1, 2); + Set keys = new HashSet<>(); + for (int i = 0; i < docCount; i++) { + int key = RandomUtils.nextInt(0, cacheSize); + keys.add(key); + } + List documentList = null; + if (RandomUtils.nextBoolean()) { + documentList = keys.stream().map(cache::get).collect(Collectors.toList()); + } + + int top = RandomUtils.nextInt(0, MAX_TOP_QUERY_COUNT); + boolean useOrderBy = RandomUtils.nextBoolean(); + + return generateQuery(documentList, top > 1000 ? top : null, useOrderBy); + } + + /** + * given a query returns the corresponding observable result + * + * @param query to find document + * @return Observable document + */ + private Flux xPartitionQuery(SqlQuerySpec query) { + FeedOptions options = new FeedOptions(); + options.maxDegreeOfParallelism(-1); + options.enableCrossPartitionQuery(true); + + return client.queryDocuments(getCollectionLink(), query, options) + .flatMap(p -> Flux.fromIterable(p.results())); + } + + /** + * given a document returns the corresponding observable result of issuing a single partition query + * for the document. + * + * @param d document to be queried for. 
+ * @return Observable document + */ + private Flux singlePartitionQuery(Document d) { + FeedOptions options = new FeedOptions(); + options.partitionKey(new PartitionKey(d.get(partitionKey))); + + SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(String.format("Select top 100 * from c where c.%s = '%s'", + QUERY_FIELD_NAME, + d.getString(QUERY_FIELD_NAME))); + return client.queryDocuments(getCollectionLink(), sqlQuerySpec, options) + .flatMap(p -> Flux.fromIterable(p.results())); + } + + /** + * Given a document list generates a randomly generated sql query which can find only and only the documents + *

+ * The generated query may have a TOP clause, an ORDER BY clause, or both. + * + * @param documentList list of documents to be queried for + * @return SqlQuerySpec + */ + private SqlQuerySpec generateQuery(Document... documentList) { + return generateQuery(Arrays.asList(documentList)); + } + + /** + * Given a document list, generates a random SQL query that finds exactly those documents + *

+ * The generated query may have a top, orderby, top and orderby. + * + * @param documentList list of documents to be queried for + * @return SqlQuerySpec + */ + private SqlQuerySpec generateQuery(List documentList) { + int top = RandomUtils.nextInt(0, MAX_TOP_QUERY_COUNT); + boolean useOrderBy = RandomUtils.nextBoolean(); + + return generateQuery(documentList, top >= documentList.size() ? top : null, useOrderBy); + } + + /** + * Given a document list generates sql query which can find only and only the documents + * + * @param documentList lists of documents to find + * @param topCount if a valid top count, the query will have a top count + * @param withOrderBy if not null, the query will have an orderby clause + * @return SqlQuerySpec + */ + private SqlQuerySpec generateQuery(List documentList, Integer topCount, boolean withOrderBy) { + QueryBuilder queryBuilder = new QueryBuilder(); + if (withOrderBy) { + queryBuilder.orderBy(ORDER_BY_FIELD_NAME); + } + if (documentList != null && !documentList.isEmpty()) { + if (topCount != null) { + topCount = Math.max(topCount, documentList.size()); + } + + queryBuilder.whereClause(QueryBuilder.WhereClause.InWhereClause.asInWhereClause(QUERY_FIELD_NAME, documentList)); + } + + if ((documentList == null || documentList.isEmpty()) && (topCount == null || topCount <= 0)) { + topCount = 100; + } + + if (topCount != null) { + queryBuilder.top(topCount); + } + + return queryBuilder.toSqlQuerySpec(); + } + + private int cacheKey() { + return RandomUtils.nextInt(0, cacheSize); + } + + /** + * This is used for making random query generation with different terms (top, orderby) easier. + */ + static class QueryBuilder { + private String orderByFieldName; + private Integer topCount; + private WhereClause whereClause; + + QueryBuilder top(int top) { + this.topCount = top; + return this; + } + + QueryBuilder orderBy(String fieldName) { + this.orderByFieldName = fieldName; + return this; + } + + QueryBuilder whereClause(WhereClause whereClause) { + this.whereClause = whereClause; + return this; + } + + static abstract class WhereClause { + static class InWhereClause extends WhereClause { + private final List parameters; + private final String whereCondition; + + static InWhereClause asInWhereClause(String fieldName, List documentList) { + List parameters = new ArrayList<>(documentList.size()); + for (int i = 0; i < documentList.size(); i++) { + Object value = documentList.get(i).get(fieldName); + SqlParameter sqlParameter = new SqlParameter("@param" + i, value); + parameters.add(sqlParameter); + } + + return new InWhereClause(fieldName, parameters); + } + + InWhereClause(String fieldName, List parameters) { + StringBuilder stringBuilder = new StringBuilder(); + stringBuilder.append(fieldName); + stringBuilder.append(" IN ("); + List params = parameters.stream().map(SqlParameter::name).collect(Collectors.toList()); + stringBuilder.append(String.join(", ", params)); + stringBuilder.append(")"); + + this.whereCondition = stringBuilder.toString(); + this.parameters = parameters; + } + + @Override + String getWhereCondition(String rootName) { + return rootName + "." 
+ this.whereCondition; + } + + @Override + SqlParameterList getSqlParameterCollection() { + return new SqlParameterList(this.parameters); + } + } + + abstract String getWhereCondition(String rootName); + + abstract SqlParameterList getSqlParameterCollection(); + } + + SqlQuerySpec toSqlQuerySpec() { + StringBuilder stringBuilder = new StringBuilder(); + stringBuilder.append("SELECT"); + + if (topCount != null) { + stringBuilder.append(" TOP ").append(topCount); + } + + stringBuilder.append(" * FROM root"); + if (whereClause != null) { + stringBuilder.append(" WHERE "); + stringBuilder.append(whereClause.getWhereCondition("root")); + + } + + if (orderByFieldName != null) { + stringBuilder.append(" ORDER BY ").append("root.").append(orderByFieldName); + } + + return whereClause == null ? + new SqlQuerySpec(stringBuilder.toString()) : + new SqlQuerySpec(stringBuilder.toString(), whereClause.getSqlParameterCollection()); + } + } +} diff --git a/sdk/cosmos/benchmark/src/main/resources/log4j.properties b/sdk/cosmos/benchmark/src/main/resources/log4j.properties new file mode 100644 index 0000000000000..7a31b9cb1817a --- /dev/null +++ b/sdk/cosmos/benchmark/src/main/resources/log4j.properties @@ -0,0 +1,14 @@ +# this is the log4j configuration for tests + +# Set root logger level to DEBUG and its only appender to A1. +log4j.rootLogger=INFO, A1 + +log4j.category.com.azure.data.cosmos.internal.directconnectivity.rntbd=WARN +log4j.category.io.netty=INFO +log4j.category.io.reactivex=INFO +# A1 is set to be a ConsoleAppender. +log4j.appender.A1=org.apache.log4j.ConsoleAppender + +# A1 uses PatternLayout. +log4j.appender.A1.layout=org.apache.log4j.PatternLayout +log4j.appender.A1.layout.ConversionPattern=%d %5X{pid} [%t] %-5p %c - %m%n \ No newline at end of file diff --git a/sdk/cosmos/benchmark/src/test/java/com/azure/data/cosmos/benchmark/QueryBuilderTest.java b/sdk/cosmos/benchmark/src/test/java/com/azure/data/cosmos/benchmark/QueryBuilderTest.java new file mode 100644 index 0000000000000..6e0580ddcc018 --- /dev/null +++ b/sdk/cosmos/benchmark/src/test/java/com/azure/data/cosmos/benchmark/QueryBuilderTest.java @@ -0,0 +1,84 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.benchmark; + +import com.azure.data.cosmos.SqlParameter; +import com.google.common.collect.ImmutableList; +import org.testng.annotations.Test; + +import static org.assertj.core.api.Assertions.assertThat; + +public class QueryBuilderTest { + + @Test(groups = {"unit"}) + public void basic() { + ReadMyWriteWorkflow.QueryBuilder queryBuilder = new ReadMyWriteWorkflow.QueryBuilder(); + assertThat(queryBuilder.toSqlQuerySpec().queryText()) + .isEqualTo("SELECT * FROM root"); + } + + @Test(groups = {"unit"}) + public void top() { + ReadMyWriteWorkflow.QueryBuilder queryBuilder = new ReadMyWriteWorkflow.QueryBuilder(); + queryBuilder.top(50); + assertThat(queryBuilder.toSqlQuerySpec().queryText()) + .isEqualTo("SELECT TOP 50 * FROM root"); + } + + @Test(groups = {"unit"}) + public void orderBy() { + ReadMyWriteWorkflow.QueryBuilder queryBuilder = new ReadMyWriteWorkflow.QueryBuilder(); + queryBuilder.orderBy("prop"); + assertThat(queryBuilder.toSqlQuerySpec().queryText()) + .isEqualTo("SELECT * FROM root ORDER BY root.prop"); + } + + @Test(groups = {"unit"}) + public void whereInClause() { + ReadMyWriteWorkflow.QueryBuilder queryBuilder = new ReadMyWriteWorkflow.QueryBuilder(); + + ImmutableList parameters = ImmutableList.of(new SqlParameter("@param1", 1), + new SqlParameter("@param2", 2)); + queryBuilder.whereClause(new ReadMyWriteWorkflow.QueryBuilder.WhereClause.InWhereClause("colName", + parameters)); + assertThat(queryBuilder.toSqlQuerySpec().queryText()) + .isEqualTo("SELECT * FROM root WHERE root.colName IN (@param1, @param2)"); + assertThat(queryBuilder.toSqlQuerySpec().parameters()).containsExactlyElementsOf(parameters); + } + + @Test(groups = {"unit"}) + public void topOrderByWhereClause() { + ReadMyWriteWorkflow.QueryBuilder queryBuilder = new ReadMyWriteWorkflow.QueryBuilder(); + queryBuilder.orderBy("prop"); + queryBuilder.top(5); + + ImmutableList parameters = ImmutableList.of(new SqlParameter("@param1", 1), + new SqlParameter("@param2", 2)); + queryBuilder.whereClause(new ReadMyWriteWorkflow.QueryBuilder.WhereClause.InWhereClause("colName", + parameters)); + assertThat(queryBuilder.toSqlQuerySpec().queryText()) + .isEqualTo("SELECT TOP 5 * FROM root WHERE root.colName IN (@param1, @param2) ORDER BY root.prop"); + assertThat(queryBuilder.toSqlQuerySpec().parameters()).containsExactlyElementsOf(parameters); + } +} diff --git a/sdk/cosmos/benchmark/src/test/java/com/azure/data/cosmos/benchmark/ReadMyWritesConsistencyTest.java b/sdk/cosmos/benchmark/src/test/java/com/azure/data/cosmos/benchmark/ReadMyWritesConsistencyTest.java new file mode 100644 index 0000000000000..fc7086a6e82da --- /dev/null +++ b/sdk/cosmos/benchmark/src/test/java/com/azure/data/cosmos/benchmark/ReadMyWritesConsistencyTest.java @@ -0,0 +1,226 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.benchmark; + +import com.azure.data.cosmos.internal.AsyncDocumentClient; +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.DataType; +import com.azure.data.cosmos.internal.Database; +import com.azure.data.cosmos.internal.DocumentCollection; +import com.azure.data.cosmos.IncludedPath; +import com.azure.data.cosmos.Index; +import com.azure.data.cosmos.IndexingPolicy; +import com.azure.data.cosmos.PartitionKeyDefinition; +import com.azure.data.cosmos.internal.RequestOptions; +import com.azure.data.cosmos.internal.TestConfigurations; +import com.beust.jcommander.JCommander; +import com.google.common.base.CaseFormat; +import com.google.common.base.Strings; +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; +import reactor.core.scheduler.Schedulers; + +import java.time.Duration; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.assertj.core.api.Assertions.assertThat; + +public class ReadMyWritesConsistencyTest { + private final static Logger logger = LoggerFactory.getLogger(ReadMyWritesConsistencyTest.class); + private final int initialCollectionThroughput = 10_000; + private final int newCollectionThroughput = 100_000; + private final int delayForInitiationCollectionScaleUpInSeconds = 60; + private final Duration defaultMaxRunningTimeInSeconds = Duration.ofMinutes(45); + + private final String maxRunningTime = + System.getProperty("MAX_RUNNING_TIME", StringUtils.defaultString(Strings.emptyToNull( + System.getenv().get("MAX_RUNNING_TIME")), defaultMaxRunningTimeInSeconds.toString())); + + private final AtomicBoolean collectionScaleUpFailed = new AtomicBoolean(false); + private final String desiredConsistency = + System.getProperty("DESIRED_CONSISTENCY", + StringUtils.defaultString(Strings.emptyToNull( + System.getenv().get("DESIRED_CONSISTENCY")), "Session")); + + private final String numberOfOperationsAsString = + System.getProperty("NUMBER_OF_OPERATIONS", + StringUtils.defaultString(Strings.emptyToNull( + System.getenv().get("NUMBER_OF_OPERATIONS")), "-1")); + + private Database database; + private DocumentCollection collection; + + @Test(dataProvider = "collectionLinkTypeArgProvider", groups = "e2e") + public void readMyWrites(boolean useNameLink) throws Exception { + int concurrency = 5; + String cmdFormat = "-serviceEndpoint %s -masterKey %s" + + " -databaseId %s -collectionId %s" + + " -consistencyLevel %s -concurrency %d" + + " -numberOfOperations %s" + + " -maxRunningTimeDuration %s" + + " -operation ReadMyWrites -connectionMode DIRECT -numberOfPreCreatedDocuments 100 " + + " -printingInterval 60"; + + String 
cmd = String.format(cmdFormat, + TestConfigurations.HOST, + TestConfigurations.MASTER_KEY, + database.id(), + collection.id(), + CaseFormat.UPPER_CAMEL.to(CaseFormat.UPPER_UNDERSCORE, desiredConsistency), + concurrency, + numberOfOperationsAsString, + maxRunningTime) + + (useNameLink ? " -useNameLink" : ""); + + Configuration cfg = new Configuration(); + new JCommander(cfg, StringUtils.split(cmd)); + + AtomicInteger success = new AtomicInteger(); + AtomicInteger error = new AtomicInteger(); + + ReadMyWriteWorkflow wf = new ReadMyWriteWorkflow(cfg) { + @Override + protected void onError(Throwable throwable) { + error.incrementAndGet(); + } + + @Override + protected void onSuccess() { + success.incrementAndGet(); + } + }; + + // schedules a collection scale up after a delay + scheduleScaleUp(delayForInitiationCollectionScaleUpInSeconds, newCollectionThroughput); + + wf.run(); + wf.shutdown(); + + int numberOfOperations = Integer.parseInt(numberOfOperationsAsString); + + assertThat(error).hasValue(0); + assertThat(collectionScaleUpFailed).isFalse(); + + if (numberOfOperations > 0) { + assertThat(success).hasValue(numberOfOperations); + } + } + + @BeforeClass(groups = "e2e") + public void beforeClass() { + RequestOptions options = new RequestOptions(); + options.setOfferThroughput(initialCollectionThroughput); + AsyncDocumentClient housekeepingClient = Utils.housekeepingClient(); + database = Utils.createDatabaseForTest(housekeepingClient); + collection = housekeepingClient.createCollection("dbs/" + database.id(), + getCollectionDefinitionWithRangeRangeIndex(), + options) + .single().block().getResource(); + housekeepingClient.close(); + } + + @DataProvider(name = "collectionLinkTypeArgProvider") + public Object[][] collectionLinkTypeArgProvider() { + return new Object[][]{ + // is namebased + {true}, + }; + } + + @AfterClass(groups = "e2e") + public void afterClass() { + AsyncDocumentClient housekeepingClient = Utils.housekeepingClient(); + Utils.safeCleanDatabases(housekeepingClient); + Utils.safeClean(housekeepingClient, database); + Utils.safeClose(housekeepingClient); + } + + DocumentCollection getCollectionDefinitionWithRangeRangeIndex() { + PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition(); + ArrayList paths = new ArrayList<>(); + paths.add("/mypk"); + partitionKeyDef.paths(paths); + IndexingPolicy indexingPolicy = new IndexingPolicy(); + List includedPaths = new ArrayList<>(); + IncludedPath includedPath = new IncludedPath(); + includedPath.path("/*"); + Collection indexes = new ArrayList<>(); + Index stringIndex = Index.Range(DataType.STRING); + BridgeInternal.setProperty(stringIndex, "precision", -1); + indexes.add(stringIndex); + + Index numberIndex = Index.Range(DataType.NUMBER); + BridgeInternal.setProperty(numberIndex, "precision", -1); + indexes.add(numberIndex); + includedPath.indexes(indexes); + includedPaths.add(includedPath); + indexingPolicy.setIncludedPaths(includedPaths); + + DocumentCollection collectionDefinition = new DocumentCollection(); + collectionDefinition.setIndexingPolicy(indexingPolicy); + collectionDefinition.id(UUID.randomUUID().toString()); + collectionDefinition.setPartitionKey(partitionKeyDef); + + return collectionDefinition; + } + + private void scheduleScaleUp(int delayStartInSeconds, int newThroughput) { + AsyncDocumentClient housekeepingClient = Utils.housekeepingClient(); + Flux.just(0L).delayElements(Duration.ofSeconds(delayStartInSeconds), Schedulers.newSingle("ScaleUpThread")).flatMap(aVoid -> { + + // increase throughput to 
max for a single partition collection to avoid throttling + // for bulk insert and later queries. + return housekeepingClient.queryOffers( + String.format("SELECT * FROM r WHERE r.offerResourceId = '%s'", + collection.resourceId()) + , null).flatMap(page -> Flux.fromIterable(page.results())) + .take(1).flatMap(offer -> { + logger.info("going to scale up collection, newThroughput {}", newThroughput); + offer.setThroughput(newThroughput); + return housekeepingClient.replaceOffer(offer); + }); + }).doOnTerminate(housekeepingClient::close) + .subscribe(aVoid -> { + }, e -> { + logger.error("collectionScaleUpFailed to scale up collection", e); + collectionScaleUpFailed.set(true); + }, + () -> { + logger.info("Collection Scale up request sent to the service"); + + } + ); + } +} \ No newline at end of file diff --git a/sdk/cosmos/benchmark/src/test/java/com/azure/data/cosmos/benchmark/Utils.java b/sdk/cosmos/benchmark/src/test/java/com/azure/data/cosmos/benchmark/Utils.java new file mode 100644 index 0000000000000..641f85e3753a2 --- /dev/null +++ b/sdk/cosmos/benchmark/src/test/java/com/azure/data/cosmos/benchmark/Utils.java @@ -0,0 +1,143 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.benchmark; + +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
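/*
 * Editor's note (illustrative sketch, not part of the change): the reactive scale-up chain above can
 * be hard to read on a first pass. This simplified, blocking helper shows the same offer-replacement
 * steps -- find the offer for the collection, raise its throughput, replace it -- using only the
 * calls already present in the test. The class, its name and its placement are hypothetical.
 */
package com.azure.data.cosmos.benchmark;

import com.azure.data.cosmos.internal.AsyncDocumentClient;
import com.azure.data.cosmos.internal.DocumentCollection;
import reactor.core.publisher.Flux;

final class ThroughputScaleUpSketch {
    private ThroughputScaleUpSketch() {
    }

    /** Blocks until the collection's offer has been replaced with the new throughput. */
    static void scaleUp(AsyncDocumentClient client, DocumentCollection collection, int newThroughput) {
        client.queryOffers(
                    String.format("SELECT * FROM r WHERE r.offerResourceId = '%s'", collection.resourceId()),
                    null)
              .flatMap(page -> Flux.fromIterable(page.results()))   // unwrap the feed pages
              .take(1)                                              // a collection has a single offer
              .flatMap(offer -> {
                  offer.setThroughput(newThroughput);               // mutate the offer in place
                  return client.replaceOffer(offer);                // and send the replacement
              })
              .then()
              .block();                                             // wait for the replacement to complete
    }
}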
+ */ + + +import com.azure.data.cosmos.internal.AsyncDocumentClient; +import com.azure.data.cosmos.ConnectionPolicy; +import com.azure.data.cosmos.internal.Database; +import com.azure.data.cosmos.internal.DatabaseForTest; +import com.azure.data.cosmos.internal.DocumentCollection; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.internal.ResourceResponse; +import com.azure.data.cosmos.RetryOptions; +import com.azure.data.cosmos.SqlQuerySpec; +import com.azure.data.cosmos.internal.TestConfigurations; +import reactor.core.publisher.Flux; + +public class Utils { + public static AsyncDocumentClient housekeepingClient() { + ConnectionPolicy connectionPolicy = new ConnectionPolicy(); + RetryOptions options = new RetryOptions(); + options.maxRetryAttemptsOnThrottledRequests(100); + options.maxRetryWaitTimeInSeconds(60); + connectionPolicy.retryOptions(options); + return new AsyncDocumentClient.Builder().withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .build(); + } + + public static String getCollectionLink(Database db, DocumentCollection collection) { + return "dbs/" + db.id() + "/colls/" + collection; + } + + public static Database createDatabaseForTest(AsyncDocumentClient client) { + return DatabaseForTest.create(DatabaseManagerImpl.getInstance(client)).createdDatabase; + } + + public static void safeCleanDatabases(AsyncDocumentClient client) { + if (client != null) { + DatabaseForTest.cleanupStaleTestDatabases(DatabaseManagerImpl.getInstance(client)); + } + } + + public static void safeClean(AsyncDocumentClient client, Database database) { + if (database != null) { + safeClean(client, database.id()); + } + } + + public static void safeClean(AsyncDocumentClient client, String databaseId) { + if (client != null) { + if (databaseId != null) { + try { + client.deleteDatabase("/dbs/" + databaseId, null).then().block(); + } catch (Exception e) { + } + } + } + } + + public static String generateDatabaseId() { + return DatabaseForTest.generateId(); + } + + public static void safeClose(AsyncDocumentClient client) { + if (client != null) { + client.close(); + } + } + + private static class DatabaseManagerImpl implements DatabaseForTest.DatabaseManager { + public static DatabaseManagerImpl getInstance(AsyncDocumentClient client) { + return new DatabaseManagerImpl(client); + } + + private final AsyncDocumentClient client; + + private DatabaseManagerImpl(AsyncDocumentClient client) { + this.client = client; + } + + @Override + public Flux> queryDatabases(SqlQuerySpec query) { + return client.queryDatabases(query, null); + } + + @Override + public Flux> createDatabase(Database databaseDefinition) { + return client.createDatabase(databaseDefinition, null); + } + + @Override + public Flux> deleteDatabase(String id) { + + return client.deleteDatabase("dbs/" + id, null); + } + } +} \ No newline at end of file diff --git a/sdk/cosmos/benchmark/src/test/java/com/azure/data/cosmos/benchmark/WorkflowTest.java b/sdk/cosmos/benchmark/src/test/java/com/azure/data/cosmos/benchmark/WorkflowTest.java new file mode 100644 index 0000000000000..57e611fec82cd --- /dev/null +++ b/sdk/cosmos/benchmark/src/test/java/com/azure/data/cosmos/benchmark/WorkflowTest.java @@ -0,0 +1,350 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the 
"Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.benchmark; + +import com.azure.data.cosmos.internal.AsyncDocumentClient; +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.DataType; +import com.azure.data.cosmos.internal.Database; +import com.azure.data.cosmos.internal.DocumentCollection; +import com.azure.data.cosmos.IncludedPath; +import com.azure.data.cosmos.Index; +import com.azure.data.cosmos.IndexingPolicy; +import com.azure.data.cosmos.PartitionKeyDefinition; +import com.azure.data.cosmos.internal.RequestOptions; +import com.azure.data.cosmos.internal.TestConfigurations; +import com.beust.jcommander.JCommander; +import org.apache.commons.lang3.StringUtils; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.assertj.core.api.Assertions.assertThat; + +public class WorkflowTest { + private static final int TIMEOUT = 120_000; // 2 minutes + private Database database; + private DocumentCollection collection; + + @Test(groups = "simple", timeOut = TIMEOUT) + public void readMyWritesCLI() throws Exception { + String cmdFormat = "-serviceEndpoint %s -masterKey %s" + + " -databaseId %s -collectionId %s" + + " -consistencyLevel SESSION -concurrency 2 -numberOfOperations 123" + + " -operation ReadMyWrites -connectionMode DIRECT -numberOfPreCreatedDocuments 100"; + + String cmd = String.format(cmdFormat, + TestConfigurations.HOST, + TestConfigurations.MASTER_KEY, + database.id(), + collection.id()); + Main.main(StringUtils.split(cmd)); + } + + @Test(dataProvider = "collectionLinkTypeArgProvider", groups = "simple", timeOut = TIMEOUT) + public void readMyWrites(boolean useNameLink) throws Exception { + int numberOfOperations = 123; + String cmdFormat = "-serviceEndpoint %s -masterKey %s" + + " -databaseId %s -collectionId %s" + + " -consistencyLevel SESSION -concurrency 2 -numberOfOperations %s" + + " -operation ReadMyWrites -connectionMode DIRECT -numberOfPreCreatedDocuments 100"; + + String cmd = String.format(cmdFormat, + TestConfigurations.HOST, + TestConfigurations.MASTER_KEY, + database.id(), + collection.id(), + numberOfOperations) + + (useNameLink ? 
" -useNameLink" : ""); + + Configuration cfg = new Configuration(); + new JCommander(cfg, StringUtils.split(cmd)); + + AtomicInteger success = new AtomicInteger(); + AtomicInteger error = new AtomicInteger(); + + ReadMyWriteWorkflow wf = new ReadMyWriteWorkflow(cfg) { + @Override + protected void onError(Throwable throwable) { + error.incrementAndGet(); + } + + @Override + protected void onSuccess() { + success.incrementAndGet(); + } + }; + + wf.run(); + wf.shutdown(); + + assertThat(error).hasValue(0); + assertThat(success).hasValue(numberOfOperations); + } + + @Test(groups = "simple", timeOut = TIMEOUT) + public void writeLatencyCLI() throws Exception { + String cmdFormat = "-serviceEndpoint %s -masterKey %s" + + " -databaseId %s -collectionId %s" + + " -consistencyLevel SESSION -concurrency 2 -numberOfOperations 1000" + + " -operation WriteLatency -connectionMode DIRECT"; + + String cmd = String.format(cmdFormat, + TestConfigurations.HOST, + TestConfigurations.MASTER_KEY, + database.id(), + collection.id()); + Main.main(StringUtils.split(cmd)); + } + + @Test(dataProvider = "collectionLinkTypeArgProvider", groups = "simple", timeOut = TIMEOUT) + public void writeLatency(boolean useNameLink) throws Exception { + int numberOfOperations = 123; + String cmdFormat = "-serviceEndpoint %s -masterKey %s" + + " -databaseId %s -collectionId %s" + + " -consistencyLevel SESSION -concurrency 2 -numberOfOperations %s" + + " -operation WriteLatency -connectionMode DIRECT"; + + String cmd = String.format(cmdFormat, + TestConfigurations.HOST, + TestConfigurations.MASTER_KEY, + database.id(), + collection.id(), + numberOfOperations) + + (useNameLink ? " -useNameLink" : ""); + + Configuration cfg = new Configuration(); + new JCommander(cfg, StringUtils.split(cmd)); + + AtomicInteger success = new AtomicInteger(); + AtomicInteger error = new AtomicInteger(); + + AsyncWriteBenchmark wf = new AsyncWriteBenchmark(cfg) { + @Override + protected void onError(Throwable throwable) { + error.incrementAndGet(); + } + + @Override + protected void onSuccess() { + success.incrementAndGet(); + } + }; + + wf.run(); + wf.shutdown(); + + assertThat(error).hasValue(0); + assertThat(success).hasValue(numberOfOperations); + } + + @Test(dataProvider = "collectionLinkTypeArgProvider", groups = "simple", timeOut = TIMEOUT) + public void writeThroughput(boolean useNameLink) throws Exception { + int numberOfOperations = 123; + String cmdFormat = "-serviceEndpoint %s -masterKey %s" + + " -databaseId %s -collectionId %s" + + " -consistencyLevel SESSION -concurrency 2 -numberOfOperations %s" + + " -operation WriteThroughput -connectionMode DIRECT"; + + String cmd = String.format(cmdFormat, + TestConfigurations.HOST, + TestConfigurations.MASTER_KEY, + database.id(), + collection.id(), + numberOfOperations) + + (useNameLink ? 
" -useNameLink" : ""); + + Configuration cfg = new Configuration(); + new JCommander(cfg, StringUtils.split(cmd)); + + AtomicInteger success = new AtomicInteger(); + AtomicInteger error = new AtomicInteger(); + + AsyncWriteBenchmark wf = new AsyncWriteBenchmark(cfg) { + @Override + protected void onError(Throwable throwable) { + error.incrementAndGet(); + } + + @Override + protected void onSuccess() { + success.incrementAndGet(); + } + }; + + wf.run(); + wf.shutdown(); + + assertThat(error).hasValue(0); + assertThat(success).hasValue(numberOfOperations); + } + + @Test(dataProvider = "collectionLinkTypeArgProvider", groups = "simple", timeOut = TIMEOUT) + public void readLatency(boolean useNameLink) throws Exception { + int numberOfOperations = 123; + String cmdFormat = "-serviceEndpoint %s -masterKey %s" + + " -databaseId %s -collectionId %s" + + " -consistencyLevel SESSION -concurrency 2 -numberOfOperations %s" + + " -operation ReadLatency -connectionMode DIRECT"; + + String cmd = String.format(cmdFormat, + TestConfigurations.HOST, + TestConfigurations.MASTER_KEY, + database.id(), + collection.id(), + numberOfOperations) + + (useNameLink ? " -useNameLink" : ""); + + Configuration cfg = new Configuration(); + new JCommander(cfg, StringUtils.split(cmd)); + + AtomicInteger success = new AtomicInteger(); + AtomicInteger error = new AtomicInteger(); + + AsyncReadBenchmark wf = new AsyncReadBenchmark(cfg) { + @Override + protected void onError(Throwable throwable) { + error.incrementAndGet(); + } + + @Override + protected void onSuccess() { + success.incrementAndGet(); + } + }; + + wf.run(); + wf.shutdown(); + + assertThat(error).hasValue(0); + assertThat(success).hasValue(numberOfOperations); + } + + @Test(dataProvider = "collectionLinkTypeArgProvider", groups = "simple", timeOut = TIMEOUT) + public void readThroughput(boolean useNameLink) throws Exception { + int numberOfOperations = 123; + String cmdFormat = "-serviceEndpoint %s -masterKey %s" + + " -databaseId %s -collectionId %s" + + " -consistencyLevel SESSION -concurrency 2 -numberOfOperations %s" + + " -operation ReadThroughput -connectionMode DIRECT"; + + String cmd = String.format(cmdFormat, + TestConfigurations.HOST, + TestConfigurations.MASTER_KEY, + database.id(), + collection.id(), + numberOfOperations) + + (useNameLink ? 
" -useNameLink" : ""); + + Configuration cfg = new Configuration(); + new JCommander(cfg, StringUtils.split(cmd)); + + AtomicInteger success = new AtomicInteger(); + AtomicInteger error = new AtomicInteger(); + + AsyncReadBenchmark wf = new AsyncReadBenchmark(cfg) { + @Override + protected void onError(Throwable throwable) { + error.incrementAndGet(); + } + + @Override + protected void onSuccess() { + success.incrementAndGet(); + } + }; + + wf.run(); + wf.shutdown(); + + assertThat(error).hasValue(0); + assertThat(success).hasValue(numberOfOperations); + } + + @BeforeClass(groups = "simple", timeOut = TIMEOUT) + public void beforeClass() { + RequestOptions options = new RequestOptions(); + options.setOfferThroughput(10000); + AsyncDocumentClient housekeepingClient = Utils.housekeepingClient(); + database = Utils.createDatabaseForTest(housekeepingClient); + collection = housekeepingClient.createCollection("dbs/"+ database.id(), + getCollectionDefinitionWithRangeRangeIndex(), + options) + .single().block().getResource(); + housekeepingClient.close(); + } + + @DataProvider(name = "collectionLinkTypeArgProvider") + public Object[][] collectionLinkTypeArgProvider() { + return new Object[][]{ + // is namebased + {true}, + {false}, + }; + } + + @AfterClass(groups = "simple", timeOut = TIMEOUT) + public void afterClass() { + AsyncDocumentClient housekeepingClient = Utils.housekeepingClient(); + Utils.safeCleanDatabases(housekeepingClient); + Utils.safeClean(housekeepingClient, database); + Utils.safeClose(housekeepingClient); + } + + DocumentCollection getCollectionDefinitionWithRangeRangeIndex() { + PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition(); + ArrayList paths = new ArrayList<>(); + paths.add("/mypk"); + partitionKeyDef.paths(paths); + IndexingPolicy indexingPolicy = new IndexingPolicy(); + List includedPaths = new ArrayList<>(); + IncludedPath includedPath = new IncludedPath(); + includedPath.path("/*"); + Collection indexes = new ArrayList<>(); + Index stringIndex = Index.Range(DataType.STRING); + BridgeInternal.setProperty(stringIndex, "precision", -1); + indexes.add(stringIndex); + + Index numberIndex = Index.Range(DataType.NUMBER); + BridgeInternal.setProperty(numberIndex, "precision", -1); + indexes.add(numberIndex); + includedPath.indexes(indexes); + includedPaths.add(includedPath); + indexingPolicy.setIncludedPaths(includedPaths); + + DocumentCollection collectionDefinition = new DocumentCollection(); + collectionDefinition.setIndexingPolicy(indexingPolicy); + collectionDefinition.id(UUID.randomUUID().toString()); + collectionDefinition.setPartitionKey(partitionKeyDef); + + return collectionDefinition; + } +} \ No newline at end of file diff --git a/sdk/cosmos/changelog/README.md b/sdk/cosmos/changelog/README.md new file mode 100644 index 0000000000000..7f145864521b5 --- /dev/null +++ b/sdk/cosmos/changelog/README.md @@ -0,0 +1,126 @@ +## Changelog + +### 2.4.3 + +- Fixed resource leak issue on closing client + +### 2.4.2 + +- Fixed bugs in continuation token support for cross partition queries + +### 2.4.1 + +- Fixed some bugs in Direct mode. +- Improved logging in Direct mode. +- Improved connection management. + +### 2.4.0 + +- Direct GA. +- Added support for QueryMetrics. +- Changed the APIs accepting java.util.Collection for which order is important to accept java.util.List instead. Now ConnectionPolicy#getPreferredLocations(), JsonSerialization, and PartitionKey(.) accept List. + +### 2.4.0-beta1 + +- Added support for Direct Https. 
+- Changed the APIs accepting java.util.Collection for which order is important to accept java.util.List instead. + Now ConnectionPolicy#getPreferredLocations(), JsonSerialization, and PartitionKey(.) accept List. +- Fixed a Session bug for Document query in Gateway mode. +- Upgraded dependencies (netty 0.4.20 [github #79](https://github.com/Azure/azure-cosmosdb-java/issues/79), RxJava 1.3.8). + +### 2.3.1 + +- Fix handling very large query responses. +- Fix resource token handling when instantiating client ([github #78](https://github.com/Azure/azure-cosmosdb-java/issues/78)). +- Upgraded vulnerable dependency jackson-databind ([github #77](https://github.com/Azure/azure-cosmosdb-java/pull/77)). + +### 2.3.0 + +- Fixed a resource leak bug. +- Added support for MultiPolygon +- Added support for custom headers in RequestOptions. + +### 2.2.2 + +- Fixed a packaging bug. + +### 2.2.1 + +- Fixed a NPE bug in write retry path. +- Fixed a NPE bug in endpoint management. +- Upgraded vulnerable dependencies ([github #68](https://github.com/Azure/azure-cosmosdb-java/issues/68)). +- Added support for Netty network logging for troubleshooting. + +### 2.2.0 + +- Added support for Multi-region write. + +### 2.1.0 + +- Added support for Proxy. +- Added support for resource token authorization. +- Fixed a bug in handling large partition keys ([github #63](https://github.com/Azure/azure-cosmosdb-java/issues/63)). +- Documentation improved. +- SDK restructured into more granular modules. + +### 2.0.1 + +- Fixed a bug for non-english locales ([github #51](https://github.com/Azure/azure-cosmosdb-java/issues/51)). +- Added helper methods for Conflict resource. + +### 2.0.0 + +- Replaced org.json dependency by jackson due to performance reasons and licensing ([github #29](https://github.com/Azure/azure-cosmosdb-java/issues/29)). +- Removed deprecated OfferV2 class. +- Added accessor method to Offer class for throughput content. +- Any method in Document/Resource returning org.json types changed to return a jackson object type. +- getObject(.) method of classes extending JsonSerializable changed to return a jackson ObjectNode type. +- getCollection(.) method changed to return Collection of ObjectNode. +- Removed JsonSerializable subclasses' constructors with org.json.JSONObject arg. +- JsonSerializable.toJson (SerializationFormattingPolicy.Indented) now uses two spaces for indentation. + +### 1.0.2 + +- Added support for Unique Index Policy. +- Added support for limiting response continuation token size in feed options. +- Added support for Partition Split in Cross Partition Query. +- Fixed a bug in Json timestamp serialization ([github #32](https://github.com/Azure/azure-cosmosdb-java/issues/32)). +- Fixed a bug in Json enum serialization. +- Fixed a bug in managing documents of 2MB size ([github #33](https://github.com/Azure/azure-cosmosdb-java/issues/33)). +- Dependency com.fasterxml.jackson.core:jackson-databind upgraded to 2.9.5 due to a bug ([jackson-databind: github #1599](https://github.com/FasterXML/jackson-databind/issues/1599)) +- Dependency on rxjava-extras upgraded to 0.8.0.17 due to a bug ([rxjava-extras: github #30](https://github.com/davidmoten/rxjava-extras/issues/30)). +- The metadata description in pom file updated to be inline with the rest of documentation. 
+- Syntax improvement ([github #41](https://github.com/Azure/azure-cosmosdb-java/issues/41)), ([github #40](https://github.com/Azure/azure-cosmosdb-java/issues/40)). + +### 1.0.1 + +- Added back-pressure support in query. +- Added support for partition key range id in query. +- Changed to allow larger continuation token in request header (bugfix github #24). +- netty dependency upgraded to 4.1.22.Final to ensure JVM shuts down after main thread finishes. +- Changed to avoid passing session token when reading master resources. +- Added more examples. +- Added more benchmarking scenarios. +- Fixed java header files for proper javadoc generation. + +### 1.0.0 + +- Release 1.0.0 has fully end to end support for non-blocking IO using netty library in Gateway mode. +- Dependency on `azure-documentdb` SDK removed. +- Artifact id changed to `azure-cosmosdb` from `azure-documentdb-rx` in 0.9.0-rc2. +- Java package name changed to `com.azure.data.cosmos` from `com.microsoft.azure.documentdb` in 0.9.0-rc2. + +### 0.9.0-rc2 + +- `FeedResponsePage` renamed to `FeedReponse` +- Some minor modifications to `ConnectionPolicy` configuration. + All time fields and methods in ConnectionPolicy suffixed with "InMillis" to be more precise of the time unit. +- `ConnectionPolicy#setProxy()` removed. +- `FeedOptions#pageSize` renamed to + `FeedOptions#maxItemCount` +- Release 1.0.0 deprecates 0.9.x releases. + +### 0.9.0-rc1 + +- First release of `azure-documentdb-rx` SDK. +- CRUD Document API fully non-blocking using netty. Query async API implemented as a wrapper using blocking SDK `azure-documentdb`. diff --git a/sdk/cosmos/examples/pom.xml b/sdk/cosmos/examples/pom.xml new file mode 100644 index 0000000000000..f7c52f9f530e5 --- /dev/null +++ b/sdk/cosmos/examples/pom.xml @@ -0,0 +1,159 @@ + + + + 4.0.0 + + com.microsoft.azure + azure-cosmos-parent + 3.0.0 + + + azure-cosmos-examples + Async SDK for SQL API of Azure Cosmos DB Service - Examples + Examples for Async SDK for SQL API of Azure Cosmos DB Service + + + UTF-8 + 1.7.6 + 1.2.17 + + + + + org.apache.maven.plugins + maven-compiler-plugin + 3.6.0 + + 1.8 + 1.8 + + + + org.apache.maven.plugins + maven-eclipse-plugin + 2.8 + + + + org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.8 + + + + + + org.codehaus.mojo + exec-maven-plugin + 1.2.1 + + com.azure.data.cosmos.benchmark.Main + + + + maven-assembly-plugin + 2.2 + + + jar-with-dependencies + + + + com.azure.data.cosmos.rx.examples.multimaster.samples.Main + + + + + + make-assembly + package + + single + + + + + + org.apache.maven.plugins + maven-antrun-plugin + 1.8 + false + + + none + default-cli + + + + true + + + + + + + com.microsoft.azure + azure-cosmos + + + com.google.guava + guava + ${guava.version} + + + log4j + log4j + ${log4j.version} + + + org.slf4j + slf4j-api + ${slf4j.version} + + + org.slf4j + slf4j-log4j12 + ${slf4j.version} + + + org.hamcrest + hamcrest-all + ${hamcrest.version} + test + + + org.mockito + mockito-core + ${mockito.version} + test + + + org.testng + testng + ${testng.version} + test + + + diff --git a/sdk/cosmos/examples/src/main/java/com/azure/data/cosmos/examples/AccountSettings.java b/sdk/cosmos/examples/src/main/java/com/azure/data/cosmos/examples/AccountSettings.java new file mode 100644 index 0000000000000..11b748b8710c3 --- /dev/null +++ b/sdk/cosmos/examples/src/main/java/com/azure/data/cosmos/examples/AccountSettings.java @@ -0,0 +1,59 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 
Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.examples; + +import org.apache.commons.lang3.StringUtils; + +/** + * Contains the account configurations for Sample. + * + * For running tests, you can pass a customized endpoint configuration in one of the following + * ways: + *

+ * <ul>
+ * <li>-DACCOUNT_KEY="[your-key]" -DACCOUNT_HOST="[your-endpoint]" as JVM
+ * command-line options.</li>
+ * <li>You can set COSMOS_ACCOUNT_KEY and COSMOS_ACCOUNT_HOST as environment variables.</li>
+ * </ul>
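+ * For example (hypothetical invocation; substitute your own endpoint, key, and classpath):
+ * java -DACCOUNT_HOST="https://localhost:8081/" -DACCOUNT_KEY="[your-key]" -cp [sample-classpath] com.azure.data.cosmos.examples.BasicDemo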
+ * + * If none of the above is set, emulator endpoint will be used. + * Emulator http cert is self signed. If you are using emulator, + * make sure emulator https certificate is imported + * to java trusted cert store: + * https://docs.microsoft.com/en-us/azure/cosmos-db/local-emulator-export-ssl-certificates + */ +public class AccountSettings { + // REPLACE MASTER_KEY and HOST with values from your Azure Cosmos DB account. + // The default values are credentials of the local emulator, which are not used in any production environment. + public static String MASTER_KEY = + System.getProperty("ACCOUNT_KEY", + StringUtils.defaultString(StringUtils.trimToNull( + System.getenv().get("COSMOS_ACCOUNT_KEY")), + "C2y6yDjf5/R+ob0N8A7Cgv30VRDJIWEHLM+4QDU5DE2nQ9nDuVTqobD4b8mGGyPMbIZnqyMsEcaGQy67XIw/Jw==")); + + public static String HOST = + System.getProperty("ACCOUNT_HOST", + StringUtils.defaultString(StringUtils.trimToNull( + System.getenv().get("COSMOS_ACCOUNT_HOST")), + "https://localhost:8081/")); +} diff --git a/sdk/cosmos/examples/src/main/java/com/azure/data/cosmos/examples/BasicDemo.java b/sdk/cosmos/examples/src/main/java/com/azure/data/cosmos/examples/BasicDemo.java new file mode 100644 index 0000000000000..5af0ed8f6febd --- /dev/null +++ b/sdk/cosmos/examples/src/main/java/com/azure/data/cosmos/examples/BasicDemo.java @@ -0,0 +1,223 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos.examples; + +import com.azure.data.cosmos.CosmosClient; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.CosmosContainer; +import com.azure.data.cosmos.CosmosContainerProperties; +import com.azure.data.cosmos.CosmosDatabase; +import com.azure.data.cosmos.CosmosItem; +import com.azure.data.cosmos.CosmosItemProperties; +import com.azure.data.cosmos.CosmosItemResponse; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; +import reactor.core.scheduler.Schedulers; + +public class BasicDemo { + + private static final String DATABASE_NAME = "test_db"; + private static final String CONTAINER_NAME = "test_container"; + + private CosmosClient client; + private CosmosDatabase database; + private CosmosContainer container; + + public static void main(String[] args) { + BasicDemo demo = new BasicDemo(); + demo.start(); + } + + private void start(){ + // Get client + client = CosmosClient.builder() + .endpoint(AccountSettings.HOST) + .key(AccountSettings.MASTER_KEY) + .build(); + + //CREATE a database and a container + createDbAndContainerBlocking(); + + //Get a proxy reference to container + container = client.getDatabase(DATABASE_NAME).getContainer(CONTAINER_NAME); + + CosmosContainer container = client.getDatabase(DATABASE_NAME).getContainer(CONTAINER_NAME); + TestObject testObject = new TestObject("item_new_id_1", "test", "test description", "US"); + TestObject testObject2 = new TestObject("item_new_id_2", "test2", "test description2", "CA"); + + //CREATE an Item async + Mono itemResponseMono = container.createItem(testObject); + //CREATE another Item async + Mono itemResponseMono1 = container.createItem(testObject2); + + //Wait for completion + try { + itemResponseMono.doOnError(throwable -> log("CREATE item 1", throwable)) + .mergeWith(itemResponseMono1) + .doOnError(throwable -> log("CREATE item 2 ", throwable)) + .doOnComplete(() -> log("Items created")) + .publishOn(Schedulers.elastic()) + .blockLast(); + }catch (RuntimeException e){ + log("Couldn't create items due to above exceptions"); + } + + createAndReplaceItem(); + + queryItems(); + + queryWithContinuationToken(); + + //Close client + client.close(); + log("Completed"); + } + + private void createAndReplaceItem() { + TestObject replaceObject = new TestObject("item_new_id_3", "test3", "test description3", "JP"); + CosmosItem cosmosItem = null; + //CREATE item sync + try { + cosmosItem = container.createItem(replaceObject) + .doOnError(throwable -> log("CREATE 3", throwable)) + .publishOn(Schedulers.elastic()) + .block() + .item(); + }catch (RuntimeException e){ + log("Couldn't create items due to above exceptions"); + } + if(cosmosItem != null) { + replaceObject.setName("new name test3"); + + //REPLACE the item and wait for completion + cosmosItem.replace(replaceObject).block(); + } + } + + private void createDbAndContainerBlocking() { + client.createDatabaseIfNotExists(DATABASE_NAME) + .doOnSuccess(cosmosDatabaseResponse -> log("Database: " + cosmosDatabaseResponse.database().id())) + .flatMap(dbResponse -> dbResponse.database().createContainerIfNotExists(new CosmosContainerProperties(CONTAINER_NAME, "/country"))) + .doOnSuccess(cosmosContainerResponse -> log("Container: " + cosmosContainerResponse.container().id())) + .doOnError(throwable -> log(throwable.getMessage())) + .publishOn(Schedulers.elastic()) + .block(); + } + + int count = 0; + private void 
queryItems(){ + log("+ Querying the collection "); + String query = "SELECT * from root"; + FeedOptions options = new FeedOptions(); + options.enableCrossPartitionQuery(true); + options.maxDegreeOfParallelism(2); + Flux> queryFlux = container.queryItems(query, options); + + queryFlux.publishOn(Schedulers.elastic()).subscribe(cosmosItemFeedResponse -> {}, + throwable -> {}, + () -> {}); + + queryFlux.publishOn(Schedulers.elastic()) + .toIterable() + .forEach(cosmosItemFeedResponse -> + { + log(cosmosItemFeedResponse.results()); + }); + + } + + private void queryWithContinuationToken(){ + log("+ Query with paging using continuation token"); + String query = "SELECT * from root r "; + FeedOptions options = new FeedOptions(); + options.enableCrossPartitionQuery(true); + options.populateQueryMetrics(true); + options.maxItemCount(1); + String continuation = null; + do{ + options.requestContinuation(continuation); + Flux> queryFlux = container.queryItems(query, options); + FeedResponse page = queryFlux.blockFirst(); + assert page != null; + log(page.results()); + continuation = page.continuationToken(); + }while(continuation!= null); + + } + + private void log(Object object) { + System.out.println(object); + } + + private void log(String msg, Throwable throwable){ + log(msg + ": " + ((CosmosClientException)throwable).statusCode()); + } + + class TestObject { + String id; + String name; + String description; + String country; + + public TestObject(String id, String name, String description, String country) { + this.id = id; + this.name = name; + this.description = description; + this.country = country; + } + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public String getCountry() { + return country; + } + + public void setCountry(String country) { + this.country = country; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getDescription() { + return description; + } + + public void setDescription(String description) { + this.description = description; + } + } +} \ No newline at end of file diff --git a/sdk/cosmos/examples/src/main/java/com/azure/data/cosmos/examples/ChangeFeed/SampleChangeFeedProcessor.java b/sdk/cosmos/examples/src/main/java/com/azure/data/cosmos/examples/ChangeFeed/SampleChangeFeedProcessor.java new file mode 100644 index 0000000000000..9267879f9e0f2 --- /dev/null +++ b/sdk/cosmos/examples/src/main/java/com/azure/data/cosmos/examples/ChangeFeed/SampleChangeFeedProcessor.java @@ -0,0 +1,254 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.examples.ChangeFeed; + +import com.azure.data.cosmos.ChangeFeedProcessor; +import com.azure.data.cosmos.ConnectionPolicy; +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.CosmosClient; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.CosmosContainer; +import com.azure.data.cosmos.CosmosContainerProperties; +import com.azure.data.cosmos.CosmosContainerRequestOptions; +import com.azure.data.cosmos.CosmosContainerResponse; +import com.azure.data.cosmos.CosmosDatabase; +import com.azure.data.cosmos.CosmosItemProperties; +import com.azure.data.cosmos.SerializationFormattingPolicy; +import org.apache.commons.lang3.RandomStringUtils; + +import java.time.Duration; + +/** + * Sample for Change Feed Processor. + * + */ +public class SampleChangeFeedProcessor { + + public static int WAIT_FOR_WORK = 60; + public static final String DATABASE_NAME = "db_" + RandomStringUtils.randomAlphabetic(7); + public static final String COLLECTION_NAME = "coll_" + RandomStringUtils.randomAlphabetic(7); + + private static ChangeFeedProcessor changeFeedProcessorInstance; + private static boolean isWorkCompleted = false; + + public static void main (String[]args) { + System.out.println("BEGIN Sample"); + + try { + + System.out.println("-->CREATE DocumentClient"); + CosmosClient client = getCosmosClient(); + + System.out.println("-->CREATE sample's database: " + DATABASE_NAME); + CosmosDatabase cosmosDatabase = createNewDatabase(client, DATABASE_NAME); + + System.out.println("-->CREATE container for documents: " + COLLECTION_NAME); + CosmosContainer feedContainer = createNewCollection(client, DATABASE_NAME, COLLECTION_NAME); + + System.out.println("-->CREATE container for lease: " + COLLECTION_NAME + "-leases"); + CosmosContainer leaseContainer = createNewLeaseCollection(client, DATABASE_NAME, COLLECTION_NAME + "-leases"); + + changeFeedProcessorInstance = getChangeFeedProcessor("SampleHost_1", feedContainer, leaseContainer); + + changeFeedProcessorInstance.start().subscribe(aVoid -> { + createNewDocuments(feedContainer, 10, Duration.ofSeconds(3)); + isWorkCompleted = true; + }); + + long remainingWork = WAIT_FOR_WORK; + while (!isWorkCompleted && remainingWork > 0) { + Thread.sleep(100); + remainingWork -= 100; + } + + if (isWorkCompleted) { + if (changeFeedProcessorInstance != null) { + changeFeedProcessorInstance.stop().subscribe().wait(10000); + } + } else { + throw new RuntimeException("The change feed processor initialization and automatic create document feeding process did not complete in the expected time"); + } + + System.out.println("-->DELETE sample's database: " + DATABASE_NAME); + deleteDatabase(cosmosDatabase); + + Thread.sleep(500); + + } catch (Exception e) { + e.printStackTrace(); + } + + System.out.println("END Sample"); + System.exit(0); + } + + public static ChangeFeedProcessor getChangeFeedProcessor(String hostName, CosmosContainer feedContainer, CosmosContainer leaseContainer) { + return ChangeFeedProcessor.Builder() + .hostName(hostName) + .feedContainer(feedContainer) + .leaseContainer(leaseContainer) + .handleChanges(docs -> { + System.out.println("--->handleChanges() START"); + + for (CosmosItemProperties document : docs) { + 
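+                        // Each change feed batch arrives as a list of CosmosItemProperties; log every changed document as indented JSON.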
System.out.println("---->DOCUMENT RECEIVED: " + document.toJson(SerializationFormattingPolicy.INDENTED)); + } + System.out.println("--->handleChanges() END"); + + }) + .build(); + } + + public static CosmosClient getCosmosClient() { + + return CosmosClient.builder() + .endpoint(SampleConfigurations.HOST) + .key(SampleConfigurations.MASTER_KEY) + .connectionPolicy(ConnectionPolicy.defaultPolicy()) + .consistencyLevel(ConsistencyLevel.EVENTUAL) + .build(); + } + + public static CosmosDatabase createNewDatabase(CosmosClient client, String databaseName) { + return client.createDatabaseIfNotExists(databaseName).block().database(); + } + + public static void deleteDatabase(CosmosDatabase cosmosDatabase) { + cosmosDatabase.delete().block(); + } + + public static CosmosContainer createNewCollection(CosmosClient client, String databaseName, String collectionName) { + CosmosDatabase databaseLink = client.getDatabase(databaseName); + CosmosContainer collectionLink = databaseLink.getContainer(collectionName); + CosmosContainerResponse containerResponse = null; + + try { + containerResponse = collectionLink.read().block(); + + if (containerResponse != null) { + throw new IllegalArgumentException(String.format("Collection %s already exists in database %s.", collectionName, databaseName)); + } + } catch (RuntimeException ex) { + if (ex.getCause() instanceof CosmosClientException) { + CosmosClientException cosmosClientException = (CosmosClientException) ex.getCause(); + + if (cosmosClientException.statusCode() != 404) { + throw ex; + } + } else { + throw ex; + } + } + + CosmosContainerProperties containerSettings = new CosmosContainerProperties(collectionName, "/id"); + + CosmosContainerRequestOptions requestOptions = new CosmosContainerRequestOptions(); + + containerResponse = databaseLink.createContainer(containerSettings, 10000, requestOptions).block(); + + if (containerResponse == null) { + throw new RuntimeException(String.format("Failed to create collection %s in database %s.", collectionName, databaseName)); + } + + return containerResponse.container(); + } + + public static CosmosContainer createNewLeaseCollection(CosmosClient client, String databaseName, String leaseCollectionName) { + CosmosDatabase databaseLink = client.getDatabase(databaseName); + CosmosContainer leaseCollectionLink = databaseLink.getContainer(leaseCollectionName); + CosmosContainerResponse leaseContainerResponse = null; + + try { + leaseContainerResponse = leaseCollectionLink.read().block(); + + if (leaseContainerResponse != null) { + leaseCollectionLink.delete().block(); + + try { + Thread.sleep(1000); + } catch (InterruptedException ex) { + ex.printStackTrace(); + } + } + } catch (RuntimeException ex) { + if (ex.getCause() instanceof CosmosClientException) { + CosmosClientException cosmosClientException = (CosmosClientException) ex.getCause(); + + if (cosmosClientException.statusCode() != 404) { + throw ex; + } + } else { + throw ex; + } + } + + CosmosContainerProperties containerSettings = new CosmosContainerProperties(leaseCollectionName, "/id"); + CosmosContainerRequestOptions requestOptions = new CosmosContainerRequestOptions(); + + leaseContainerResponse = databaseLink.createContainer(containerSettings, 400,requestOptions).block(); + + if (leaseContainerResponse == null) { + throw new RuntimeException(String.format("Failed to create collection %s in database %s.", leaseCollectionName, databaseName)); + } + + return leaseContainerResponse.container(); + } + + public static void createNewDocuments(CosmosContainer 
containerClient, int count, Duration delay) { + String suffix = RandomStringUtils.randomAlphabetic(10); + for (int i = 0; i <= count; i++) { + CosmosItemProperties document = new CosmosItemProperties(); + document.id(String.format("0%d-%s", i, suffix)); + + containerClient.createItem(document).subscribe(doc -> { + System.out.println("---->DOCUMENT WRITE: " + doc.properties().toJson(SerializationFormattingPolicy.INDENTED)); + }); + + long remainingWork = delay.toMillis(); + try { + while (remainingWork > 0) { + Thread.sleep(100); + remainingWork -= 100; + } + } catch (InterruptedException iex) { + // exception caught + break; + } + } + } + + public static boolean ensureWorkIsDone(Duration delay) { + long remainingWork = delay.toMillis(); + try { + while (!isWorkCompleted && remainingWork > 0) { + Thread.sleep(100); + remainingWork -= 100; + } + } catch (InterruptedException iex) { + return false; + } + + return remainingWork > 0; + } + +} diff --git a/sdk/cosmos/examples/src/main/java/com/azure/data/cosmos/examples/ChangeFeed/SampleConfigurations.java b/sdk/cosmos/examples/src/main/java/com/azure/data/cosmos/examples/ChangeFeed/SampleConfigurations.java new file mode 100644 index 0000000000000..9657dc22363ee --- /dev/null +++ b/sdk/cosmos/examples/src/main/java/com/azure/data/cosmos/examples/ChangeFeed/SampleConfigurations.java @@ -0,0 +1,56 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.examples.ChangeFeed; + +import com.google.common.base.Strings; +import org.apache.commons.lang3.StringUtils; + +/** + * Contains the configurations for tests. + *

+ * For running tests, you can pass a customized endpoint configuration in one of the following + * ways: + *

+ * <ul>
+ * <li>-DACCOUNT_KEY="[your-key]" -DACCOUNT_HOST="[your-endpoint]" as JVM
+ * command-line options.</li>
+ * <li>You can set ACCOUNT_KEY and ACCOUNT_HOST as environment variables.</li>
+ * </ul>
+ *

+ * If none of the above is set, emulator endpoint will be used. + */ +public final class SampleConfigurations { + // REPLACE MASTER_KEY and HOST with values from your Azure Cosmos DB account. + // The default values are credentials of the local emulator, which are not used in any production environment. + // + public static String MASTER_KEY = + System.getProperty("ACCOUNT_KEY", + StringUtils.defaultString(Strings.emptyToNull( + System.getenv().get("ACCOUNT_KEY")), + "C2y6yDjf5/R+ob0N8A7Cgv30VRDJIWEHLM+4QDU5DE2nQ9nDuVTqobD4b8mGGyPMbIZnqyMsEcaGQy67XIw/Jw==")); + + public static String HOST = + System.getProperty("ACCOUNT_HOST", + StringUtils.defaultString(Strings.emptyToNull( + System.getenv().get("ACCOUNT_HOST")), + "https://localhost:8081/")); +} diff --git a/sdk/cosmos/examples/src/main/java/com/azure/data/cosmos/rx/examples/multimaster/ConfigurationManager.java b/sdk/cosmos/examples/src/main/java/com/azure/data/cosmos/rx/examples/multimaster/ConfigurationManager.java new file mode 100644 index 0000000000000..4651739076c90 --- /dev/null +++ b/sdk/cosmos/examples/src/main/java/com/azure/data/cosmos/rx/examples/multimaster/ConfigurationManager.java @@ -0,0 +1,32 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.rx.examples.multimaster; + +import java.util.Properties; + +public class ConfigurationManager { + public static Properties getAppSettings() { + return System.getProperties(); + } +} diff --git a/sdk/cosmos/examples/src/main/java/com/azure/data/cosmos/rx/examples/multimaster/Helpers.java b/sdk/cosmos/examples/src/main/java/com/azure/data/cosmos/rx/examples/multimaster/Helpers.java new file mode 100644 index 0000000000000..b6a02833768b2 --- /dev/null +++ b/sdk/cosmos/examples/src/main/java/com/azure/data/cosmos/rx/examples/multimaster/Helpers.java @@ -0,0 +1,104 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.rx.examples.multimaster; + +import com.azure.data.cosmos.internal.AsyncDocumentClient; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.internal.Database; +import com.azure.data.cosmos.internal.DocumentCollection; +import com.azure.data.cosmos.internal.ResourceResponse; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +public class Helpers { + + static public String createDocumentCollectionUri(String databaseName, String collectionName) { + return String.format("/dbs/%s/colls/%s", databaseName, collectionName); + } + + static public String createDatabaseUri(String databaseName) { + return String.format("/dbs/%s", databaseName); + } + + static public Mono createDatabaseIfNotExists(AsyncDocumentClient client, String databaseName) { + + return client.readDatabase("/dbs/" + databaseName, null) + .onErrorResume( + e -> { + if (e instanceof CosmosClientException) { + CosmosClientException dce = (CosmosClientException) e; + if (dce.statusCode() == 404) { + // if doesn't exist create it + + Database d = new Database(); + d.id(databaseName); + + return client.createDatabase(d, null); + } + } + + return Flux.error(e); + } + ).map(ResourceResponse::getResource).single(); + } + + static public Mono createCollectionIfNotExists(AsyncDocumentClient client, String databaseName, String collectionName) { + return client.readCollection(createDocumentCollectionUri(databaseName, collectionName), null) + .onErrorResume( + e -> { + if (e instanceof CosmosClientException) { + CosmosClientException dce = (CosmosClientException) e; + if (dce.statusCode() == 404) { + // if doesn't exist create it + + DocumentCollection collection = new DocumentCollection(); + 
collection.id(collectionName); + + return client.createCollection(createDatabaseUri(databaseName), collection, null); + } + } + + return Flux.error(e); + } + ).map(ResourceResponse::getResource).single(); + } + + static public Mono createCollectionIfNotExists(AsyncDocumentClient client, String databaseName, DocumentCollection collection) { + return client.readCollection(createDocumentCollectionUri(databaseName, collection.id()), null) + .onErrorResume( + e -> { + if (e instanceof CosmosClientException) { + CosmosClientException dce = (CosmosClientException) e; + if (dce.statusCode() == 404) { + // if doesn't exist create it + + return client.createCollection(createDatabaseUri(databaseName), collection, null); + } + } + + return Flux.error(e); + } + ).map(ResourceResponse::getResource).single(); + } +} diff --git a/sdk/cosmos/examples/src/main/java/com/azure/data/cosmos/rx/examples/multimaster/samples/ConflictWorker.java b/sdk/cosmos/examples/src/main/java/com/azure/data/cosmos/rx/examples/multimaster/samples/ConflictWorker.java new file mode 100644 index 0000000000000..720bc936b0ecf --- /dev/null +++ b/sdk/cosmos/examples/src/main/java/com/azure/data/cosmos/rx/examples/multimaster/samples/ConflictWorker.java @@ -0,0 +1,877 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.rx.examples.multimaster.samples; + +import com.azure.data.cosmos.AccessCondition; +import com.azure.data.cosmos.AccessConditionType; +import com.azure.data.cosmos.internal.AsyncDocumentClient; +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.internal.Conflict; +import com.azure.data.cosmos.ConflictResolutionPolicy; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.internal.Document; +import com.azure.data.cosmos.internal.DocumentCollection; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.Resource; +import com.azure.data.cosmos.internal.RequestOptions; +import com.azure.data.cosmos.internal.ResourceResponse; +import com.azure.data.cosmos.internal.StoredProcedure; +import com.azure.data.cosmos.rx.examples.multimaster.Helpers; +import org.apache.commons.io.IOUtils; +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Flux; +import reactor.core.scheduler.Scheduler; +import reactor.core.scheduler.Schedulers; + +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; + +public class ConflictWorker { + private static Logger logger = LoggerFactory.getLogger(ConflictWorker.class); + + private final Scheduler schedulerForBlockingWork; + private final List clients; + private final String basicCollectionUri; + private final String manualCollectionUri; + private final String lwwCollectionUri; + private final String udpCollectionUri; + private final String databaseName; + private final String basicCollectionName; + private final String manualCollectionName; + private final String lwwCollectionName; + private final String udpCollectionName; + private final ExecutorService executor; + + public ConflictWorker(String databaseName, String basicCollectionName, String manualCollectionName, String lwwCollectionName, String udpCollectionName) { + this.clients = new ArrayList<>(); + this.basicCollectionUri = Helpers.createDocumentCollectionUri(databaseName, basicCollectionName); + this.manualCollectionUri = Helpers.createDocumentCollectionUri(databaseName, manualCollectionName); + this.lwwCollectionUri = Helpers.createDocumentCollectionUri(databaseName, lwwCollectionName); + this.udpCollectionUri = Helpers.createDocumentCollectionUri(databaseName, udpCollectionName); + + this.databaseName = databaseName; + this.basicCollectionName = basicCollectionName; + this.manualCollectionName = manualCollectionName; + this.lwwCollectionName = lwwCollectionName; + this.udpCollectionName = udpCollectionName; + + this.executor = Executors.newFixedThreadPool(100); + this.schedulerForBlockingWork = Schedulers.fromExecutor(executor); + } + + public void addClient(AsyncDocumentClient client) { + this.clients.add(client); + } + + private DocumentCollection createCollectionIfNotExists(AsyncDocumentClient createClient, String databaseName, DocumentCollection collection) { + return Helpers.createCollectionIfNotExists(createClient, this.databaseName, collection) + .subscribeOn(schedulerForBlockingWork).block(); + } + + private DocumentCollection createCollectionIfNotExists(AsyncDocumentClient createClient, String databaseName, String collectionName) { + + return Helpers.createCollectionIfNotExists(createClient, this.databaseName, this.basicCollectionName) + 
.subscribeOn(schedulerForBlockingWork).block(); + } + + private DocumentCollection getCollectionDefForManual(String id) { + DocumentCollection collection = new DocumentCollection(); + collection.id(id); + ConflictResolutionPolicy policy = ConflictResolutionPolicy.createCustomPolicy(); + collection.setConflictResolutionPolicy(policy); + return collection; + } + + private DocumentCollection getCollectionDefForLastWinWrites(String id, String conflictResolutionPath) { + DocumentCollection collection = new DocumentCollection(); + collection.id(id); + ConflictResolutionPolicy policy = ConflictResolutionPolicy.createLastWriterWinsPolicy(conflictResolutionPath); + collection.setConflictResolutionPolicy(policy); + return collection; + } + + private DocumentCollection getCollectionDefForCustom(String id, String storedProc) { + DocumentCollection collection = new DocumentCollection(); + collection.id(id); + ConflictResolutionPolicy policy = ConflictResolutionPolicy.createCustomPolicy(storedProc); + collection.setConflictResolutionPolicy(policy); + return collection; + } + + public void initialize() throws Exception { + AsyncDocumentClient createClient = this.clients.get(0); + + Helpers.createDatabaseIfNotExists(createClient, this.databaseName).subscribeOn(schedulerForBlockingWork).block(); + + DocumentCollection basic = createCollectionIfNotExists(createClient, this.databaseName, this.basicCollectionName); + + DocumentCollection manualCollection = createCollectionIfNotExists(createClient, + Helpers.createDatabaseUri(this.databaseName), getCollectionDefForManual(this.manualCollectionName)); + + DocumentCollection lwwCollection = createCollectionIfNotExists(createClient, + Helpers.createDatabaseUri(this.databaseName), getCollectionDefForLastWinWrites(this.lwwCollectionName, "/regionId")); + + DocumentCollection udpCollection = createCollectionIfNotExists(createClient, + Helpers.createDatabaseUri(this.databaseName), getCollectionDefForCustom(this.udpCollectionName, + String.format("dbs/%s/colls/%s/sprocs/%s", this.databaseName, this.udpCollectionName, "resolver"))); + + StoredProcedure lwwSproc = new StoredProcedure(); + lwwSproc.id("resolver"); + lwwSproc.setBody(IOUtils.toString( + getClass().getClassLoader().getResourceAsStream("resolver-storedproc.txt"), "UTF-8")); + + lwwSproc = + getResource(createClient.upsertStoredProcedure( + Helpers.createDocumentCollectionUri(this.databaseName, this.udpCollectionName), lwwSproc, null)); + + } + + private T getResource(Flux> obs) { + return obs.subscribeOn(schedulerForBlockingWork).single().block().getResource(); + } + + public void runManualConflict() throws Exception { + logger.info("\r\nInsert Conflict\r\n"); + this.runInsertConflictOnManual(); + + logger.info("\r\nUPDATE Conflict\r\n"); + this.runUpdateConflictOnManual(); + + logger.info("\r\nDELETE Conflict\r\n"); + this.runDeleteConflictOnManual(); + } + + public void runLWWConflict() throws Exception { + logger.info("\r\nInsert Conflict\r\n"); + this.runInsertConflictOnLWW(); + + logger.info("\r\nUPDATE Conflict\r\n"); + this.runUpdateConflictOnLWW(); + + logger.info("\r\nDELETE Conflict\r\n"); + this.runDeleteConflictOnLWW(); + } + + public void runUDPConflict() throws Exception { + logger.info("\r\nInsert Conflict\r\n"); + this.runInsertConflictOnUdp(); + + logger.info("\r\nUPDATE Conflict\r\n"); + this.runUpdateConflictOnUdp(); + + logger.info("\r\nDELETE Conflict\r\n"); + this.runDeleteConflictOnUdp(); + } + + public void runInsertConflictOnManual() throws Exception { + do { + logger.info("1) 
Performing conflicting insert across {} regions on {}", this.clients.size(), this.manualCollectionName); + + ArrayList> insertTask = new ArrayList<>(); + + Document conflictDocument = new Document(); + conflictDocument.id(UUID.randomUUID().toString()); + + int index = 0; + for (AsyncDocumentClient client : this.clients) { + insertTask.add(this.tryInsertDocument(client, this.manualCollectionUri, conflictDocument, index++)); + } + + List conflictDocuments = Flux.merge(insertTask).collectList().subscribeOn(schedulerForBlockingWork).single().block(); + + if (conflictDocuments.size() == this.clients.size()) { + logger.info("2) Caused {} insert conflicts, verifying conflict resolution", conflictDocuments.size()); + + for (Document conflictingInsert : conflictDocuments) { + this.validateManualConflict(this.clients, conflictingInsert); + } + break; + } else { + logger.info("Retrying insert to induce conflicts"); + } + } while (true); + } + + public void runUpdateConflictOnManual() throws Exception { + do { + Document conflictDocument = new Document(); + conflictDocument.id(UUID.randomUUID().toString()); + + + conflictDocument = this.tryInsertDocument(clients.get(0), this.manualCollectionUri, conflictDocument, 0) + .singleOrEmpty().block(); + + TimeUnit.SECONDS.sleep(1);//1 Second for write to sync. + + + logger.info("1) Performing conflicting update across 3 regions on {}", this.manualCollectionName); + + ArrayList> updateTask = new ArrayList<>(); + + int index = 0; + for (AsyncDocumentClient client : this.clients) { + updateTask.add(this.tryUpdateDocument(client, this.manualCollectionUri, conflictDocument, index++)); + } + + List conflictDocuments = Flux.merge(updateTask).collectList().single().block(); + + if (conflictDocuments.size() > 1) { + logger.info("2) Caused {} updated conflicts, verifying conflict resolution", conflictDocuments.size()); + + for (Document conflictingUpdate : conflictDocuments) { + this.validateManualConflict(this.clients, conflictingUpdate); + } + break; + } else { + logger.info("Retrying update to induce conflicts"); + } + } while (true); + } + + public void runDeleteConflictOnManual() throws Exception { + do { + Document conflictDocument = new Document(); + conflictDocument.id(UUID.randomUUID().toString()); + + conflictDocument = this.tryInsertDocument(clients.get(0), this.manualCollectionUri, conflictDocument, 0) + .singleOrEmpty().block(); + + TimeUnit.SECONDS.sleep(10);//1 Second for write to sync. 
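+             // Delete the same document from every region concurrently to force a delete conflict.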
+ + logger.info("1) Performing conflicting delete across 3 regions on {}", this.manualCollectionName); + + ArrayList> deleteTask = new ArrayList<>(); + + int index = 0; + for (AsyncDocumentClient client : this.clients) { + deleteTask.add(this.tryDeleteDocument(client, this.manualCollectionUri, conflictDocument, index++)); + } + + List conflictDocuments = Flux.merge(deleteTask).collectList() + .subscribeOn(schedulerForBlockingWork) + .single().block(); + + if (conflictDocuments.size() > 1) { + logger.info("2) Caused {} delete conflicts, verifying conflict resolution", conflictDocuments.size()); + + for (Document conflictingDelete : conflictDocuments) { + this.validateManualConflict(this.clients, conflictingDelete); + } + + break; + } else { + logger.info("Retrying update to induce conflicts"); + } + } while (true); + } + + public void runInsertConflictOnLWW() throws Exception { + do { + logger.info("Performing conflicting insert across 3 regions"); + + ArrayList> insertTask = new ArrayList<>(); + + Document conflictDocument = new Document(); + conflictDocument.id(UUID.randomUUID().toString()); + + int index = 0; + for (AsyncDocumentClient client : this.clients) { + insertTask.add(this.tryInsertDocument(client, this.lwwCollectionUri, conflictDocument, index++)); + } + + List conflictDocuments = Flux.merge(insertTask).collectList().single().block(); + + + if (conflictDocuments.size() > 1) { + logger.info("Inserted {} conflicts, verifying conflict resolution", conflictDocuments.size()); + + this.validateLWW(this.clients, conflictDocuments); + + break; + } else { + logger.info("Retrying insert to induce conflicts"); + } + } while (true); + } + + public void runUpdateConflictOnLWW() throws Exception { + do { + Document conflictDocument = new Document(); + conflictDocument.id(UUID.randomUUID().toString()); + + conflictDocument = this.tryInsertDocument(clients.get(0), this.lwwCollectionUri, conflictDocument, 0) + .singleOrEmpty().block(); + + + TimeUnit.SECONDS.sleep(1); //1 Second for write to sync. + + logger.info("1) Performing conflicting update across {} regions on {}", this.clients.size(), this.lwwCollectionUri); + + ArrayList> insertTask = new ArrayList<>(); + + int index = 0; + for (AsyncDocumentClient client : this.clients) { + insertTask.add(this.tryUpdateDocument(client, this.lwwCollectionUri, conflictDocument, index++)); + } + + List conflictDocuments = Flux.merge(insertTask).collectList().single().block(); + + + if (conflictDocuments.size() > 1) { + logger.info("2) Caused {} update conflicts, verifying conflict resolution", conflictDocuments.size()); + + this.validateLWW(this.clients, conflictDocuments); + + break; + } else { + logger.info("Retrying insert to induce conflicts"); + } + } while (true); + } + + public void runDeleteConflictOnLWW() throws Exception { + do { + Document conflictDocument = new Document(); + conflictDocument.id(UUID.randomUUID().toString()); + + conflictDocument = this.tryInsertDocument(clients.get(0), this.lwwCollectionUri, conflictDocument, 0) + .singleOrEmpty().block(); + + + TimeUnit.SECONDS.sleep(1); //1 Second for write to sync. + + logger.info("1) Performing conflicting delete across {} regions on {}", this.clients.size(), this.lwwCollectionUri); + + ArrayList> insertTask = new ArrayList<>(); + + int index = 0; + for (AsyncDocumentClient client : this.clients) { + if (index % 2 == 1) { + //We delete from region 1, even though region 2 always win. 
+ insertTask.add(this.tryDeleteDocument(client, this.lwwCollectionUri, conflictDocument, index++)); + } else { + insertTask.add(this.tryUpdateDocument(client, this.lwwCollectionUri, conflictDocument, index++)); + } + } + + List conflictDocuments = Flux.merge(insertTask).collectList().single().block(); + + if (conflictDocuments.size() > 1) { + logger.info("Inserted {} conflicts, verifying conflict resolution", conflictDocuments.size()); + + //DELETE should always win. irrespective of LWW. + this.validateLWW(this.clients, conflictDocuments, true); + break; + } else { + logger.info("Retrying update/delete to induce conflicts"); + } + } while (true); + } + + public void runInsertConflictOnUdp() throws Exception { + do { + logger.info("1) Performing conflicting insert across 3 regions on {}", this.udpCollectionName); + + ArrayList> insertTask = new ArrayList<>(); + + Document conflictDocument = new Document(); + conflictDocument.id(UUID.randomUUID().toString()); + + int index = 0; + for (AsyncDocumentClient client : this.clients) { + insertTask.add(this.tryInsertDocument(client, this.udpCollectionUri, conflictDocument, index++)); + } + + List conflictDocuments = Flux.merge(insertTask).collectList().single().block(); + + + if (conflictDocuments.size() > 1) { + logger.info("2) Caused {} insert conflicts, verifying conflict resolution", conflictDocuments.size()); + + this.validateUDPAsync(this.clients, conflictDocuments); + + break; + } else { + logger.info("Retrying insert to induce conflicts"); + } + } while (true); + } + + public void runUpdateConflictOnUdp() throws Exception { + do { + Document conflictDocument = new Document(); + conflictDocument.id(UUID.randomUUID().toString()); + + conflictDocument = this.tryInsertDocument(clients.get(0), this.udpCollectionUri, conflictDocument, 0) + .singleOrEmpty().block(); + + TimeUnit.SECONDS.sleep(1); //1 Second for write to sync. + + logger.info("1) Performing conflicting update across 3 regions on {}", this.udpCollectionUri); + + ArrayList> updateTask = new ArrayList<>(); + + int index = 0; + for (AsyncDocumentClient client : this.clients) { + updateTask.add(this.tryUpdateDocument(client, this.udpCollectionUri, conflictDocument, index++)); + } + + List conflictDocuments = Flux.merge(updateTask).collectList().single().block(); + + + if (conflictDocuments.size() > 1) { + logger.info("2) Caused {} update conflicts, verifying conflict resolution", conflictDocuments.size()); + + this.validateUDPAsync(this.clients, conflictDocuments); + + break; + } else { + logger.info("Retrying update to induce conflicts"); + } + } while (true); + } + + public void runDeleteConflictOnUdp() throws Exception { + do { + Document conflictDocument = new Document(); + conflictDocument.id(UUID.randomUUID().toString()); + + conflictDocument = this.tryInsertDocument(clients.get(0), this.udpCollectionUri, conflictDocument, 0) + .singleOrEmpty().block(); + + TimeUnit.SECONDS.sleep(1); //1 Second for write to sync. + + logger.info("1) Performing conflicting update/delete across 3 regions on {}", this.udpCollectionUri); + + ArrayList> deleteTask = new ArrayList<>(); + + int index = 0; + for (AsyncDocumentClient client : this.clients) { + if (index % 2 == 1) { + //We delete from region 1, even though region 2 always win. 
+ deleteTask.add(this.tryDeleteDocument(client, this.udpCollectionUri, conflictDocument, index++)); + } else { + deleteTask.add(this.tryUpdateDocument(client, this.udpCollectionUri, conflictDocument, index++)); + } + } + + List conflictDocuments = Flux.merge(deleteTask).collectList().single().block(); + + if (conflictDocuments.size() > 1) { + logger.info("2) Caused {} delete conflicts, verifying conflict resolution", conflictDocuments.size()); + + //DELETE should always win. irrespective of LWW. + this.validateUDPAsync(this.clients, conflictDocuments, true); + break; + } else { + logger.info("Retrying update/delete to induce conflicts"); + } + } while (true); + } + + private Flux tryInsertDocument(AsyncDocumentClient client, String collectionUri, Document document, int index) { + + logger.debug("region: {}", client.getWriteEndpoint()); + BridgeInternal.setProperty(document, "regionId", index); + BridgeInternal.setProperty(document, "regionEndpoint", client.getReadEndpoint()); + return client.createDocument(collectionUri, document, null, false) + .onErrorResume(e -> { + if (hasDocumentClientException(e, 409)) { + return Flux.empty(); + } else { + return Flux.error(e); + } + }).map(ResourceResponse::getResource); + } + + private boolean hasDocumentClientException(Throwable e, int statusCode) { + if (e instanceof CosmosClientException) { + CosmosClientException dce = (CosmosClientException) e; + return dce.statusCode() == statusCode; + } + + return false; + } + + private boolean hasDocumentClientExceptionCause(Throwable e) { + while (e != null) { + if (e instanceof CosmosClientException) { + return true; + } + + e = e.getCause(); + } + return false; + } + + private boolean hasDocumentClientExceptionCause(Throwable e, int statusCode) { + while (e != null) { + if (e instanceof CosmosClientException) { + CosmosClientException dce = (CosmosClientException) e; + return dce.statusCode() == statusCode; + } + + e = e.getCause(); + } + + return false; + } + + private Flux tryUpdateDocument(AsyncDocumentClient client, String collectionUri, Document document, int index) { + BridgeInternal.setProperty(document, "regionId", index); + BridgeInternal.setProperty(document, "regionEndpoint", client.getReadEndpoint()); + + RequestOptions options = new RequestOptions(); + options.setAccessCondition(new AccessCondition()); + options.getAccessCondition().type(AccessConditionType.IF_MATCH); + options.getAccessCondition().condition(document.etag()); + + + return client.replaceDocument(document.selfLink(), document, null).onErrorResume(e -> { + + // pre condition failed + if (hasDocumentClientException(e, 412)) { + //Lost synchronously or not document yet. No conflict is induced. + return Flux.empty(); + + } + return Flux.error(e); + }).map(ResourceResponse::getResource); + } + + private Flux tryDeleteDocument(AsyncDocumentClient client, String collectionUri, Document document, int index) { + BridgeInternal.setProperty(document, "regionId", index); + BridgeInternal.setProperty(document, "regionEndpoint", client.getReadEndpoint()); + + RequestOptions options = new RequestOptions(); + options.setAccessCondition(new AccessCondition()); + options.getAccessCondition().type(AccessConditionType.IF_MATCH); + options.getAccessCondition().condition(document.etag()); + + + return client.deleteDocument(document.selfLink(), options).onErrorResume(e -> { + + // pre condition failed + if (hasDocumentClientException(e, 412)) { + //Lost synchronously. No conflict is induced. 
+ return Flux.empty(); + + } + return Flux.error(e); + }).map(rr -> document); + } + + private void validateManualConflict(List clients, Document conflictDocument) throws Exception { + boolean conflictExists = false; + for (AsyncDocumentClient client : clients) { + conflictExists = this.validateManualConflict(client, conflictDocument); + } + + if (conflictExists) { + this.deleteConflict(conflictDocument); + } + } + + private boolean isDelete(Conflict conflict) { + return StringUtils.equalsIgnoreCase(conflict.getOperationKind(), "delete"); + } + + + private boolean equals(String a, String b) { + return StringUtils.equals(a, b); + } + + private boolean validateManualConflict(AsyncDocumentClient client, Document conflictDocument) throws Exception { + while (true) { + FeedResponse response = client.readConflicts(this.manualCollectionUri, null) + .take(1).single().block(); + + for (Conflict conflict : response.results()) { + if (!isDelete(conflict)) { + Document conflictDocumentContent = conflict.getResource(Document.class); + if (equals(conflictDocument.id(), conflictDocumentContent.id())) { + if (equals(conflictDocument.resourceId(), conflictDocumentContent.resourceId()) && + equals(conflictDocument.etag(), conflictDocumentContent.etag())) { + logger.info("Document from Region {} lost conflict @ {}", + conflictDocument.id(), + conflictDocument.getInt("regionId"), + client.getReadEndpoint()); + return true; + } else { + try { + //Checking whether this is the winner. + Document winnerDocument = client.readDocument(conflictDocument.selfLink(), null) + .single().block().getResource(); + logger.info("Document from region {} won the conflict @ {}", + conflictDocument.getInt("regionId"), + client.getReadEndpoint()); + return false; + } + catch (Exception exception) { + if (hasDocumentClientException(exception, 404)) { + throw exception; + } else { + logger.info( + "Document from region {} not found @ {}", + conflictDocument.getInt("regionId"), + client.getReadEndpoint()); + } + } + } + } + } else { + if (equals(conflict.getSourceResourceId(), conflictDocument.resourceId())) { + logger.info("DELETE conflict found @ {}", + client.getReadEndpoint()); + return false; + } + } + } + + logger.error("Document {} is not found in conflict feed @ {}, retrying", + conflictDocument.id(), + client.getReadEndpoint()); + + TimeUnit.MILLISECONDS.sleep(500); + } + } + + private void deleteConflict(Document conflictDocument) { + AsyncDocumentClient delClient = clients.get(0); + + FeedResponse conflicts = delClient.readConflicts(this.manualCollectionUri, null).take(1).single().block(); + + for (Conflict conflict : conflicts.results()) { + if (!isDelete(conflict)) { + Document conflictContent = conflict.getResource(Document.class); + if (equals(conflictContent.resourceId(), conflictDocument.resourceId()) + && equals(conflictContent.etag(), conflictDocument.etag())) { + logger.info("Deleting manual conflict {} from region {}", + conflict.getSourceResourceId(), + conflictContent.getInt("regionId")); + delClient.deleteConflict(conflict.selfLink(), null) + .single().block(); + + } + } else if (equals(conflict.getSourceResourceId(), conflictDocument.resourceId())) { + logger.info("Deleting manual conflict {} from region {}", + conflict.getSourceResourceId(), + conflictDocument.getInt("regionId")); + delClient.deleteConflict(conflict.selfLink(), null) + .single().block(); + } + } + } + + private void validateLWW(List clients, List conflictDocument) throws Exception { + validateLWW(clients, conflictDocument, false); + } + + + 
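+     // Verifies the last-writer-wins outcome on every client: the conflict feed is expected to be empty, a delete always wins when hasDeleteConflict is true, and otherwise the document with the highest regionId must be readable from each region.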
private void validateLWW(List clients, List conflictDocument, boolean hasDeleteConflict) throws Exception { + for (AsyncDocumentClient client : clients) { + this.validateLWW(client, conflictDocument, hasDeleteConflict); + } + } + + private void validateLWW(AsyncDocumentClient client, List conflictDocument, boolean hasDeleteConflict) throws Exception { + FeedResponse response = client.readConflicts(this.lwwCollectionUri, null) + .take(1).single().block(); + + if (response.results().size() != 0) { + logger.error("Found {} conflicts in the lww collection", response.results().size()); + return; + } + + if (hasDeleteConflict) { + do { + try { + client.readDocument(conflictDocument.get(0).selfLink(), null).single().block(); + + logger.error("DELETE conflict for document {} didnt win @ {}", + conflictDocument.get(0).id(), + client.getReadEndpoint()); + + TimeUnit.MILLISECONDS.sleep(500); + } catch (Exception exception) { + if (!hasDocumentClientExceptionCause(exception)) { + throw exception; + } + + // NotFound + if (hasDocumentClientExceptionCause(exception, 404)) { + + logger.info("DELETE conflict won @ {}", client.getReadEndpoint()); + return; + } else { + logger.error("DELETE conflict for document {} didnt win @ {}", + conflictDocument.get(0).id(), + client.getReadEndpoint()); + + TimeUnit.MILLISECONDS.sleep(500); + } + } + } while (true); + } + + Document winnerDocument = null; + + for (Document document : conflictDocument) { + if (winnerDocument == null || + winnerDocument.getInt("regionId") <= document.getInt("regionId")) { + winnerDocument = document; + } + } + + logger.info("Document from region {} should be the winner", + winnerDocument.getInt("regionId")); + + while (true) { + try { + Document existingDocument = client.readDocument(winnerDocument.selfLink(), null) + .single().block().getResource(); + + if (existingDocument.getInt("regionId") == winnerDocument.getInt("regionId")) { + logger.info("Winner document from region {} found at {}", + existingDocument.getInt("regionId"), + client.getReadEndpoint()); + break; + } else { + logger.error("Winning document version from region {} is not found @ {}, retrying...", + winnerDocument.getInt("regionId"), + client.getWriteEndpoint()); + TimeUnit.MILLISECONDS.sleep(500); + } + } catch (Exception e) { + logger.error("Winner document from region {} is not found @ {}, retrying...", + winnerDocument.getInt("regionId"), + client.getWriteEndpoint()); + TimeUnit.MILLISECONDS.sleep(500); + } + } + } + + private void validateUDPAsync(List clients, List conflictDocument) throws Exception { + validateUDPAsync(clients, conflictDocument, false); + } + + private void validateUDPAsync(List clients, List conflictDocument, boolean hasDeleteConflict) throws Exception { + for (AsyncDocumentClient client : clients) { + this.validateUDPAsync(client, conflictDocument, hasDeleteConflict); + } + } + + private String documentNameLink(String collectionId, String documentId) { + return String.format("dbs/%s/colls/%s/docs/%s", databaseName, collectionId, documentId); + } + + private void validateUDPAsync(AsyncDocumentClient client, List conflictDocument, boolean hasDeleteConflict) throws Exception { + FeedResponse response = client.readConflicts(this.udpCollectionUri, null).take(1).single().block(); + + if (response.results().size() != 0) { + logger.error("Found {} conflicts in the udp collection", response.results().size()); + return; + } + + if (hasDeleteConflict) { + do { + try { + client.readDocument( + documentNameLink(udpCollectionName, 
conflictDocument.get(0).id()), null) + .single().block(); + + logger.error("DELETE conflict for document {} didnt win @ {}", + conflictDocument.get(0).id(), + client.getReadEndpoint()); + + TimeUnit.MILLISECONDS.sleep(500); + + } catch (Exception exception) { + if (hasDocumentClientExceptionCause(exception, 404)) { + logger.info("DELETE conflict won @ {}", client.getReadEndpoint()); + return; + } else { + logger.error("DELETE conflict for document {} didnt win @ {}", + conflictDocument.get(0).id(), + client.getReadEndpoint()); + + TimeUnit.MILLISECONDS.sleep(500); + } + } + } while (true); + } + + Document winnerDocument = null; + + for (Document document : conflictDocument) { + if (winnerDocument == null || + winnerDocument.getInt("regionId") <= document.getInt("regionId")) { + winnerDocument = document; + } + } + + logger.info("Document from region {} should be the winner", + winnerDocument.getInt("regionId")); + + while (true) { + try { + + Document existingDocument = client.readDocument( + documentNameLink(udpCollectionName, winnerDocument.id()), null) + .single().block().getResource(); + + if (existingDocument.getInt("regionId") == winnerDocument.getInt( + ("regionId"))) { + logger.info("Winner document from region {} found at {}", + existingDocument.getInt("regionId"), + client.getReadEndpoint()); + break; + } else { + logger.error("Winning document version from region {} is not found @ {}, retrying...", + winnerDocument.getInt("regionId"), + client.getWriteEndpoint()); + TimeUnit.MILLISECONDS.sleep(500); + } + } catch (Exception e) { + logger.error("Winner document from region {} is not found @ {}, retrying...", + winnerDocument.getInt("regionId"), + client.getWriteEndpoint()); + TimeUnit.MILLISECONDS.sleep(500); + } + } + } + + public void shutdown() { + this.executor.shutdown(); + for(AsyncDocumentClient client: clients) { + client.close(); + } + } +} diff --git a/sdk/cosmos/examples/src/main/java/com/azure/data/cosmos/rx/examples/multimaster/samples/Main.java b/sdk/cosmos/examples/src/main/java/com/azure/data/cosmos/rx/examples/multimaster/samples/Main.java new file mode 100644 index 0000000000000..b05e012dda6b1 --- /dev/null +++ b/sdk/cosmos/examples/src/main/java/com/azure/data/cosmos/rx/examples/multimaster/samples/Main.java @@ -0,0 +1,79 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.rx.examples.multimaster.samples; + +import com.azure.data.cosmos.rx.examples.multimaster.ConfigurationManager; +import org.apache.commons.io.IOUtils; + +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStream; + + +public class Main { + public static void main(String[] args) throws Exception { + + if (args.length != 1) { + help(); + System.exit(1); + } + + try (InputStream inputStream = new FileInputStream(args[0])) { + ConfigurationManager.getAppSettings().load(inputStream); + System.out.println("Using file " + args[0] + " for the setting."); + } + + Main.runScenarios(); + } + + private static void runScenarios() throws Exception { + MultiMasterScenario scenario = new MultiMasterScenario(); + scenario.initialize(); + + scenario.runBasic(); + + scenario.runManualConflict(); + scenario.runLWW(); + scenario.runUDP(); + + System.out.println("Finished"); + + //shutting down the active the resources + scenario.shutdown(); + } + + private static void help() throws IOException { + System.out.println("Provide the path to setting file in the following format: "); + try (InputStream inputStream = + Main.class.getClassLoader() + .getResourceAsStream("multi-master-sample-config.properties")) { + + IOUtils.copy(inputStream, System.out); + + System.out.println(); + } catch (Exception e) { + throw e; + } + } +} diff --git a/sdk/cosmos/examples/src/main/java/com/azure/data/cosmos/rx/examples/multimaster/samples/MultiMasterScenario.java b/sdk/cosmos/examples/src/main/java/com/azure/data/cosmos/rx/examples/multimaster/samples/MultiMasterScenario.java new file mode 100644 index 0000000000000..42d31123a67bd --- /dev/null +++ b/sdk/cosmos/examples/src/main/java/com/azure/data/cosmos/rx/examples/multimaster/samples/MultiMasterScenario.java @@ -0,0 +1,166 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.rx.examples.multimaster.samples; + +import com.azure.data.cosmos.internal.AsyncDocumentClient; +import com.azure.data.cosmos.ConnectionPolicy; +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.rx.examples.multimaster.ConfigurationManager; +import com.google.common.base.Preconditions; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Mono; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +public class MultiMasterScenario { + + private final static Logger logger = LoggerFactory.getLogger(MultiMasterScenario.class); + + final private String accountEndpoint; + final private String accountKey; + final private List workers; + final private ConflictWorker conflictWorker; + + public MultiMasterScenario() { + this.accountEndpoint = ConfigurationManager.getAppSettings().getProperty("endpoint"); + this.accountKey = ConfigurationManager.getAppSettings().getProperty("key"); + + String databaseName = ConfigurationManager.getAppSettings().getProperty("databaseName"); + String manualCollectionName = ConfigurationManager.getAppSettings().getProperty("manualCollectionName"); + String lwwCollectionName = ConfigurationManager.getAppSettings().getProperty("lwwCollectionName"); + String udpCollectionName = ConfigurationManager.getAppSettings().getProperty("udpCollectionName"); + String basicCollectionName = ConfigurationManager.getAppSettings().getProperty("basicCollectionName"); + String regionsAsString = ConfigurationManager.getAppSettings().getProperty("regions"); + Preconditions.checkNotNull(regionsAsString, "regions is required"); + String[] regions = regionsAsString.split(";"); + Preconditions.checkArgument(regions.length > 0, "at least one region is required"); + Preconditions.checkNotNull(accountEndpoint, "accountEndpoint is required"); + Preconditions.checkNotNull(accountKey, "accountKey is required"); + Preconditions.checkNotNull(databaseName, "databaseName is required"); + Preconditions.checkNotNull(manualCollectionName, "manualCollectionName is required"); + Preconditions.checkNotNull(lwwCollectionName, "lwwCollectionName is required"); + Preconditions.checkNotNull(udpCollectionName, "udpCollectionName is required"); + Preconditions.checkNotNull(basicCollectionName, "basicCollectionName is required"); + + this.workers = new ArrayList<>(); + this.conflictWorker = new ConflictWorker(databaseName, basicCollectionName, manualCollectionName, lwwCollectionName, udpCollectionName); + + for (String region : regions) { + ConnectionPolicy policy = new ConnectionPolicy(); + policy.usingMultipleWriteLocations(true); + policy.preferredLocations(Collections.singletonList(region)); + + AsyncDocumentClient client = + new AsyncDocumentClient.Builder() + .withMasterKeyOrResourceToken(this.accountKey) + .withServiceEndpoint(this.accountEndpoint) + .withConsistencyLevel(ConsistencyLevel.EVENTUAL) + .withConnectionPolicy(policy).build(); + + + workers.add(new Worker(client, databaseName, basicCollectionName)); + + conflictWorker.addClient(client); + } + } + + public void initialize() throws Exception { + this.conflictWorker.initialize(); + logger.info("Initialized collections."); + } + + public void runBasic() throws Exception { + logger.info("\n####################################################"); + logger.info("Basic Active-Active"); + logger.info("####################################################"); + + logger.info("1) Starting insert loops across multiple regions 
..."); + + List> basicTask = new ArrayList<>(); + + int documentsToInsertPerWorker = 100; + + for (Worker worker : this.workers) { + basicTask.add(worker.runLoopAsync(documentsToInsertPerWorker)); + } + + Mono.when(basicTask).block(); + + basicTask.clear(); + + logger.info("2) Reading from every region ..."); + + int expectedDocuments = this.workers.size() * documentsToInsertPerWorker; + for (Worker worker : this.workers) { + basicTask.add(worker.readAllAsync(expectedDocuments)); + } + + Mono.when(basicTask).block(); + + basicTask.clear(); + + logger.info("3) Deleting all the documents ..."); + + this.workers.get(0).deleteAll(); + + logger.info("####################################################"); + } + + public void runManualConflict() throws Exception { + logger.info("\n####################################################"); + logger.info("Manual Conflict Resolution"); + logger.info("####################################################"); + + this.conflictWorker.runManualConflict(); + logger.info("####################################################"); + } + + public void runLWW() throws Exception { + logger.info("\n####################################################"); + logger.info("LWW Conflict Resolution"); + logger.info("####################################################"); + + this.conflictWorker.runLWWConflict(); + logger.info("####################################################"); + } + + public void runUDP() throws Exception { + logger.info("\n####################################################"); + logger.info("UDP Conflict Resolution"); + logger.info("####################################################"); + + this.conflictWorker.runUDPConflict(); + logger.info("####################################################"); + } + + public void shutdown() { + conflictWorker.shutdown(); + for(Worker worker: this.workers) { + worker.shutdown(); + } + } +} diff --git a/sdk/cosmos/examples/src/main/java/com/azure/data/cosmos/rx/examples/multimaster/samples/Worker.java b/sdk/cosmos/examples/src/main/java/com/azure/data/cosmos/rx/examples/multimaster/samples/Worker.java new file mode 100644 index 0000000000000..f5d7657f89762 --- /dev/null +++ b/sdk/cosmos/examples/src/main/java/com/azure/data/cosmos/rx/examples/multimaster/samples/Worker.java @@ -0,0 +1,186 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.rx.examples.multimaster.samples; + + +import com.azure.data.cosmos.internal.AsyncDocumentClient; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.internal.Document; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Mono; +import reactor.core.scheduler.Scheduler; +import reactor.core.scheduler.Schedulers; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; + +public class Worker { + private final static Logger logger = LoggerFactory.getLogger(Worker.class); + + private final AsyncDocumentClient client; + private final String documentCollectionUri; + + // scheduler for blocking work + private final Scheduler schedulerForBlockingWork; + private final ExecutorService executor; + + public Worker(AsyncDocumentClient client, String databaseName, String collectionName) { + this.client = client; + this.documentCollectionUri = String.format("/dbs/%s/colls/%s", databaseName, collectionName); + this.executor = Executors.newSingleThreadExecutor(); + this.schedulerForBlockingWork = Schedulers.fromExecutor(executor); + } + + public Mono runLoopAsync(int documentsToInsert) { + return Mono.defer(() -> { + + int iterationCount = 0; + + List latency = new ArrayList<>(); + while (iterationCount++ < documentsToInsert) { + long startTick = System.currentTimeMillis(); + + Document d = new Document(); + d.id(UUID.randomUUID().toString()); + + this.client.createDocument(this.documentCollectionUri, d, null, false) + .subscribeOn(schedulerForBlockingWork).single().block(); + + long endTick = System.currentTimeMillis(); + + latency.add(endTick - startTick); + } + + Collections.sort(latency); + int p50Index = (latency.size() / 2); + + logger.info("Inserted {} documents at {} with p50 {} ms", + documentsToInsert, + this.client.getWriteEndpoint(), + latency.get(p50Index)); + + return Mono.empty(); + + }); + + } + + + public Mono readAllAsync(int expectedNumberOfDocuments) { + + return Mono.defer(() -> { + + while (true) { + int totalItemRead = 0; + FeedResponse response = null; + do { + + FeedOptions options = new FeedOptions(); + options.requestContinuation(response != null ? response.continuationToken() : null); + + response = this.client.readDocuments(this.documentCollectionUri, options).take(1) + .subscribeOn(schedulerForBlockingWork).single().block(); + + totalItemRead += response.results().size(); + } while (response.continuationToken() != null); + + if (totalItemRead < expectedNumberOfDocuments) { + logger.info("Total item read {} from {} is less than {}, retrying reads", + totalItemRead, + this.client.getReadEndpoint(), + expectedNumberOfDocuments); + + try { + TimeUnit.SECONDS.sleep(1); + } catch (InterruptedException e) { + logger.info("interrupted"); + break; + } + continue; + } else { + logger.info("READ {} items from {}", totalItemRead, this.client.getReadEndpoint()); + break; + } + } + + return Mono.empty(); + }); + } + + void deleteAll() { + List documents = new ArrayList<>(); + FeedResponse response = null; + do { + + FeedOptions options = new FeedOptions(); + options.requestContinuation(response != null ? 
response.continuationToken() : null); + + response = this.client.readDocuments(this.documentCollectionUri, options).take(1) + .subscribeOn(schedulerForBlockingWork).single().block(); + + documents.addAll(response.results()); + } while (response.continuationToken() != null); + + for (Document document : documents) { + try { + this.client.deleteDocument(document.selfLink(), null) + .subscribeOn(schedulerForBlockingWork).single().block(); + } catch (RuntimeException exEx) { + CosmosClientException dce = getDocumentClientExceptionCause(exEx); + + if (dce.statusCode() != 404) { + logger.info("Error occurred while deleting {} from {}", dce, client.getWriteEndpoint()); + } + } + } + + logger.info("Deleted all documents from region {}", this.client.getWriteEndpoint()); + } + + private CosmosClientException getDocumentClientExceptionCause(Throwable e) { + while (e != null) { + + if (e instanceof CosmosClientException) { + return (CosmosClientException) e; + } + + e = e.getCause(); + } + + return null; + } + + public void shutdown() { + executor.shutdown(); + client.close(); + } +} diff --git a/sdk/cosmos/examples/src/main/resources/log4j.properties b/sdk/cosmos/examples/src/main/resources/log4j.properties new file mode 100644 index 0000000000000..a8f16e5dee315 --- /dev/null +++ b/sdk/cosmos/examples/src/main/resources/log4j.properties @@ -0,0 +1,23 @@ +# this is the log4j configuration for tests + +# Set root logger level to DEBUG and its only appender to A1. +log4j.rootLogger=WARN, A1 + +log4j.category.io.netty=INFO +log4j.category.io.reactivex=INFO +log4j.category.com.azure.data.cosmos.rx.examples.multimaster.samples.ConflictWorker=INFO +log4j.category.com.azure.data.cosmos.rx.examples.multimaster.samples.Main=INFO +log4j.category.com.azure.data.cosmos.rx.examples.multimaster.samples.Worker=INFO +log4j.category.com.azure.data.cosmos.rx.examples.multimaster.samples.MultiMasterScenario=INFO +log4j.category.com.azure.data.cosmos.rx.examples.multimaster.ConfigurationManager=INFO +log4j.category.com.azure.data.cosmos.rx.examples.multimaster.Helpers=INFO + +# A1 is set to be a ConsoleAppender. +log4j.appender.A1=org.apache.log4j.ConsoleAppender + +# A1 uses PatternLayout. 
+log4j.appender.A1.layout=org.apache.log4j.PatternLayout +#log4j.appender.A1.layout.ConversionPattern=%d %5X{pid} [%t] %-5p - %m%n + +log4j.appender.A1.layout.ConversionPattern=%m%n + diff --git a/sdk/cosmos/examples/src/main/resources/multi-master-sample-config.properties b/sdk/cosmos/examples/src/main/resources/multi-master-sample-config.properties new file mode 100644 index 0000000000000..42c20302edc12 --- /dev/null +++ b/sdk/cosmos/examples/src/main/resources/multi-master-sample-config.properties @@ -0,0 +1,8 @@ +endpoint= +key= +regions=North Central US;North Europe;Southeast Asia +databaseName=multiMasterDemoDB +manualCollectionName=myManualCollection +lwwCollectionName=myLwwCollection +udpCollectionName=myUdpCollection +basicCollectionName=myBasicCollection \ No newline at end of file diff --git a/sdk/cosmos/examples/src/main/resources/resolver-storedproc.txt b/sdk/cosmos/examples/src/main/resources/resolver-storedproc.txt new file mode 100644 index 0000000000000..e856721a979ac --- /dev/null +++ b/sdk/cosmos/examples/src/main/resources/resolver-storedproc.txt @@ -0,0 +1,45 @@ +function resolver(incomingRecord, existingRecord, isTombstone, conflictingRecords) { + var collection = getContext().getCollection(); + if (!incomingRecord) { + if (existingRecord) { + collection.deleteDocument(existingRecord._self, {}, function(err, responseOptions) { + if (err) throw err; + }); + } + } else if (isTombstone) { + // delete always wins. + } else { + var documentToUse = incomingRecord; + if (existingRecord) { + if (documentToUse.regionId < existingRecord.regionId) { + documentToUse = existingRecord; + } + } + var i; + for (i = 0; i < conflictingRecords.length; i++) { + if (documentToUse.regionId < conflictingRecords[i].regionId) { + documentToUse = conflictingRecords[i]; + } + } + tryDelete(conflictingRecords, incomingRecord, existingRecord, documentToUse); + } + function tryDelete(documents, incoming, existing, documentToInsert) { + if (documents.length > 0) { + collection.deleteDocument(documents[0]._self, {}, function(err, responseOptions) { + if (err) throw err; + documents.shift(); + tryDelete(documents, incoming, existing, documentToInsert); + }); + } else if (existing) { + collection.replaceDocument(existing._self, documentToInsert, + function(err, documentCreated) { + if (err) throw err; + }); + } else { + collection.createDocument(collection.getSelfLink(), documentToInsert, + function(err, documentCreated) { + if (err) throw err; + }); + } + } +} \ No newline at end of file diff --git a/sdk/cosmos/examples/src/test/java/com/azure/data/cosmos/DocumentClientTest.java b/sdk/cosmos/examples/src/test/java/com/azure/data/cosmos/DocumentClientTest.java new file mode 100644 index 0000000000000..2a537e8ed9b39 --- /dev/null +++ b/sdk/cosmos/examples/src/test/java/com/azure/data/cosmos/DocumentClientTest.java @@ -0,0 +1,74 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.AsyncDocumentClient; +import com.google.common.base.Strings; +import org.testng.ITest; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; + +import java.lang.reflect.Method; + +public abstract class DocumentClientTest implements ITest { + + private final AsyncDocumentClient.Builder clientBuilder; + private String testName; + + public DocumentClientTest() { + this(new AsyncDocumentClient.Builder()); + } + + public DocumentClientTest(AsyncDocumentClient.Builder clientBuilder) { + this.clientBuilder = clientBuilder; + } + + public final AsyncDocumentClient.Builder clientBuilder() { + return this.clientBuilder; + } + + @Override + public final String getTestName() { + return this.testName; + } + + @BeforeMethod(alwaysRun = true) + public final void setTestName(Method method) { + + String connectionMode = this.clientBuilder.getConnectionPolicy().connectionMode() == ConnectionMode.DIRECT + ? "Direct " + this.clientBuilder.getConfigs().getProtocol() + : "Gateway"; + + this.testName = Strings.lenientFormat("%s::%s[%s with %s consistency]", + method.getDeclaringClass().getSimpleName(), + method.getName(), + connectionMode, + clientBuilder.getDesiredConsistencyLevel()); + } + + @AfterMethod(alwaysRun = true) + public final void unsetTestName() { + this.testName = null; + } +} diff --git a/sdk/cosmos/examples/src/test/java/com/azure/data/cosmos/rx/examples/CollectionCRUDAsyncAPITest.java b/sdk/cosmos/examples/src/test/java/com/azure/data/cosmos/rx/examples/CollectionCRUDAsyncAPITest.java new file mode 100644 index 0000000000000..7a882bc6540db --- /dev/null +++ b/sdk/cosmos/examples/src/test/java/com/azure/data/cosmos/rx/examples/CollectionCRUDAsyncAPITest.java @@ -0,0 +1,415 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos.rx.examples; + +import com.azure.data.cosmos.internal.AsyncDocumentClient; +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.ConnectionMode; +import com.azure.data.cosmos.ConnectionPolicy; +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.DataType; +import com.azure.data.cosmos.internal.Database; +import com.azure.data.cosmos.DocumentClientTest; +import com.azure.data.cosmos.internal.DocumentCollection; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.IncludedPath; +import com.azure.data.cosmos.Index; +import com.azure.data.cosmos.IndexingPolicy; +import com.azure.data.cosmos.PartitionKeyDefinition; +import com.azure.data.cosmos.internal.RequestOptions; +import com.azure.data.cosmos.internal.ResourceResponse; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CountDownLatch; +import java.util.function.Consumer; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; + +/** + * This integration test class demonstrates how to use Async API to create, + * delete, replace, and update Document Collections. + *
+ * NOTE: you can use the rxJava-based async API with Java 8 lambda expressions. Using + * the async API with lambda expressions is much more concise. + *
+ * You can also use the async API without Java 8 lambda expression support. + *
+ * For example + * <ul> + * <li>{@link #createCollection_MultiPartition_Async()} demonstrates how to use the async API + * with a Java 8 lambda expression.</li> + * <li>{@link #createCollection_Async_withoutLambda()} demonstrates how to + * do the same thing without a lambda expression.</li> + * </ul> + *
+ * Also, if you need to work with Future or CompletableFuture, it is possible to + * transform a Flux to a CompletableFuture. Please see + * {@link #transformObservableToCompletableFuture()} + *
+ * To Modify the Collection's throughput after it has been created, you need to + * update the corresponding Offer. Please see + * {@see com.azure.data.cosmos.rx.examples.OfferCRUDAsyncAPITest#testUpdateOffer()} + */ +public class CollectionCRUDAsyncAPITest extends DocumentClientTest { + + private final static int TIMEOUT = 120000; + private Database createdDatabase; + private AsyncDocumentClient client; + private DocumentCollection collectionDefinition; + + @BeforeClass(groups = "samples", timeOut = TIMEOUT) + public void setUp() { + + ConnectionPolicy connectionPolicy = new ConnectionPolicy().connectionMode(ConnectionMode.DIRECT); + + this.clientBuilder() + .withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.SESSION); + + this.client = this.clientBuilder().build(); + + createdDatabase = Utils.createDatabaseForTest(client); + } + + @BeforeMethod(groups = "samples", timeOut = TIMEOUT) + public void before() { + collectionDefinition = new DocumentCollection(); + collectionDefinition.id(UUID.randomUUID().toString()); + PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition(); + ArrayList paths = new ArrayList(); + paths.add("/mypk"); + partitionKeyDef.paths(paths); + collectionDefinition.setPartitionKey(partitionKeyDef); + } + + @AfterClass(groups = "samples", timeOut = TIMEOUT) + public void shutdown() { + Utils.safeClean(client, createdDatabase); + Utils.safeClose(client); + } + + /** + * CREATE a document collection using async api. + * If you want a single partition collection with 10,000 RU/s throughput, + * the only way to do so is to create a single partition collection with lower + * throughput (400) and then increase the throughput. + */ + @Test(groups = "samples", timeOut = TIMEOUT) + public void createCollection_SinglePartition_Async() throws Exception { + RequestOptions singlePartitionRequestOptions = new RequestOptions(); + singlePartitionRequestOptions.setOfferThroughput(400); + Flux> createCollectionObservable = client + .createCollection(getDatabaseLink(), collectionDefinition, singlePartitionRequestOptions); + + final CountDownLatch countDownLatch = new CountDownLatch(1); + + createCollectionObservable.single() // We know there is only single result + .subscribe(collectionResourceResponse -> { + System.out.println(collectionResourceResponse.getActivityId()); + countDownLatch.countDown(); + }, error -> { + System.err.println( + "an error occurred while creating the collection: actual cause: " + error.getMessage()); + countDownLatch.countDown(); + }); + + // Wait till collection creation completes + countDownLatch.await(); + } + + /** + * CREATE a document collection using async api. + * This test uses java8 lambda expression. + * See testCreateCollection_Async_withoutLambda for usage without lambda + * expressions. + * Set the throughput to be > 10,000 RU/s + * to create a multi partition collection. 
+ */ + @Test(groups = "samples", timeOut = TIMEOUT) + public void createCollection_MultiPartition_Async() throws Exception { + RequestOptions multiPartitionRequestOptions = new RequestOptions(); + multiPartitionRequestOptions.setOfferThroughput(20000); + + Flux> createCollectionObservable = client.createCollection( + getDatabaseLink(), getMultiPartitionCollectionDefinition(), multiPartitionRequestOptions); + + final CountDownLatch countDownLatch = new CountDownLatch(1); + + createCollectionObservable.single() // We know there is only single result + .subscribe(collectionResourceResponse -> { + System.out.println(collectionResourceResponse.getActivityId()); + countDownLatch.countDown(); + }, error -> { + System.err.println( + "an error occurred while creating the collection: actual cause: " + error.getMessage()); + countDownLatch.countDown(); + }); + + // Wait till collection creation completes + countDownLatch.await(); + } + + /** + * CREATE a document Collection using async api, without java8 lambda expressions + */ + @Test(groups = "samples", timeOut = TIMEOUT) + public void createCollection_Async_withoutLambda() throws Exception { + Flux> createCollectionObservable = client + .createCollection(getDatabaseLink(), collectionDefinition, null); + + final CountDownLatch countDownLatch = new CountDownLatch(1); + Consumer> onCollectionCreationAction = new Consumer>() { + + @Override + public void accept(ResourceResponse resourceResponse) { + // Collection is created + System.out.println(resourceResponse.getActivityId()); + countDownLatch.countDown(); + } + }; + + Consumer onError = new Consumer() { + @Override + public void accept(Throwable error) { + System.err.println( + "an error occurred while creating the collection: actual cause: " + error.getMessage()); + countDownLatch.countDown(); + } + }; + + createCollectionObservable.single() // We know there is only a single event + .subscribe(onCollectionCreationAction, onError); + + // Wait till collection creation completes + countDownLatch.await(); + } + + /** + * CREATE a collection in a blocking manner + */ + @Test(groups = "samples", timeOut = TIMEOUT) + public void createCollection_toBlocking() { + Flux> createCollectionObservable = client + .createCollection(getDatabaseLink(), collectionDefinition, null); + + // single() converts the flux to a mono. + // block() gets the only result. + createCollectionObservable.single().block(); + } + + /** + * Attempt to create a Collection which already exists + * - First create a Collection + * - Using the async api generate an async collection creation observable + * - Converts the Observable to blocking using Observable.toBlocking() api + * - Catch already exist failure (409) + */ + @Test(groups = "samples", timeOut = TIMEOUT) + public void createCollection_toBlocking_CollectionAlreadyExists_Fails() { + client.createCollection(getDatabaseLink(), collectionDefinition, null).single().block(); + + // CREATE the collection for test. + Flux> collectionForTestObservable = client + .createCollection(getDatabaseLink(), collectionDefinition, null); + + try { + collectionForTestObservable.single() // Gets the single result + .block(); // Blocks + assertThat("Should not reach here", false); + } catch (Exception e) { + assertThat("Collection already exists.", ((CosmosClientException) e.getCause()).statusCode(), + equalTo(409)); + } + } + + /** + * You can convert a Flux to a CompletableFuture. 
+ */ + @Test(groups = "samples", timeOut = TIMEOUT) + public void transformObservableToCompletableFuture() throws Exception { + Flux> createCollectionObservable = client + .createCollection(getDatabaseLink(), collectionDefinition, null); + CompletableFuture> future = createCollectionObservable.single().toFuture(); + + ResourceResponse rrd = future.get(); + + assertThat(rrd.getRequestCharge(), greaterThan((double) 0)); + System.out.println(rrd.getRequestCharge()); + } + + /** + * READ a Collection in an Async manner + */ + @Test(groups = "samples", timeOut = TIMEOUT) + public void createAndReadCollection() throws Exception { + // CREATE a Collection + DocumentCollection documentCollection = client + .createCollection(getDatabaseLink(), collectionDefinition, null).single().block() + .getResource(); + + // READ the created collection using async api + Flux> readCollectionObservable = client + .readCollection(getCollectionLink(documentCollection), null); + + final CountDownLatch countDownLatch = new CountDownLatch(1); + + readCollectionObservable.single() // We know there is only single result + .subscribe(collectionResourceResponse -> { + System.out.println(collectionResourceResponse.getActivityId()); + countDownLatch.countDown(); + }, error -> { + System.err.println( + "an error occurred while reading the collection: actual cause: " + error.getMessage()); + countDownLatch.countDown(); + }); + + // Wait till read collection completes + countDownLatch.await(); + } + + /** + * DELETE a Collection in an Async manner + */ + @Test(groups = "samples", timeOut = TIMEOUT) + public void createAndDeleteCollection() throws Exception { + // CREATE a Collection + DocumentCollection documentCollection = client + .createCollection(getDatabaseLink(), collectionDefinition, null).single().block() + .getResource(); + + // DELETE the created collection using async api + Flux> deleteCollectionObservable = client + .deleteCollection(getCollectionLink(documentCollection), null); + + final CountDownLatch countDownLatch = new CountDownLatch(1); + + deleteCollectionObservable.single() // We know there is only single result + .subscribe(collectionResourceResponse -> { + System.out.println(collectionResourceResponse.getActivityId()); + countDownLatch.countDown(); + }, error -> { + System.err.println( + "an error occurred while deleting the collection: actual cause: " + error.getMessage()); + countDownLatch.countDown(); + }); + + // Wait till collection deletion completes + countDownLatch.await(); + } + + /** + * Query a Collection in an Async manner + */ + @Test(groups = "samples", timeOut = TIMEOUT) + public void collectionCreateAndQuery() throws Exception { + // CREATE a Collection + DocumentCollection collection = client + .createCollection(getDatabaseLink(), collectionDefinition, null).single().block() + .getResource(); + + // Query the created collection using async api + Flux> queryCollectionObservable = client.queryCollections( + getDatabaseLink(), String.format("SELECT * FROM r where r.id = '%s'", collection.id()), + null); + + final CountDownLatch countDownLatch = new CountDownLatch(1); + + queryCollectionObservable.collectList().subscribe(collectionFeedResponseList -> { + // toList() should return a list of size 1 + assertThat(collectionFeedResponseList.size(), equalTo(1)); + + // First element of the list should have only 1 result + FeedResponse collectionFeedResponse = collectionFeedResponseList.get(0); + assertThat(collectionFeedResponse.results().size(), equalTo(1)); + + // This collection should have the 
same id as the one we created + DocumentCollection foundCollection = collectionFeedResponse.results().get(0); + assertThat(foundCollection.id(), equalTo(collection.id())); + + System.out.println(collectionFeedResponse.activityId()); + countDownLatch.countDown(); + }, error -> { + System.err.println("an error occurred while querying the collection: actual cause: " + error.getMessage()); + countDownLatch.countDown(); + }); + + // Wait till collection query completes + countDownLatch.await(); + } + + private String getDatabaseLink() { + return "dbs/" + createdDatabase.id(); + } + + private String getCollectionLink(DocumentCollection collection) { + return "dbs/" + createdDatabase.id() + "/colls/" + collection.id(); + } + + private DocumentCollection getMultiPartitionCollectionDefinition() { + DocumentCollection collectionDefinition = new DocumentCollection(); + collectionDefinition.id(UUID.randomUUID().toString()); + + // Set the partitionKeyDefinition for a partitioned collection. + // Here, we are setting the partitionKey of the Collection to be /city + PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition(); + List paths = new ArrayList<>(); + paths.add("/city"); + partitionKeyDefinition.paths(paths); + collectionDefinition.setPartitionKey(partitionKeyDefinition); + + // Set indexing policy to be range range for string and number + IndexingPolicy indexingPolicy = new IndexingPolicy(); + List includedPaths = new ArrayList<>(); + IncludedPath includedPath = new IncludedPath(); + includedPath.path("/*"); + Collection indexes = new ArrayList<>(); + Index stringIndex = Index.Range(DataType.STRING); + BridgeInternal.setProperty(stringIndex, "precision", -1); + indexes.add(stringIndex); + + Index numberIndex = Index.Range(DataType.NUMBER); + BridgeInternal.setProperty(numberIndex, "precision", -1); + indexes.add(numberIndex); + includedPath.indexes(indexes); + includedPaths.add(includedPath); + indexingPolicy.setIncludedPaths(includedPaths); + collectionDefinition.setIndexingPolicy(indexingPolicy); + + return collectionDefinition; + } +} diff --git a/sdk/cosmos/examples/src/test/java/com/azure/data/cosmos/rx/examples/ConflictAPITest.java b/sdk/cosmos/examples/src/test/java/com/azure/data/cosmos/rx/examples/ConflictAPITest.java new file mode 100644 index 0000000000000..b6523b40f163f --- /dev/null +++ b/sdk/cosmos/examples/src/test/java/com/azure/data/cosmos/rx/examples/ConflictAPITest.java @@ -0,0 +1,177 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.rx.examples; + +import com.azure.data.cosmos.internal.AsyncDocumentClient; +import com.azure.data.cosmos.internal.Conflict; +import com.azure.data.cosmos.ConnectionMode; +import com.azure.data.cosmos.ConnectionPolicy; +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.internal.Database; +import com.azure.data.cosmos.internal.Document; +import com.azure.data.cosmos.DocumentClientTest; +import com.azure.data.cosmos.internal.DocumentCollection; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.PartitionKeyDefinition; +import com.azure.data.cosmos.internal.HttpConstants; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.CompletableFuture; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.notNullValue; + +/** + * This integration test class demonstrates how to use Async API for + * Conflicts. + *
+ * Also if you need to work with Future or CompletableFuture it is possible to + * transform a flux to CompletableFuture. Please see + * {@link #transformObservableToCompletableFuture()} + */ +public class ConflictAPITest extends DocumentClientTest { + private final static int TIMEOUT = 60000; + + private AsyncDocumentClient client; + private DocumentCollection createdCollection; + private Database createdDatabase; + + @BeforeClass(groups = "samples", timeOut = TIMEOUT) + public void setUp() { + + ConnectionPolicy connectionPolicy = new ConnectionPolicy().connectionMode(ConnectionMode.DIRECT); + + this.clientBuilder() + .withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.SESSION); + + this.client = this.clientBuilder().build(); + + DocumentCollection collectionDefinition = new DocumentCollection(); + collectionDefinition.id(UUID.randomUUID().toString()); + PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition(); + ArrayList paths = new ArrayList(); + paths.add("/mypk"); + partitionKeyDef.paths(paths); + collectionDefinition.setPartitionKey(partitionKeyDef); + + // CREATE database + createdDatabase = Utils.createDatabaseForTest(client); + + // CREATE collection + createdCollection = client + .createCollection("/dbs/" + createdDatabase.id(), collectionDefinition, null) + .single().block().getResource(); + + int numberOfDocuments = 20; + // Add documents + for (int i = 0; i < numberOfDocuments; i++) { + Document doc = new Document(String.format("{ 'id': 'loc%d', 'counter': %d}", i, i)); + client.createDocument(getCollectionLink(), doc, null, true).single().block(); + } + } + + @AfterClass(groups = "samples", timeOut = TIMEOUT) + public void shutdown() { + Utils.safeClean(client, createdDatabase); + Utils.safeClose(client); + } + + /** + * READ conflicts + * Converts the conflict read feed observable to blocking observable and + * uses that to find all conflicts + */ + @Test(groups = "samples", timeOut = TIMEOUT) + public void readConflicts_toBlocking_toIterator() { + // read all conflicts + int requestPageSize = 3; + FeedOptions options = new FeedOptions(); + options.maxItemCount(requestPageSize); + + Flux> conflictReadFeedObservable = client + .readConflicts(getCollectionLink(), options); + + // Covert the flux to an iterable, and then to iterator + Iterator> it = conflictReadFeedObservable.toIterable().iterator(); + + int expectedNumberOfConflicts = 0; + + int numberOfResults = 0; + while (it.hasNext()) { + FeedResponse page = it.next(); + System.out.println("items: " + page.results()); + String pageSizeAsString = page.responseHeaders().get(HttpConstants.HttpHeaders.ITEM_COUNT); + assertThat("header item count must be present", pageSizeAsString, notNullValue()); + int pageSize = Integer.valueOf(pageSizeAsString); + assertThat("Result size must match header item count", page.results(), hasSize(pageSize)); + numberOfResults += pageSize; + } + assertThat("number of total results", numberOfResults, equalTo(expectedNumberOfConflicts)); + } + + /** + * You can convert a Flux to a CompletableFuture. 
+ */ + @Test(groups = "samples", timeOut = TIMEOUT) + public void transformObservableToCompletableFuture() throws Exception { + int requestPageSize = 3; + FeedOptions options = new FeedOptions(); + options.maxItemCount(requestPageSize); + + Flux> conflictReadFeedObservable = client + .readConflicts(getCollectionLink(), options); + + // Convert to observable of list of pages + Mono>> allPagesObservable = conflictReadFeedObservable.collectList(); + + // Convert the observable of list of pages to a Future + CompletableFuture>> future = allPagesObservable.toFuture(); + + List> pageList = future.get(); + + int totalNumberOfRetrievedConflicts = 0; + for (FeedResponse page : pageList) { + totalNumberOfRetrievedConflicts += page.results().size(); + } + assertThat(0, equalTo(totalNumberOfRetrievedConflicts)); + } + + private String getCollectionLink() { + return "dbs/" + createdDatabase.id() + "/colls/" + createdCollection.id(); + } +} + diff --git a/sdk/cosmos/examples/src/test/java/com/azure/data/cosmos/rx/examples/DatabaseCRUDAsyncAPITest.java b/sdk/cosmos/examples/src/test/java/com/azure/data/cosmos/rx/examples/DatabaseCRUDAsyncAPITest.java new file mode 100644 index 0000000000000..02805f1ab1e76 --- /dev/null +++ b/sdk/cosmos/examples/src/test/java/com/azure/data/cosmos/rx/examples/DatabaseCRUDAsyncAPITest.java @@ -0,0 +1,316 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos.rx.examples; + +import com.azure.data.cosmos.internal.AsyncDocumentClient; +import com.azure.data.cosmos.ConnectionMode; +import com.azure.data.cosmos.ConnectionPolicy; +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.internal.Database; +import com.azure.data.cosmos.DocumentClientTest; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.internal.ResourceResponse; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CountDownLatch; +import java.util.function.Consumer; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; + +/** + * This integration test class demonstrates how to use Async API to create, + * delete, replace, and update Databases. + *
+ * NOTE: you can use the rxJava-based async API with Java 8 lambda expressions. Using + * the async API with lambda expressions is much more concise. + *
+ * You can also use the async API without Java 8 lambda expression support. + *
+ * For example + * <ul> + * <li>{@link #createDatabase_Async()} demonstrates how to use the async API + * with a Java 8 lambda expression.</li> + * <li>{@link #createDatabase_Async_withoutLambda()} demonstrates how to + * do the same thing without a lambda expression.</li> + * </ul> + *
+ * Also if you need to work with Future or CompletableFuture it is possible to + * transform a flux to CompletableFuture. Please see + * {@link #transformObservableToCompletableFuture()} + */ +public class DatabaseCRUDAsyncAPITest extends DocumentClientTest { + private final static int TIMEOUT = 60000; + private final List databaseIds = new ArrayList<>(); + + private AsyncDocumentClient client; + + @BeforeClass(groups = "samples", timeOut = TIMEOUT) + public void setUp() { + + ConnectionPolicy connectionPolicy = new ConnectionPolicy().connectionMode(ConnectionMode.DIRECT); + + this.clientBuilder() + .withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.SESSION); + + this.client = this.clientBuilder().build(); + } + + private Database getDatabaseDefinition() { + Database databaseDefinition = new Database(); + databaseDefinition.id(Utils.generateDatabaseId()); + + databaseIds.add(databaseDefinition.id()); + + return databaseDefinition; + } + + @AfterClass(groups = "samples", timeOut = TIMEOUT) + public void shutdown() { + for (String id : databaseIds) { + Utils.safeClean(client, id); + } + Utils.safeClose(client); + } + + /** + * CREATE a database using async api. + * This test uses java8 lambda expression. + * See testCreateDatabase_Async_withoutLambda for usage without lambda. + */ + @Test(groups = "samples", timeOut = TIMEOUT) + public void createDatabase_Async() throws Exception { + Flux> createDatabaseObservable = client.createDatabase(getDatabaseDefinition(), + null); + + final CountDownLatch completionLatch = new CountDownLatch(1); + + createDatabaseObservable.single() // We know there is only single result + .subscribe(databaseResourceResponse -> { + System.out.println(databaseResourceResponse.getActivityId()); + completionLatch.countDown(); + }, error -> { + System.err.println( + "an error occurred while creating the database: actual cause: " + error.getMessage()); + completionLatch.countDown(); + }); + + // Wait till database creation completes + completionLatch.await(); + } + + /** + * CREATE a database using async api, without java8 lambda expressions + */ + @Test(groups = "samples", timeOut = TIMEOUT) + public void createDatabase_Async_withoutLambda() throws Exception { + Flux> createDatabaseObservable = client.createDatabase(getDatabaseDefinition(), + null); + + final CountDownLatch completionLatch = new CountDownLatch(1); + Consumer> onDatabaseCreationAction = new Consumer>() { + + @Override + public void accept(ResourceResponse resourceResponse) { + // Database is created + System.out.println(resourceResponse.getActivityId()); + completionLatch.countDown(); + } + }; + + Consumer onError = new Consumer() { + @Override + public void accept(Throwable error) { + System.err + .println("an error occurred while creating the database: actual cause: " + error.getMessage()); + completionLatch.countDown(); + } + }; + + createDatabaseObservable.single() // We know there is only a single event + .subscribe(onDatabaseCreationAction, onError); + + // Wait till database creation completes + completionLatch.await(); + } + + /** + * CREATE a database in a blocking manner + */ + @Test(groups = "samples", timeOut = TIMEOUT) + public void createDatabase_toBlocking() { + Flux> createDatabaseObservable = client.createDatabase(getDatabaseDefinition(), + null); + + // toBlocking() converts to a blocking observable. + // single() gets the only result. 
+ createDatabaseObservable.single().block(); + } + + /** + * Attempt to create a database which already exists + * - First create a database + * - Using the async api generate an async database creation observable + * - Converts the Observable to blocking using Observable.toBlocking() api + * - Catch already exist failure (409) + */ + @Test(groups = "samples", timeOut = TIMEOUT) + public void createDatabase_toBlocking_DatabaseAlreadyExists_Fails() { + Database databaseDefinition = getDatabaseDefinition(); + client.createDatabase(databaseDefinition, null).single().block(); + + // CREATE the database for test. + Flux> databaseForTestObservable = client + .createDatabase(databaseDefinition, null); + + try { + databaseForTestObservable.single() // Single + .block(); // Blocks to get the result + assertThat("Should not reach here", false); + } catch (Exception e) { + assertThat("Database already exists.", ((CosmosClientException) e.getCause()).statusCode(), + equalTo(409)); + } + } + + /** + * You can convert a Flux to a CompletableFuture. + */ + @Test(groups = "samples", timeOut = TIMEOUT) + public void transformObservableToCompletableFuture() throws Exception { + Flux> createDatabaseObservable = client.createDatabase(getDatabaseDefinition(), + null); + CompletableFuture> future = createDatabaseObservable.single().toFuture(); + + ResourceResponse rrd = future.get(); + + assertThat(rrd.getRequestCharge(), greaterThan((double) 0)); + System.out.print(rrd.getRequestCharge()); + } + + /** + * READ a Database in an Async manner + */ + @Test(groups = "samples", timeOut = TIMEOUT) + public void createAndReadDatabase() throws Exception { + // CREATE a database + Database database = client.createDatabase(getDatabaseDefinition(), null).single().block().getResource(); + + // READ the created database using async api + Flux> readDatabaseObservable = client.readDatabase("dbs/" + database.id(), + null); + + final CountDownLatch completionLatch = new CountDownLatch(1); + + readDatabaseObservable.single() // We know there is only single result + .subscribe(databaseResourceResponse -> { + System.out.println(databaseResourceResponse.getActivityId()); + completionLatch.countDown(); + }, error -> { + System.err.println( + "an error occurred while reading the database: actual cause: " + error.getMessage()); + completionLatch.countDown(); + }); + + // Wait till read database completes + completionLatch.await(); + } + + /** + * DELETE a Database in an Async manner + */ + @Test(groups = "samples", timeOut = TIMEOUT) + public void createAndDeleteDatabase() throws Exception { + // CREATE a database + Database database = client.createDatabase(getDatabaseDefinition(), null).single().block().getResource(); + + // DELETE the created database using async api + Flux> deleteDatabaseObservable = client + .deleteDatabase("dbs/" + database.id(), null); + + final CountDownLatch completionLatch = new CountDownLatch(1); + + deleteDatabaseObservable.single() // We know there is only single result + .subscribe(databaseResourceResponse -> { + System.out.println(databaseResourceResponse.getActivityId()); + completionLatch.countDown(); + }, error -> { + System.err.println( + "an error occurred while deleting the database: actual cause: " + error.getMessage()); + completionLatch.countDown(); + }); + + // Wait till database deletion completes + completionLatch.await(); + } + + /** + * Query a Database in an Async manner + */ + @Test(groups = "samples", timeOut = TIMEOUT) + public void databaseCreateAndQuery() throws Exception { + // 
CREATE a database + Database databaseDefinition = getDatabaseDefinition(); + client.createDatabase(databaseDefinition, null).single().block().getResource(); + + // Query the created database using async api + Flux> queryDatabaseObservable = client + .queryDatabases(String.format("SELECT * FROM r where r.id = '%s'", databaseDefinition.id()), null); + + final CountDownLatch completionLatch = new CountDownLatch(1); + + queryDatabaseObservable.collectList().subscribe(databaseFeedResponseList -> { + // toList() should return a list of size 1 + assertThat(databaseFeedResponseList.size(), equalTo(1)); + + // First element of the list should have only 1 result + FeedResponse databaseFeedResponse = databaseFeedResponseList.get(0); + assertThat(databaseFeedResponse.results().size(), equalTo(1)); + + // This database should have the same id as the one we created + Database foundDatabase = databaseFeedResponse.results().get(0); + assertThat(foundDatabase.id(), equalTo(databaseDefinition.id())); + + System.out.println(databaseFeedResponse.activityId()); + completionLatch.countDown(); + }, error -> { + System.err.println("an error occurred while querying the database: actual cause: " + error.getMessage()); + completionLatch.countDown(); + }); + + // Wait till database query completes + completionLatch.await(); + } +} diff --git a/sdk/cosmos/examples/src/test/java/com/azure/data/cosmos/rx/examples/DocumentCRUDAsyncAPITest.java b/sdk/cosmos/examples/src/test/java/com/azure/data/cosmos/rx/examples/DocumentCRUDAsyncAPITest.java new file mode 100644 index 0000000000000..6232cb561f2fc --- /dev/null +++ b/sdk/cosmos/examples/src/test/java/com/azure/data/cosmos/rx/examples/DocumentCRUDAsyncAPITest.java @@ -0,0 +1,542 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos.rx.examples; + +import com.azure.data.cosmos.internal.AsyncDocumentClient; +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.ConnectionMode; +import com.azure.data.cosmos.ConnectionPolicy; +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.internal.Database; +import com.azure.data.cosmos.internal.Document; +import com.azure.data.cosmos.DocumentClientTest; +import com.azure.data.cosmos.internal.DocumentCollection; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.PartitionKey; +import com.azure.data.cosmos.PartitionKeyDefinition; +import com.azure.data.cosmos.internal.RequestOptions; +import com.azure.data.cosmos.internal.ResourceResponse; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.databind.DeserializationFeature; +import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.commons.lang3.RandomUtils; +import org.testng.Assert; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CountDownLatch; +import java.util.function.Consumer; + +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; + +/** + * This integration test class demonstrates how to use Async API to create, + * delete, replace, and upsert Documents. If you are interested in examples for + * querying for documents please see {@link DocumentQueryAsyncAPITest} + *
+ * <p>
+ * NOTE: you can use the Reactor based async API with Java 8 lambda expressions.
+ * Using the async API with lambda expressions is much more concise.
+ * <p>
+ * You can also use the async API without Java 8 lambda expressions.
+ * <p>
+ * For example:
+ * <ul>
+ * <li>{@link #createDocument_Async()} demonstrates how to use the async API
+ * with Java 8 lambda expressions.
+ *
+ * <li>{@link #createDocument_Async_withoutLambda()} demonstrates how to do
+ * the same thing without lambda expressions.
+ * </ul>
+ * <p>
+ * Also if you need to work with Future or CompletableFuture it is possible to + * transform a flux to CompletableFuture. Please see + * {@link #transformObservableToCompletableFuture()} + */ +public class DocumentCRUDAsyncAPITest extends DocumentClientTest { + + private final static String PARTITION_KEY_PATH = "/mypk"; + private final static int TIMEOUT = 60000; + + private AsyncDocumentClient client; + private Database createdDatabase; + private DocumentCollection createdCollection; + + @BeforeClass(groups = "samples", timeOut = TIMEOUT) + public void setUp() { + + ConnectionPolicy connectionPolicy = new ConnectionPolicy().connectionMode(ConnectionMode.DIRECT); + + this.clientBuilder() + .withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.SESSION); + + this.client = this.clientBuilder().build(); + + DocumentCollection collectionDefinition = new DocumentCollection(); + collectionDefinition.id(UUID.randomUUID().toString()); + + PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition(); + ArrayList partitionKeyPaths = new ArrayList(); + partitionKeyPaths.add(PARTITION_KEY_PATH); + partitionKeyDefinition.paths(partitionKeyPaths); + collectionDefinition.setPartitionKey(partitionKeyDefinition); + + // CREATE database + createdDatabase = Utils.createDatabaseForTest(client); + + // CREATE collection + createdCollection = client + .createCollection("dbs/" + createdDatabase.id(), collectionDefinition, null) + .single().block().getResource(); + } + + @AfterClass(groups = "samples", timeOut = TIMEOUT) + public void shutdown() { + Utils.safeClean(client, createdDatabase); + Utils.safeClose(client); + } + + /** + * CREATE a document using java8 lambda expressions + */ + @Test(groups = "samples", timeOut = TIMEOUT) + public void createDocument_Async() throws Exception { + Document doc = new Document(String.format("{ 'id': 'doc%s', 'counter': '%d'}", UUID.randomUUID().toString(), 1)); + Flux> createDocumentObservable = client + .createDocument(getCollectionLink(), doc, null, true); + + final CountDownLatch completionLatch = new CountDownLatch(1); + + // Subscribe to Document resource response emitted by the observable + createDocumentObservable.single() // We know there will be one response + .subscribe(documentResourceResponse -> { + System.out.println(documentResourceResponse.getActivityId()); + completionLatch.countDown(); + }, error -> { + System.err.println( + "an error occurred while creating the document: actual cause: " + error.getMessage()); + completionLatch.countDown(); + }); + + // Wait till document creation completes + completionLatch.await(); + } + + /** + * CREATE a document without java8 lambda expressions + */ + @Test(groups = "samples", timeOut = TIMEOUT) + public void createDocument_Async_withoutLambda() throws Exception { + Document doc = new Document(String.format("{ 'id': 'doc%s', 'counter': '%d'}", UUID.randomUUID().toString(), 1)); + Flux> createDocumentObservable = client + .createDocument(getCollectionLink(), doc, null, true); + + final CountDownLatch completionLatch = new CountDownLatch(1); + + Consumer> onNext = new Consumer>() { + + @Override + public void accept(ResourceResponse documentResourceResponse) { + System.out.println(documentResourceResponse.getActivityId()); + completionLatch.countDown(); + } + }; + + Consumer onError = new Consumer() { + + @Override + public void accept(Throwable error) { + System.err + 
.println("an error occurred while creating the document: actual cause: " + error.getMessage()); + completionLatch.countDown(); + } + }; + + // Subscribe to Document resource response emitted by the observable + createDocumentObservable.single() // We know there will be one response + .subscribe(onNext, onError); + + // Wait till document creation completes + completionLatch.await(); + } + + /** + * CREATE a document in a blocking manner + */ + @Test(groups = "samples", timeOut = TIMEOUT) + public void createDocument_toBlocking() { + Document doc = new Document(String.format("{ 'id': 'doc%s', 'counter': '%d'}", UUID.randomUUID().toString(), 1)); + Flux> createDocumentObservable = client + .createDocument(getCollectionLink(), doc, null, true); + + // toBlocking() converts to a blocking observable. + // single() gets the only result. + createDocumentObservable.single().block(); + } + + /** + * CREATE a document with a programmatically set definition, in an Async manner + */ + @Test(groups = "samples", timeOut = TIMEOUT) + public void createDocumentWithProgrammableDocumentDefinition() throws Exception { + Document documentDefinition = new Document(); + documentDefinition.id("test-document"); + BridgeInternal.setProperty(documentDefinition, "counter", 1); + + // CREATE a document + Document createdDocument = client + .createDocument(getCollectionLink(), documentDefinition, null, false).single().block() + .getResource(); + + RequestOptions options = new RequestOptions(); + options.setPartitionKey(PartitionKey.None); + // READ the created document + Flux> readDocumentObservable = client + .readDocument(getDocumentLink(createdDocument), null); + + final CountDownLatch completionLatch = new CountDownLatch(1); + + readDocumentObservable.subscribe(documentResourceResponse -> { + Document readDocument = documentResourceResponse.getResource(); + + // The read document must be the same as the written document + assertThat(readDocument.id(), equalTo("test-document")); + assertThat(readDocument.getInt("counter"), equalTo(1)); + System.out.println(documentResourceResponse.getActivityId()); + completionLatch.countDown(); + }, error -> { + System.err.println("an error occured while creating the document: actual cause: " + error.getMessage()); + completionLatch.countDown(); + }); + + completionLatch.await(); + } + + /** + * CREATE 10 documents and sum up all the documents creation request charges + */ + @Test(groups = "samples", timeOut = TIMEOUT) + public void documentCreation_SumUpRequestCharge() throws Exception { + // CREATE 10 documents + List>> listOfCreateDocumentObservables = new ArrayList<>(); + for (int i = 0; i < 10; i++) { + Document doc = new Document(String.format("{ 'id': 'doc%s', 'counter': '%d'}", UUID.randomUUID().toString(), i)); + + Flux> createDocumentObservable = client + .createDocument(getCollectionLink(), doc, null, false); + listOfCreateDocumentObservables.add(createDocumentObservable); + } + + // Merge all document creation observables into one observable + Flux> mergedObservable = Flux.merge(listOfCreateDocumentObservables); + + // CREATE a new observable emitting the total charge of creating all 10 + // documents. 
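+ // Flux.merge interleaves the ten create responses as they arrive; map extracts each
+ // request charge and reduce(Double::sum) folds them into one total.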
+ Flux totalChargeObservable = mergedObservable + .map(ResourceResponse::getRequestCharge) + // Map to request charge + .reduce(Double::sum).flux(); + // Sum up all the charges + + final CountDownLatch completionLatch = new CountDownLatch(1); + + // Subscribe to the total request charge observable + totalChargeObservable.subscribe(totalCharge -> { + // Print the total charge + System.out.println(totalCharge); + completionLatch.countDown(); + }, e -> completionLatch.countDown() + ); + + completionLatch.await(); + } + + /** + * Attempt to create a document which already exists + * - First create a document + * - Using the async api generate an async document creation observable + * - Converts the Observable to blocking using Observable.toBlocking() api + * - Catch already exist failure (409) + */ + @Test(groups = "samples", timeOut = TIMEOUT) + public void createDocument_toBlocking_DocumentAlreadyExists_Fails() { + Document doc = new Document(String.format("{ 'id': 'doc%s', 'counter': '%d'}", UUID.randomUUID().toString(), 1)); + client.createDocument(getCollectionLink(), doc, null, false).single().block(); + + // CREATE the document + Flux> createDocumentObservable = client + .createDocument(getCollectionLink(), doc, null, false); + + try { + createDocumentObservable.single() // Converts the observable to a single observable + .block(); // Blocks and gets the result + Assert.fail("Document Already Exists. Document Creation must fail"); + } catch (Exception e) { + assertThat("Document already exists.", ((CosmosClientException) e.getCause()).statusCode(), + equalTo(409)); + } + } + + /** + * Attempt to create a document which already exists + * - First create a document + * - Using the async api generate an async document creation observable + * - Converts the Observable to blocking using Observable.toBlocking() api + * - Catch already exist failure (409) + */ + @Test(groups = "samples", timeOut = TIMEOUT) + public void createDocument_Async_DocumentAlreadyExists_Fails() throws Exception { + Document doc = new Document(String.format("{ 'id': 'doc%s', 'counter': '%d'}", UUID.randomUUID().toString(), 1)); + client.createDocument(getCollectionLink(), doc, null, false).single().block(); + + // CREATE the document + Flux> createDocumentObservable = client + .createDocument(getCollectionLink(), doc, null, false); + + List errorList = Collections.synchronizedList(new ArrayList<>()); + + createDocumentObservable.subscribe(resourceResponse -> { + }, error -> { + errorList.add(error); + System.err.println("failed to create a document due to: " + error.getMessage()); + }); + + Thread.sleep(2000); + assertThat(errorList, hasSize(1)); + assertThat(errorList.get(0), is(instanceOf(CosmosClientException.class))); + assertThat(((CosmosClientException) errorList.get(0)).statusCode(), equalTo(409)); + } + + /** + * REPLACE a document + */ + @Test(groups = "samples", timeOut = TIMEOUT) + public void documentReplace_Async() throws Exception { + // CREATE a document + Document createdDocument = new Document(String.format("{ 'id': 'doc%s', 'counter': '%d'}", UUID.randomUUID().toString(), 1)); + createdDocument = client.createDocument(getCollectionLink(), createdDocument, null, false).single() + .block().getResource(); + + // Try to replace the existing document + Document replacingDocument = new Document( + String.format("{ 'id': 'doc%s', 'counter': '%d', 'new-prop' : '2'}", createdDocument.id(), 1)); + Flux> replaceDocumentObservable = client + .replaceDocument(getDocumentLink(createdDocument), replacingDocument, 
null); + + List> capturedResponse = Collections + .synchronizedList(new ArrayList<>()); + + replaceDocumentObservable.subscribe(resourceResponse -> { + capturedResponse.add(resourceResponse); + }); + + Thread.sleep(2000); + + assertThat(capturedResponse, hasSize(1)); + assertThat(capturedResponse.get(0).getResource().get("new-prop"), equalTo("2")); + } + + /** + * Upsert a document + */ + @Test(groups = "samples", timeOut = TIMEOUT) + public void documentUpsert_Async() throws Exception { + // CREATE a document + Document doc = new Document(String.format("{ 'id': 'doc%s', 'counter': '%d'}", UUID.randomUUID().toString(), 1)); + client.createDocument(getCollectionLink(), doc, null, false).single().block(); + + // Upsert the existing document + Document upsertingDocument = new Document( + String.format("{ 'id': 'doc%s', 'counter': '%d', 'new-prop' : '2'}", doc.id(), 1)); + Flux> upsertDocumentObservable = client + .upsertDocument(getCollectionLink(), upsertingDocument, null, false); + + List> capturedResponse = Collections + .synchronizedList(new ArrayList<>()); + + upsertDocumentObservable.subscribe(resourceResponse -> { + capturedResponse.add(resourceResponse); + }); + + Thread.sleep(4000); + + assertThat(capturedResponse, hasSize(1)); + assertThat(capturedResponse.get(0).getResource().get("new-prop"), equalTo("2")); + } + + /** + * DELETE a document + */ + @Test(groups = "samples", timeOut = TIMEOUT) + public void documentDelete_Async() throws Exception { + // CREATE a document + Document createdDocument = new Document(String.format("{ 'id': 'doc%s', 'counter': '%d', 'mypk' : '%s'}", UUID.randomUUID().toString(), 1, UUID.randomUUID().toString())); + createdDocument = client.createDocument(getCollectionLink(), createdDocument, null, false).single() + .block().getResource(); + + RequestOptions options = new RequestOptions(); + options.setPartitionKey(new PartitionKey(createdDocument.getString("mypk"))); + + // DELETE the existing document + Flux> deleteDocumentObservable = client + .deleteDocument(getDocumentLink(createdDocument), options); + + List> capturedResponse = Collections + .synchronizedList(new ArrayList<>()); + + deleteDocumentObservable.subscribe(resourceResponse -> { + capturedResponse.add(resourceResponse); + }); + + Thread.sleep(2000); + + assertThat(capturedResponse, hasSize(1)); + + // Assert document is deleted + FeedOptions queryOptions = new FeedOptions(); + queryOptions.enableCrossPartitionQuery(true); + List listOfDocuments = client + .queryDocuments(getCollectionLink(), String.format("SELECT * FROM r where r.id = '%s'", createdDocument.id()), queryOptions) + .map(FeedResponse::results) // Map page to its list of documents + .concatMap(Flux::fromIterable) // Flatten the observable + .collectList() // Transform to a observable + .single() // Gets the Mono> + .block(); // Block + + // Assert that there is no document found + assertThat(listOfDocuments, hasSize(0)); + } + + /** + * READ a document + */ + @Test(groups = "samples", timeOut = TIMEOUT) + public void documentRead_Async() throws Exception { + // CREATE a document + Document createdDocument = new Document(String.format("{ 'id': 'doc%s', 'counter': '%d', 'mypk' : '%s'}", UUID.randomUUID().toString(), 1, UUID.randomUUID().toString())); + createdDocument = client.createDocument(getCollectionLink(), createdDocument, null, false).single() + .block().getResource(); + + // READ the document + RequestOptions options = new RequestOptions(); + options.setPartitionKey(new PartitionKey(createdDocument.getString("mypk"))); + 
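+ // A point read against a partitioned collection needs the document's partition key,
+ // which is supplied here through RequestOptions.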
Flux> readDocumentObservable = client + .readDocument(getDocumentLink(createdDocument), options); + + List> capturedResponse = Collections + .synchronizedList(new ArrayList<>()); + + readDocumentObservable.subscribe(resourceResponse -> { + capturedResponse.add(resourceResponse); + }); + + Thread.sleep(2000); + + // Assert document is retrieved + assertThat(capturedResponse, hasSize(1)); + } + + private static class TestObject { + @JsonProperty("mypk") + private String mypk; + + @JsonProperty("id") + private String id; + + @JsonProperty("prop") + private String prop; + } + + @Test(groups = {"samples"}, timeOut = TIMEOUT) + public void customSerialization() throws Exception { + ObjectMapper mapper = new ObjectMapper(); + mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + mapper.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, true); + + TestObject testObject = new TestObject(); + testObject.id = UUID.randomUUID().toString(); + testObject.mypk = UUID.randomUUID().toString(); + testObject.prop = UUID.randomUUID().toString(); + String itemAsJsonString = mapper.writeValueAsString(testObject); + Document doc = new Document(itemAsJsonString); + + Document createdDocument = client + .createDocument(getCollectionLink(), doc, null, false) + .single() + .block() + .getResource(); + + RequestOptions options = new RequestOptions(); + options.setPartitionKey(new PartitionKey(testObject.mypk)); + + Document readDocument = client + .readDocument(createdDocument.selfLink(), options) + .single() + .block() + .getResource(); + + TestObject readObject = mapper.readValue(readDocument.toJson(), TestObject.class); + assertThat(readObject.prop, equalTo(testObject.prop)); + } + + /** + * You can convert a Flux to a CompletableFuture. + */ + @Test(groups = "samples", timeOut = TIMEOUT) + public void transformObservableToCompletableFuture() throws Exception { + Document doc = new Document(String.format("{ 'id': 'doc%d', 'counter': '%d'}", RandomUtils.nextInt(), 1)); + Flux> createDocumentObservable = client + .createDocument(getCollectionLink(), doc, null, false); + CompletableFuture> listenableFuture = createDocumentObservable.single().toFuture(); + + ResourceResponse rrd = listenableFuture.get(); + + assertThat(rrd.getRequestCharge(), greaterThan((double) 0)); + System.out.print(rrd.getRequestCharge()); + } + + private String getCollectionLink() { + return "dbs/" + createdDatabase.id() + "/colls/" + createdCollection.id(); + } + + private String getDocumentLink(Document createdDocument) { + return "dbs/" + createdDatabase.id() + "/colls/" + createdCollection.id() + "/docs/" + createdDocument.id(); + } +} diff --git a/sdk/cosmos/examples/src/test/java/com/azure/data/cosmos/rx/examples/DocumentQueryAsyncAPITest.java b/sdk/cosmos/examples/src/test/java/com/azure/data/cosmos/rx/examples/DocumentQueryAsyncAPITest.java new file mode 100644 index 0000000000000..733097ab8cd7e --- /dev/null +++ b/sdk/cosmos/examples/src/test/java/com/azure/data/cosmos/rx/examples/DocumentQueryAsyncAPITest.java @@ -0,0 +1,496 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * 
furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.rx.examples; + +import com.azure.data.cosmos.internal.AsyncDocumentClient; +import com.azure.data.cosmos.ConnectionMode; +import com.azure.data.cosmos.ConnectionPolicy; +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.internal.Database; +import com.azure.data.cosmos.internal.Document; +import com.azure.data.cosmos.DocumentClientTest; +import com.azure.data.cosmos.internal.DocumentCollection; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.PartitionKeyDefinition; +import com.azure.data.cosmos.SqlParameterList; +import com.azure.data.cosmos.internal.RequestOptions; +import com.azure.data.cosmos.Resource; +import com.azure.data.cosmos.SqlQuerySpec; +import com.azure.data.cosmos.internal.HttpConstants; +import org.apache.commons.lang3.RandomStringUtils; +import org.reactivestreams.Subscription; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Consumer; +import java.util.function.Predicate; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.notNullValue; + +/** + * This integration test class demonstrates how to use Async API to query for + * Documents. + *
+ * <p>
+ * NOTE: you can use the Reactor based async API with Java 8 lambda expressions.
+ * Using the async API with lambda expressions is much more concise.
+ * <p>
+ * You can also use the async API without Java 8 lambda expressions.
+ * <p>
+ * For example:
+ * <ul>
+ * <li>{@link #queryDocuments_Async()} demonstrates how to use the async API
+ * with Java 8 lambda expressions.
+ *
+ * <li>{@link #queryDocuments_Async_withoutLambda()} demonstrates how to do
+ * the same thing without lambda expressions.
+ * </ul>
+ * <p>
+ * Also if you need to work with Future or CompletableFuture it is possible to + * transform a flux to CompletableFuture. Please see + * {@link #transformObservableToCompletableFuture()} + */ +public class DocumentQueryAsyncAPITest extends DocumentClientTest { + + private final static int TIMEOUT = 3 * 60000; + + private AsyncDocumentClient client; + private DocumentCollection createdCollection; + private Database createdDatabase; + private int numberOfDocuments; + + @BeforeClass(groups = "samples", timeOut = TIMEOUT) + public void setUp() { + + ConnectionPolicy connectionPolicy = new ConnectionPolicy().connectionMode(ConnectionMode.DIRECT); + + this.clientBuilder() + .withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.SESSION); + + this.client = this.clientBuilder().build(); + + DocumentCollection collectionDefinition = new DocumentCollection(); + collectionDefinition.id(UUID.randomUUID().toString()); + PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition(); + ArrayList paths = new ArrayList(); + paths.add("/mypk"); + partitionKeyDef.paths(paths); + collectionDefinition.setPartitionKey(partitionKeyDef); + + // CREATE database + + createdDatabase = Utils.createDatabaseForTest(client); + + // CREATE collection + createdCollection = client + .createCollection("dbs/" + createdDatabase.id(), collectionDefinition, null) + .single().block().getResource(); + + numberOfDocuments = 20; + // Add documents + for (int i = 0; i < numberOfDocuments; i++) { + Document doc = new Document(String.format("{ 'id': 'loc%d', 'counter': %d}", i, i)); + client.createDocument(getCollectionLink(), doc, null, true).single().block(); + } + } + + @AfterClass(groups = "samples", timeOut = TIMEOUT) + public void shutdown() { + Utils.safeClean(client, createdDatabase); + Utils.safeClose(client); + } + + /** + * Query for documents using java8 lambda expressions + * Creates a document query observable and verifies the async behavior + * of document query observable + */ + @Test(groups = "samples", timeOut = TIMEOUT) + public void queryDocuments_Async() throws Exception { + int requestPageSize = 3; + FeedOptions options = new FeedOptions(); + options.maxItemCount(requestPageSize); + options.enableCrossPartitionQuery(true); + + Flux> documentQueryObservable = client + .queryDocuments(getCollectionLink(), "SELECT * FROM root", options); + + final CountDownLatch mainThreadBarrier = new CountDownLatch(1); + + final CountDownLatch resultsCountDown = new CountDownLatch(numberOfDocuments); + + documentQueryObservable.subscribe(page -> { + try { + // Waits on the barrier + mainThreadBarrier.await(); + } catch (InterruptedException e) { + } + + for (@SuppressWarnings("unused") + Document d : page.results()) { + resultsCountDown.countDown(); + } + }); + + // The following code will run concurrently + System.out.println("action is subscribed to the observable"); + + // Release main thread barrier + System.out.println("after main thread barrier is released, subscribed observable action can continue"); + mainThreadBarrier.countDown(); + + System.out.println("waiting for all the results using result count down latch"); + + resultsCountDown.await(); + } + + /** + * Query for documents, without using java8 lambda expressions + * Creates a document query observable and verifies the async behavior + * of document query observable + * NOTE: does the same thing as testQueryDocuments_Async 
without java8 lambda + * expression + */ + @Test(groups = "samples", timeOut = TIMEOUT) + public void queryDocuments_Async_withoutLambda() throws Exception { + int requestPageSize = 3; + FeedOptions options = new FeedOptions(); + options.maxItemCount(requestPageSize); + options.enableCrossPartitionQuery(true); + + Flux> documentQueryObservable = client + .queryDocuments(getCollectionLink(), "SELECT * FROM root", options); + + final CountDownLatch mainThreadBarrier = new CountDownLatch(1); + + final CountDownLatch resultsCountDown = new CountDownLatch(numberOfDocuments); + + Consumer> actionPerPage = new Consumer>() { + + @SuppressWarnings("unused") + @Override + public void accept(FeedResponse t) { + + try { + // waits on the barrier + mainThreadBarrier.await(); + } catch (InterruptedException e) { + } + + for (Document d : t.results()) { + resultsCountDown.countDown(); + } + } + }; + + documentQueryObservable.subscribe(actionPerPage); + // The following code will run concurrently + + System.out.println("action is subscribed to the observable"); + + // Release main thread barrier + System.out.println("after main thread barrier is released, subscribed observable action can continue"); + mainThreadBarrier.countDown(); + + System.out.println("waiting for all the results using result count down latch"); + + resultsCountDown.await(); + } + + /** + * Queries for documents and sum up the total request charge + */ + @Test(groups = "samples", timeOut = TIMEOUT) + public void queryDocuments_findTotalRequestCharge() throws Exception { + int requestPageSize = 3; + FeedOptions options = new FeedOptions(); + options.maxItemCount(requestPageSize); + options.enableCrossPartitionQuery(true); + + Flux totalChargeObservable = client + .queryDocuments(getCollectionLink(), "SELECT * FROM root", options) + .map(FeedResponse::requestCharge) // Map the page to its request charge + .reduce(Double::sum).flux(); // Sum up all the request charges + + final CountDownLatch successfulCompletionLatch = new CountDownLatch(1); + + totalChargeObservable.subscribe(totalCharge -> { + System.out.println(totalCharge); + successfulCompletionLatch.countDown(); + }); + + successfulCompletionLatch.await(); + } + + /** + * Subscriber unsubscribes after first page + */ + @Test(groups = "samples", timeOut = TIMEOUT) + public void queryDocuments_unsubscribeAfterFirstPage() throws Exception { + int requestPageSize = 3; + FeedOptions options = new FeedOptions(); + options.maxItemCount(requestPageSize); + options.enableCrossPartitionQuery(true); + + Flux> requestChargeObservable = client + .queryDocuments(getCollectionLink(), "SELECT * FROM root", options); + + AtomicInteger onNextCounter = new AtomicInteger(); + AtomicInteger onCompletedCounter = new AtomicInteger(); + AtomicInteger onErrorCounter = new AtomicInteger(); + + // Subscribe to the pages of Documents emitted by the observable + AtomicReference s = new AtomicReference<>(); + requestChargeObservable.subscribe(documentFeedResponse -> { + onNextCounter.incrementAndGet(); + s.get().cancel(); + }, error -> { + onErrorCounter.incrementAndGet(); + }, onCompletedCounter::incrementAndGet, subscription -> { + s.set(subscription); + subscription.request(1); + }); + + Thread.sleep(4000); + + // After subscriber unsubscribes, it doesn't receive any more pages. 
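+ // The Subscription requested exactly one page and was cancelled inside onNext, so
+ // onNext fired once and neither onComplete nor onError was ever signalled.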
+ assertThat(onNextCounter.get(), equalTo(1)); + assertThat(onCompletedCounter.get(), equalTo(0)); + assertThat(onErrorCounter.get(), equalTo(0)); + } + + /** + * Queries for documents and filter out the fetched results + */ + @Test(groups = "samples", timeOut = TIMEOUT) + public void queryDocuments_filterFetchedResults() throws Exception { + int requestPageSize = 3; + FeedOptions options = new FeedOptions(); + options.maxItemCount(requestPageSize); + options.enableCrossPartitionQuery(true); + + Predicate isPrimeNumber = new Predicate() { + + @Override + public boolean test(Document doc) { + int n = doc.getInt("counter"); + if (n <= 1) + return false; + for (int i = 2; 2 * i < n; i++) { + if (n % i == 0) + return false; + } + return true; + } + }; + + List resultList = Collections.synchronizedList(new ArrayList()); + + client.queryDocuments(getCollectionLink(), "SELECT * FROM root", options) + .map(FeedResponse::results) // Map the page to the list of documents + .concatMap(Flux::fromIterable) // Flatten the Flux> to Flux + .filter(isPrimeNumber) // Filter documents using isPrimeNumber predicate + .subscribe(doc -> resultList.add(doc)); // Collect the results + + Thread.sleep(4000); + + int expectedNumberOfPrimes = 0; + // Find all the documents with prime number counter + for (int i = 0; i < numberOfDocuments; i++) { + boolean isPrime = true; + if (i <= 1) + isPrime = false; + for (int j = 2; 2 * j < i; j++) { + if (i % j == 0) { + isPrime = false; + break; + } + } + + if (isPrime) { + expectedNumberOfPrimes++; + } + } + + // Assert that we only collected what's expected + assertThat(resultList, hasSize(expectedNumberOfPrimes)); + } + + /** + * Queries for documents + * Converts the document query observable to blocking observable and + * uses that to find all documents + */ + @Test(groups = "samples", timeOut = TIMEOUT) + public void queryDocuments_toBlocking_toIterator() { + // Query for documents + int requestPageSize = 3; + FeedOptions options = new FeedOptions(); + options.maxItemCount(requestPageSize); + options.enableCrossPartitionQuery(true); + + Flux> documentQueryObservable = client + .queryDocuments(getCollectionLink(), "SELECT * FROM root", options); + + // Covert the observable to a blocking observable, then convert the blocking + // observable to an iterator + Iterator> it = documentQueryObservable.toIterable().iterator(); + + int pageCounter = 0; + int numberOfResults = 0; + while (it.hasNext()) { + FeedResponse page = it.next(); + pageCounter++; + + String pageSizeAsString = page.responseHeaders().get(HttpConstants.HttpHeaders.ITEM_COUNT); + assertThat("header item count must be present", pageSizeAsString, notNullValue()); + int pageSize = Integer.valueOf(pageSizeAsString); + assertThat("Result size must match header item count", page.results(), hasSize(pageSize)); + numberOfResults += pageSize; + } + assertThat("number of total results", numberOfResults, equalTo(numberOfDocuments)); + assertThat("number of result pages", pageCounter, + equalTo((numberOfDocuments + requestPageSize - 1) / requestPageSize)); + } + + /** + * Queries for documents using an Orderby query. 
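+ * The collection is partitioned, so the SDK queries each partition (bounded by
+ * maxDegreeOfParallelism) and the test verifies that the merged results are emitted
+ * in ORDER BY order.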
+ */ + @Test(groups = "samples", timeOut = TIMEOUT) + public void orderBy_Async() throws Exception { + // CREATE a partitioned collection + String collectionId = UUID.randomUUID().toString(); + DocumentCollection multiPartitionCollection = createMultiPartitionCollection("dbs/" + createdDatabase.id(), + collectionId, "/key"); + + // Insert documents + int totalNumberOfDocumentsInMultiPartitionCollection = 10; + for (int i = 0; i < totalNumberOfDocumentsInMultiPartitionCollection; i++) { + + Document doc = new Document(String.format("{\"id\":\"documentId%d\",\"key\":\"%s\",\"prop\":%d}", i, + RandomStringUtils.randomAlphabetic(2), i)); + client.createDocument("dbs/" + createdDatabase.id() + "/colls/" + multiPartitionCollection.id(), + doc, null, true).single().block(); + } + + // Query for the documents, ordered by the prop field + SqlQuerySpec query = new SqlQuerySpec("SELECT r.id FROM r ORDER BY r.prop", new SqlParameterList()); + FeedOptions options = new FeedOptions(); + options.enableCrossPartitionQuery(true); + options.maxItemCount(5); + + // Max degree of parallelism determines the number of partitions that + // the SDK establishes simultaneous connections to. + options.maxDegreeOfParallelism(2); + + // Get the observable for the ORDER BY query result pages + Flux<FeedResponse<Document>> documentQueryObservable = client.queryDocuments( + "dbs/" + createdDatabase.id() + "/colls/" + multiPartitionCollection.id(), query, options); + + List<String> resultList = Collections.synchronizedList(new ArrayList<>()); + + documentQueryObservable.map(FeedResponse::results) + // Map the logical page to the list of documents in the page + .concatMap(Flux::fromIterable) // Flatten the list of documents + .map(Resource::id) // Map to the document Id + .subscribe(resultList::add); // Add each document Id to the resultList + + Thread.sleep(4000); + + // Assert we found all the results + assertThat(resultList, hasSize(totalNumberOfDocumentsInMultiPartitionCollection)); + for (int i = 0; i < totalNumberOfDocumentsInMultiPartitionCollection; i++) { + String docId = resultList.get(i); + // Assert that the order of the documents is valid + assertThat(docId, equalTo("documentId" + i)); + } + } + + /** + * You can convert a Flux to a CompletableFuture.
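+ * A minimal sketch of the pattern exercised below ({@code collectionLink} and
+ * {@code options} stand in for the values built in this test):
+ * <pre>{@code
+ * CompletableFuture<List<FeedResponse<Document>>> future = client
+ *     .queryDocuments(collectionLink, "SELECT * FROM root", options)
+ *     .collectList()
+ *     .toFuture();
+ * List<FeedResponse<Document>> pages = future.get();
+ * }</pre>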
+ */ + @Test(groups = "samples", timeOut = TIMEOUT) + public void transformObservableToCompletableFuture() throws Exception { + int requestPageSize = 3; + FeedOptions options = new FeedOptions(); + options.maxItemCount(requestPageSize); + options.enableCrossPartitionQuery(true); + + Flux> documentQueryObservable = client + .queryDocuments(getCollectionLink(), "SELECT * FROM root", options); + + // Convert to observable of list of pages + Mono>> allPagesObservable = documentQueryObservable.collectList(); + + // Convert the observable of list of pages to a Future + CompletableFuture>> future = allPagesObservable.toFuture(); + + List> pageList = future.get(); + + int totalNumberOfRetrievedDocuments = 0; + for (FeedResponse page : pageList) { + totalNumberOfRetrievedDocuments += page.results().size(); + } + assertThat(numberOfDocuments, equalTo(totalNumberOfRetrievedDocuments)); + } + + private String getCollectionLink() { + return "dbs/" + createdDatabase.id() + "/colls/" + createdCollection.id(); + } + + private DocumentCollection createMultiPartitionCollection(String databaseLink, String collectionId, + String partitionKeyPath) { + PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition(); + ArrayList paths = new ArrayList(); + paths.add(partitionKeyPath); + partitionKeyDef.paths(paths); + + RequestOptions options = new RequestOptions(); + options.setOfferThroughput(10100); + DocumentCollection collectionDefinition = new DocumentCollection(); + collectionDefinition.id(collectionId); + collectionDefinition.setPartitionKey(partitionKeyDef); + DocumentCollection createdCollection = client.createCollection(databaseLink, collectionDefinition, options) + .single().block().getResource(); + + return createdCollection; + } +} diff --git a/sdk/cosmos/examples/src/test/java/com/azure/data/cosmos/rx/examples/InMemoryGroupbyTest.java b/sdk/cosmos/examples/src/test/java/com/azure/data/cosmos/rx/examples/InMemoryGroupbyTest.java new file mode 100644 index 0000000000000..333edfe75b0c2 --- /dev/null +++ b/sdk/cosmos/examples/src/test/java/com/azure/data/cosmos/rx/examples/InMemoryGroupbyTest.java @@ -0,0 +1,188 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos.rx.examples; + +import com.azure.data.cosmos.internal.AsyncDocumentClient; +import com.azure.data.cosmos.ConnectionMode; +import com.azure.data.cosmos.ConnectionPolicy; +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.internal.Database; +import com.azure.data.cosmos.internal.Document; +import com.azure.data.cosmos.DocumentClientTest; +import com.azure.data.cosmos.internal.DocumentCollection; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.PartitionKeyDefinition; +import com.azure.data.cosmos.SqlParameter; +import com.azure.data.cosmos.SqlParameterList; +import com.azure.data.cosmos.SqlQuerySpec; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; +import reactor.core.publisher.GroupedFlux; + +import java.time.LocalDateTime; +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; + +public class InMemoryGroupbyTest extends DocumentClientTest { + + private final static int TIMEOUT = 60000; + + private AsyncDocumentClient client; + private Database createdDatabase; + private DocumentCollection createdCollection; + + @BeforeClass(groups = "samples", timeOut = 2 * TIMEOUT) + public void setUp() throws Exception { + + ConnectionPolicy connectionPolicy = new ConnectionPolicy().connectionMode(ConnectionMode.DIRECT); + + this.clientBuilder() + .withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.SESSION); + + this.client = this.clientBuilder().build(); + + // CREATE database + createdDatabase = Utils.createDatabaseForTest(client); + + DocumentCollection collectionDefinition = new DocumentCollection(); + collectionDefinition.id(UUID.randomUUID().toString()); + PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition(); + ArrayList paths = new ArrayList(); + paths.add("/mypk"); + partitionKeyDef.paths(paths); + collectionDefinition.setPartitionKey(partitionKeyDef); + + // CREATE collection + createdCollection = client + .createCollection("dbs/" + createdDatabase.id(), collectionDefinition, null) + .single().block().getResource(); + + int numberOfPayers = 10; + int numberOfDocumentsPerPayer = 10; + + for (int i = 0; i < numberOfPayers; i++) { + + for (int j = 0; j < numberOfDocumentsPerPayer; j++) { + + LocalDateTime currentTime = LocalDateTime.now(); + + Document doc = new Document(String.format("{ " + + "'id' : '%s'," + + "'site_id': 'ABC', " + + "'payer_id': %d, " + + " 'created_time' : %d " + + "}", UUID.randomUUID().toString(), i, currentTime.getSecond())); + client.createDocument(getCollectionLink(), doc, null, true).single().block(); + + Thread.sleep(100); + } + } + System.out.println("finished inserting documents"); + } + + @AfterClass(groups = "samples", timeOut = TIMEOUT) + public void shutdown() { + Utils.safeClean(client, createdDatabase); + client.close(); + } + + /** + * Queries Documents and performs Group by operation after fetching the Documents. 
+ * If you want to understand the steps in more details see {@link #groupByInMemory_MoreDetail()} + * @throws Exception + */ + @Test(groups = "samples", timeOut = 2 * TIMEOUT) + public void groupByInMemory() { + // If you want to understand the steps in more details see groupByInMemoryMoreDetail() + int requestPageSize = 3; + FeedOptions options = new FeedOptions(); + options.maxItemCount(requestPageSize); + options.enableCrossPartitionQuery(true); + + Flux documentsObservable = client + .queryDocuments(getCollectionLink(), + new SqlQuerySpec("SELECT * FROM root r WHERE r.site_id=@site_id", + new SqlParameterList(new SqlParameter("@site_id", "ABC"))), + options) + .flatMap(page -> Flux.fromIterable(page.results())); + + final LocalDateTime now = LocalDateTime.now(); + + List> resultsGroupedAsLists = documentsObservable + .filter(doc -> Math.abs(now.getSecond() - doc.getInt("created_time")) <= 90) + .groupBy(doc -> doc.getInt("payer_id")).flatMap(Flux::collectList) + .collectList() + .block(); + + for(List resultsForEachPayer :resultsGroupedAsLists) { + System.out.println("documents with payer_id : " + resultsForEachPayer.get(0).getInt("payer_id") + " are " + resultsForEachPayer); + } + } + + /** + * This does the same thing as {@link #groupByInMemory_MoreDetail()} but with pedagogical details + * @throws Exception + */ + @Test(groups = "samples", timeOut = 2 * TIMEOUT) + public void groupByInMemory_MoreDetail() { + + int requestPageSize = 3; + FeedOptions options = new FeedOptions(); + options.maxItemCount(requestPageSize); + options.enableCrossPartitionQuery(true); + + Flux documentsObservable = client + .queryDocuments(getCollectionLink(), + new SqlQuerySpec("SELECT * FROM root r WHERE r.site_id=@site_id", + new SqlParameterList(new SqlParameter("@site_id", "ABC"))), + options) + .flatMap(page -> Flux.fromIterable(page.results())); + + final LocalDateTime now = LocalDateTime.now(); + + Flux> groupedByPayerIdObservable = documentsObservable + .filter(doc -> Math.abs(now.getSecond() - doc.getInt("created_time")) <= 90) + .groupBy(doc -> doc.getInt("payer_id")); + + Flux> docsGroupedAsList = groupedByPayerIdObservable.flatMap(grouped -> { + Flux> list = grouped.collectList().flux(); + return list; + }); + + List> resultsGroupedAsLists = docsGroupedAsList.collectList().single().block(); + + for(List resultsForEachPayer : resultsGroupedAsLists) { + System.out.println("documents with payer_id : " + resultsForEachPayer.get(0).getInt("payer_id") + " are " + resultsForEachPayer); + } + } + + private String getCollectionLink() { + return "dbs/" + createdDatabase.id() + "/colls/" + createdCollection.id(); + } +} diff --git a/sdk/cosmos/examples/src/test/java/com/azure/data/cosmos/rx/examples/OfferCRUDAsyncAPITest.java b/sdk/cosmos/examples/src/test/java/com/azure/data/cosmos/rx/examples/OfferCRUDAsyncAPITest.java new file mode 100644 index 0000000000000..bfbd72ab5232c --- /dev/null +++ b/sdk/cosmos/examples/src/test/java/com/azure/data/cosmos/rx/examples/OfferCRUDAsyncAPITest.java @@ -0,0 +1,177 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2017 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to 
the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.rx.examples; + +import com.azure.data.cosmos.internal.AsyncDocumentClient; +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.ConnectionMode; +import com.azure.data.cosmos.ConnectionPolicy; +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.DataType; +import com.azure.data.cosmos.internal.Database; +import com.azure.data.cosmos.DocumentClientTest; +import com.azure.data.cosmos.internal.DocumentCollection; +import com.azure.data.cosmos.IncludedPath; +import com.azure.data.cosmos.Index; +import com.azure.data.cosmos.IndexingPolicy; +import com.azure.data.cosmos.internal.Offer; +import com.azure.data.cosmos.PartitionKeyDefinition; +import com.azure.data.cosmos.internal.RequestOptions; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.CountDownLatch; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.MatcherAssert.assertThat; + +/** + * This integration test class demonstrates how to use Async API to query and + * replace an Offer. + */ +public class OfferCRUDAsyncAPITest extends DocumentClientTest { + private final static int TIMEOUT = 60000; + private Database createdDatabase; + private AsyncDocumentClient client; + + @BeforeClass(groups = "samples", timeOut = TIMEOUT) + public void setUp() { + + ConnectionPolicy connectionPolicy = new ConnectionPolicy().connectionMode(ConnectionMode.DIRECT); + + this.clientBuilder() + .withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.SESSION); + + this.client = this.clientBuilder().build(); + + // CREATE database + createdDatabase = Utils.createDatabaseForTest(client); + } + + @AfterClass(groups = "samples", timeOut = TIMEOUT) + public void shutdown() { + Utils.safeClean(client, createdDatabase); + Utils.safeClose(client); + } + + /** + * Query for all the offers existing in the database account. + * REPLACE the required offer so that it has a higher throughput. 
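+ * The offer is looked up by matching its offerResourceId to the collection's
+ * resource id, its throughput is raised, and replaceOffer(offer) persists the change.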
+ */ + @Test(groups = "samples", timeOut = TIMEOUT) + public void updateOffer() throws Exception { + + int initialThroughput = 10200; + int newThroughput = 10300; + + // Set the throughput to be 10,200 + RequestOptions multiPartitionRequestOptions = new RequestOptions(); + multiPartitionRequestOptions.setOfferThroughput(initialThroughput); + + // CREATE the collection + DocumentCollection createdCollection = client.createCollection("dbs/" + createdDatabase.id(), + getMultiPartitionCollectionDefinition(), multiPartitionRequestOptions).single().block() + .getResource(); + + final CountDownLatch successfulCompletionLatch = new CountDownLatch(1); + + // Find offer associated with this collection + client.queryOffers( + String.format("SELECT * FROM r where r.offerResourceId = '%s'", createdCollection.resourceId()), + null).flatMap(offerFeedResponse -> { + List offerList = offerFeedResponse.results(); + // NUMBER of offers returned should be 1 + assertThat(offerList.size(), equalTo(1)); + + // This offer must correspond to the collection we created + Offer offer = offerList.get(0); + int currentThroughput = offer.getThroughput(); + assertThat(offer.getString("offerResourceId"), equalTo(createdCollection.resourceId())); + assertThat(currentThroughput, equalTo(initialThroughput)); + System.out.println("initial throughput: " + currentThroughput); + + // UPDATE the offer's throughput + offer.setThroughput(newThroughput); + + // REPLACE the offer + return client.replaceOffer(offer); + }).subscribe(offerResourceResponse -> { + Offer offer = offerResourceResponse.getResource(); + int currentThroughput = offer.getThroughput(); + + // The current throughput of the offer must be equal to the new throughput value + assertThat(offer.getString("offerResourceId"), equalTo(createdCollection.resourceId())); + assertThat(currentThroughput, equalTo(newThroughput)); + + System.out.println("updated throughput: " + currentThroughput); + successfulCompletionLatch.countDown(); + }, error -> { + System.err + .println("an error occurred while updating the offer: actual cause: " + error.getMessage()); + }); + + successfulCompletionLatch.await(); + } + + private DocumentCollection getMultiPartitionCollectionDefinition() { + DocumentCollection collectionDefinition = new DocumentCollection(); + collectionDefinition.id(UUID.randomUUID().toString()); + + // Set the partitionKeyDefinition for a partitioned collection + // Here, we are setting the partitionKey of the Collection to be /city + PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition(); + List paths = new ArrayList<>(); + paths.add("/city"); + partitionKeyDefinition.paths(paths); + collectionDefinition.setPartitionKey(partitionKeyDefinition); + + // Set indexing policy to be range range for string and number + IndexingPolicy indexingPolicy = new IndexingPolicy(); + List includedPaths = new ArrayList<>(); + IncludedPath includedPath = new IncludedPath(); + includedPath.path("/*"); + Collection indexes = new ArrayList<>(); + Index stringIndex = Index.Range(DataType.STRING); + BridgeInternal.setProperty(stringIndex, "precision", -1); + indexes.add(stringIndex); + + Index numberIndex = Index.Range(DataType.NUMBER); + BridgeInternal.setProperty(numberIndex, "precision", -1); + indexes.add(numberIndex); + includedPath.indexes(indexes); + includedPaths.add(includedPath); + indexingPolicy.setIncludedPaths(includedPaths); + collectionDefinition.setIndexingPolicy(indexingPolicy); + + return collectionDefinition; + } +} \ No newline at end of file diff 
--git a/sdk/cosmos/examples/src/test/java/com/azure/data/cosmos/rx/examples/StoredProcedureAsyncAPITest.java b/sdk/cosmos/examples/src/test/java/com/azure/data/cosmos/rx/examples/StoredProcedureAsyncAPITest.java new file mode 100644 index 0000000000000..e0159e7510340 --- /dev/null +++ b/sdk/cosmos/examples/src/test/java/com/azure/data/cosmos/rx/examples/StoredProcedureAsyncAPITest.java @@ -0,0 +1,274 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2017 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.rx.examples; + +import com.azure.data.cosmos.internal.AsyncDocumentClient; +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.ConnectionMode; +import com.azure.data.cosmos.ConnectionPolicy; +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.DataType; +import com.azure.data.cosmos.internal.Database; +import com.azure.data.cosmos.DocumentClientTest; +import com.azure.data.cosmos.internal.DocumentCollection; +import com.azure.data.cosmos.IncludedPath; +import com.azure.data.cosmos.Index; +import com.azure.data.cosmos.IndexingPolicy; +import com.azure.data.cosmos.PartitionKey; +import com.azure.data.cosmos.PartitionKeyDefinition; +import com.azure.data.cosmos.internal.RequestOptions; +import com.azure.data.cosmos.internal.StoredProcedure; +import com.azure.data.cosmos.internal.HttpConstants; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; + +import java.io.UnsupportedEncodingException; +import java.net.URLDecoder; +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.CountDownLatch; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.core.Is.is; + +/** + * This integration test class demonstrates how to use Async API to create + * and execute Stored Procedures. 
+ */ +public class StoredProcedureAsyncAPITest extends DocumentClientTest { + private final static int TIMEOUT = 60000; + + private Database createdDatabase; + private DocumentCollection createdCollection; + private AsyncDocumentClient client; + + @BeforeClass(groups = "samples", timeOut = TIMEOUT) + public void setUp() { + + ConnectionPolicy connectionPolicy = new ConnectionPolicy().connectionMode(ConnectionMode.DIRECT); + + this.clientBuilder() + .withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.SESSION); + + this.client = this.clientBuilder().build(); + + createdDatabase = Utils.createDatabaseForTest(client); + + createdCollection = client + .createCollection("dbs/" + createdDatabase.id(), getMultiPartitionCollectionDefinition(), null) + .single().block().getResource(); + } + + @AfterClass(groups = "samples", timeOut = TIMEOUT) + public void shutdown() { + Utils.safeClean(client, createdDatabase); + Utils.safeClose(client); + } + + /** + * Execute Stored Procedure and retrieve the Script Log + */ + @Test(groups = "samples", timeOut = TIMEOUT) + public void scriptConsoleLogEnabled() throws Exception { + // CREATE a stored procedure + StoredProcedure storedProcedure = new StoredProcedure( + "{" + + " 'id':'storedProcedureSample'," + + " 'body':" + + " 'function() {" + + " var mytext = \"x\";" + + " var myval = 1;" + + " try {" + + " console.log(\"The value of %s is %s.\", mytext, myval);" + + " getContext().getResponse().setBody(\"Success!\");" + + " }" + + " catch(err) {" + + " getContext().getResponse().setBody(\"inline err: [\" + err.number + \"] \" + err);" + + " }" + + " }'" + + "}"); + + storedProcedure = client.createStoredProcedure(getCollectionLink(), storedProcedure, null) + .single().block().getResource(); + + RequestOptions requestOptions = new RequestOptions(); + requestOptions.setScriptLoggingEnabled(true); + requestOptions.setPartitionKey(new PartitionKey("Seattle")); + + final CountDownLatch successfulCompletionLatch = new CountDownLatch(1); + + // Execute the stored procedure + client.executeStoredProcedure(getSprocLink(storedProcedure), requestOptions, new Object[]{}) + .subscribe(storedProcedureResponse -> { + String logResult = "The value of x is 1."; + try { + assertThat(URLDecoder.decode(storedProcedureResponse.getScriptLog(), "UTF-8"), is(logResult)); + assertThat(URLDecoder.decode(storedProcedureResponse.getResponseHeaders() + .get(HttpConstants.HttpHeaders.SCRIPT_LOG_RESULTS), "UTF-8"), is(logResult)); + } catch (UnsupportedEncodingException e) { + e.printStackTrace(); + } + successfulCompletionLatch.countDown(); + System.out.println(storedProcedureResponse.getActivityId()); + }, error -> { + System.err.println("an error occurred while executing the stored procedure: actual cause: " + + error.getMessage()); + }); + + successfulCompletionLatch.await(); + } + + /** + * Execute Stored Procedure that takes arguments + */ + @Test(groups = "samples", timeOut = TIMEOUT) + public void executeStoredProcWithArgs() throws Exception { + // CREATE stored procedure + StoredProcedure storedProcedure = new StoredProcedure( + "{" + + " 'id': 'multiplySample'," + + " 'body':" + + " 'function (value, num) {" + + " getContext().getResponse().setBody(" + + " \"2*\" + value + \" is \" + num * 2 );" + + " }'" + + "}"); + + storedProcedure = client.createStoredProcedure(getCollectionLink(), storedProcedure, null) + .single().block().getResource(); 
+ + RequestOptions requestOptions = new RequestOptions(); + requestOptions.setPartitionKey(new PartitionKey("Seattle")); + + final CountDownLatch successfulCompletionLatch = new CountDownLatch(1); + + // Execute the stored procedure + Object[] storedProcedureArgs = new Object[]{"a", 123}; + client.executeStoredProcedure(getSprocLink(storedProcedure), requestOptions, storedProcedureArgs) + .subscribe(storedProcedureResponse -> { + String storedProcResultAsString = storedProcedureResponse.getResponseAsString(); + assertThat(storedProcResultAsString, equalTo("\"2*a is 246\"")); + successfulCompletionLatch.countDown(); + System.out.println(storedProcedureResponse.getActivityId()); + }, error -> { + System.err.println("an error occurred while executing the stored procedure: actual cause: " + + error.getMessage()); + }); + + successfulCompletionLatch.await(); + } + + /** + * Execute Stored Procedure that takes arguments, passing a Pojo object + */ + @Test(groups = "samples", timeOut = TIMEOUT) + public void executeStoredProcWithPojoArgs() throws Exception { + // create stored procedure + StoredProcedure storedProcedure = new StoredProcedure( + "{" + + " 'id': 'storedProcedurePojoSample'," + + " 'body':" + + " 'function (value) {" + + " getContext().getResponse().setBody(" + + " \"a is \" + value.temp);" + + " }'" + + "}"); + + storedProcedure = client.createStoredProcedure(getCollectionLink(), storedProcedure, null) + .single().block().getResource(); + + RequestOptions requestOptions = new RequestOptions(); + requestOptions.setPartitionKey(new PartitionKey("Seattle")); + + final CountDownLatch successfulCompletionLatch = new CountDownLatch(1); + + // POJO + class SamplePojo { + public String temp = "my temp value"; + } + SamplePojo samplePojo = new SamplePojo(); + + // Execute the stored procedure + Object[] storedProcedureArgs = new Object[]{samplePojo}; + client.executeStoredProcedure(getSprocLink(storedProcedure), requestOptions, storedProcedureArgs) + .subscribe(storedProcedureResponse -> { + String storedProcResultAsString = storedProcedureResponse.getResponseAsString(); + assertThat(storedProcResultAsString, equalTo("\"a is my temp value\"")); + successfulCompletionLatch.countDown(); + System.out.println(storedProcedureResponse.getActivityId()); + }, error -> { + System.err.println("an error occurred while executing the stored procedure: actual cause: " + + error.getMessage()); + }); + + successfulCompletionLatch.await(); + } + + private static DocumentCollection getMultiPartitionCollectionDefinition() { + DocumentCollection collectionDefinition = new DocumentCollection(); + collectionDefinition.id(UUID.randomUUID().toString()); + + // Set the partitionKeyDefinition for a partitioned collection + // Here, we are setting the partitionKey of the Collection to be /city + PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition(); + List paths = new ArrayList(); + paths.add("/city"); + partitionKeyDefinition.paths(paths); + collectionDefinition.setPartitionKey(partitionKeyDefinition); + + // Set indexing policy to be range range for string and number + IndexingPolicy indexingPolicy = new IndexingPolicy(); + List includedPaths = new ArrayList(); + IncludedPath includedPath = new IncludedPath(); + includedPath.path("/*"); + List indexes = new ArrayList(); + Index stringIndex = Index.Range(DataType.STRING); + BridgeInternal.setProperty(stringIndex, "precision", -1); + indexes.add(stringIndex); + + Index numberIndex = Index.Range(DataType.NUMBER); + 
BridgeInternal.setProperty(numberIndex, "precision", -1); + indexes.add(numberIndex); + includedPath.indexes(indexes); + includedPaths.add(includedPath); + indexingPolicy.setIncludedPaths(includedPaths); + collectionDefinition.setIndexingPolicy(indexingPolicy); + + return collectionDefinition; + } + + private String getCollectionLink() { + return "dbs/" + createdDatabase.id() + "/colls/" + createdCollection.id(); + } + + private String getSprocLink(StoredProcedure sproc) { + return "dbs/" + createdDatabase.id() + "/colls/" + createdCollection.id() + "/sprocs/" + sproc.id(); + } +} \ No newline at end of file diff --git a/sdk/cosmos/examples/src/test/java/com/azure/data/cosmos/rx/examples/TestConfigurations.java b/sdk/cosmos/examples/src/test/java/com/azure/data/cosmos/rx/examples/TestConfigurations.java new file mode 100644 index 0000000000000..7054ca8e7a08d --- /dev/null +++ b/sdk/cosmos/examples/src/test/java/com/azure/data/cosmos/rx/examples/TestConfigurations.java @@ -0,0 +1,56 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.rx.examples; + +import com.google.common.base.Strings; +import org.apache.commons.lang3.StringUtils; + +/** + * Contains the configurations for tests. + *
+ * <p>
+ * For running tests, you can pass a customized endpoint configuration in one of the following
+ * ways:
+ * <ul>
+ * <li>-DACCOUNT_KEY="[your-key]" -DACCOUNT_HOST="[your-endpoint]" as JVM
+ * command-line option.</li>
+ * <li>You can set ACCOUNT_KEY and ACCOUNT_HOST as environment variables.</li>
+ * </ul>
+ * <p>
+ * If none of the above is set, emulator endpoint will be used. + */ +public final class TestConfigurations { + // REPLACE MASTER_KEY and HOST with values from your Azure Cosmos DB account. + // The default values are credentials of the local emulator, which are not used in any production environment. + // + public static String MASTER_KEY = + System.getProperty("ACCOUNT_KEY", + StringUtils.defaultString(Strings.emptyToNull( + System.getenv().get("ACCOUNT_KEY")), + "C2y6yDjf5/R+ob0N8A7Cgv30VRDJIWEHLM+4QDU5DE2nQ9nDuVTqobD4b8mGGyPMbIZnqyMsEcaGQy67XIw/Jw==")); + + public static String HOST = + System.getProperty("ACCOUNT_HOST", + StringUtils.defaultString(Strings.emptyToNull( + System.getenv().get("ACCOUNT_HOST")), + "https://localhost:443/")); +} diff --git a/sdk/cosmos/examples/src/test/java/com/azure/data/cosmos/rx/examples/TokenResolverTest.java b/sdk/cosmos/examples/src/test/java/com/azure/data/cosmos/rx/examples/TokenResolverTest.java new file mode 100644 index 0000000000000..810d8409a0105 --- /dev/null +++ b/sdk/cosmos/examples/src/test/java/com/azure/data/cosmos/rx/examples/TokenResolverTest.java @@ -0,0 +1,342 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.rx.examples; + +import com.azure.data.cosmos.internal.AsyncDocumentClient; +import com.azure.data.cosmos.ConnectionMode; +import com.azure.data.cosmos.ConnectionPolicy; +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.CosmosResourceType; +import com.azure.data.cosmos.internal.Database; +import com.azure.data.cosmos.internal.Document; +import com.azure.data.cosmos.DocumentClientTest; +import com.azure.data.cosmos.internal.DocumentCollection; +import com.azure.data.cosmos.PartitionKey; +import com.azure.data.cosmos.PartitionKeyDefinition; +import com.azure.data.cosmos.internal.Permission; +import com.azure.data.cosmos.PermissionMode; +import com.azure.data.cosmos.internal.RequestOptions; +import com.azure.data.cosmos.internal.ResourceResponse; +import com.azure.data.cosmos.TokenResolver; +import com.azure.data.cosmos.internal.User; +import com.google.common.collect.ImmutableMap; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.core.IsInstanceOf.instanceOf; + +public class TokenResolverTest extends DocumentClientTest { + + private final static int TIMEOUT = 180000; + private final static String USER_ID = "userId"; + private AsyncDocumentClient client; + private Database createdDatabase; + private DocumentCollection createdCollection; + private Map userToReadOnlyResourceTokenMap = new HashMap<>(); + private Map documentToReadUserMap = new HashMap<>(); + + private Map documentToReadWriteUserMap = new HashMap<>(); + private Map userToReadWriteResourceTokenMap = new HashMap<>(); + + + /** + * This Example walks you through how to use a token resolver to + * control authorization and access to Cosmos DB resources. 
+ */ + @BeforeClass(groups = "samples", timeOut = TIMEOUT) + public void setUp() { + + ConnectionPolicy connectionPolicy = new ConnectionPolicy().connectionMode(ConnectionMode.DIRECT); + + this.clientBuilder() + .withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.SESSION); + + this.client = this.clientBuilder().build(); + + DocumentCollection collectionDefinition = new DocumentCollection(); + collectionDefinition.id(UUID.randomUUID().toString()); + PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition(); + ArrayList paths = new ArrayList(); + paths.add("/mypk"); + partitionKeyDef.paths(paths); + collectionDefinition.setPartitionKey(partitionKeyDef); + + // CREATE database + createdDatabase = Utils.createDatabaseForTest(client); + + // CREATE collection + createdCollection = client + .createCollection("dbs/" + createdDatabase.id(), collectionDefinition, null) + .single().block().getResource(); + + for (int i = 0; i < 10; i++) { + // CREATE a document + Document documentDefinition = new Document(); + documentDefinition.id(UUID.randomUUID().toString()); + Document createdDocument = client.createDocument(createdCollection.selfLink(), documentDefinition, null, true).blockFirst().getResource(); + + // CREATE a User who is meant to only read this document + User readUserDefinition = new User(); + readUserDefinition.id(UUID.randomUUID().toString()); + User createdReadUser = client.createUser(createdDatabase.selfLink(), readUserDefinition, null).blockFirst().getResource(); + + // CREATE a read only permission for the above document + Permission readOnlyPermissionDefinition = new Permission(); + readOnlyPermissionDefinition.id(UUID.randomUUID().toString()); + readOnlyPermissionDefinition.setResourceLink(createdDocument.selfLink()); + readOnlyPermissionDefinition.setPermissionMode(PermissionMode.READ); + + // Assign the permission to the above user + Permission readOnlyCreatedPermission = client.createPermission(createdReadUser.selfLink(), readOnlyPermissionDefinition, null).blockFirst().getResource(); + userToReadOnlyResourceTokenMap.put(createdReadUser.id(), readOnlyCreatedPermission.getToken()); + + documentToReadUserMap.put(createdDocument.selfLink(), createdReadUser.id()); + + // CREATE a User who can both read and write this document + User readWriteUserDefinition = new User(); + readWriteUserDefinition.id(UUID.randomUUID().toString()); + User createdReadWriteUser = client.createUser(createdDatabase.selfLink(), readWriteUserDefinition, null).blockFirst().getResource(); + + // CREATE a read/write permission for the above document + Permission readWritePermissionDefinition = new Permission(); + readWritePermissionDefinition.id(UUID.randomUUID().toString()); + readWritePermissionDefinition.setResourceLink(createdDocument.selfLink()); + readWritePermissionDefinition.setPermissionMode(PermissionMode.ALL); + + // Assign the permission to the above user + Permission readWriteCreatedPermission = client.createPermission(createdReadWriteUser.selfLink(), readWritePermissionDefinition, null).blockFirst().getResource(); + userToReadWriteResourceTokenMap.put(createdReadWriteUser.id(), readWriteCreatedPermission.getToken()); + + documentToReadWriteUserMap.put(createdDocument.selfLink(), createdReadWriteUser.id()); + } + } + + /** + * READ a document with a user having read permission + */ + @Test(groups = "samples", timeOut = TIMEOUT) + public void 
readDocumentThroughTokenResolver() throws Exception { + AsyncDocumentClient asyncClientWithTokenResolver = null; + try { + ConnectionPolicy connectionPolicy = new ConnectionPolicy(); + connectionPolicy.connectionMode(ConnectionMode.DIRECT); + asyncClientWithTokenResolver = new AsyncDocumentClient.Builder() + .withServiceEndpoint(TestConfigurations.HOST) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.SESSION) + .withTokenResolver(getTokenResolverForRead()) + .build(); + List> capturedResponse = Collections + .synchronizedList(new ArrayList<>()); + for (String documentLink : documentToReadUserMap.keySet()) { + + // Each document has one User who can only read it. Pass that User Id in the item. + // The token resolver will resolve the token for that User based on 'userId'. + ImmutableMap properties = ImmutableMap. builder() + .put(USER_ID, documentToReadUserMap.get(documentLink)) + .build(); + RequestOptions requestOptions = new RequestOptions(); + requestOptions.setProperties(properties); + requestOptions.setPartitionKey(PartitionKey.None); + Flux> readDocumentObservable = asyncClientWithTokenResolver + .readDocument(documentLink, requestOptions); + readDocumentObservable.collectList().block().forEach(capturedResponse::add); + } + System.out.println("capturedResponse.size() = " + capturedResponse.size()); + assertThat(capturedResponse, hasSize(10)); + } finally { + Utils.safeClose(asyncClientWithTokenResolver); + } + } + + /** + * DELETE a document with a user having all permission + */ + @Test(groups = "samples", timeOut = TIMEOUT, dependsOnMethods = {"readDocumentThroughTokenResolver"}) + public void deleteDocumentThroughTokenResolver() throws Exception { + AsyncDocumentClient asyncClientWithTokenResolver = null; + try { + ConnectionPolicy connectionPolicy = new ConnectionPolicy(); + connectionPolicy.connectionMode(ConnectionMode.DIRECT); + asyncClientWithTokenResolver = new AsyncDocumentClient.Builder() + .withServiceEndpoint(TestConfigurations.HOST) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.SESSION) + .withTokenResolver(getTokenResolverForReadWrite()) + .build(); + List> capturedResponse = Collections + .synchronizedList(new ArrayList<>()); + for (String documentLink : documentToReadWriteUserMap.keySet()) { + + // Each document has one User who can read and write it. Pass that User Id in the item. + // The token resolver will resolve the token for that User based on 'userId'. + ImmutableMap properties = ImmutableMap. builder() + .put(USER_ID, documentToReadWriteUserMap.get(documentLink)) + .build(); + + RequestOptions requestOptions = new RequestOptions(); + requestOptions.setProperties(properties); + requestOptions.setPartitionKey(PartitionKey.None); + Flux> readDocumentObservable = asyncClientWithTokenResolver + .deleteDocument(documentLink, requestOptions); + readDocumentObservable.collectList().block().forEach(capturedResponse::add); + } + assertThat(capturedResponse, hasSize(10)); + } finally { + Utils.safeClose(asyncClientWithTokenResolver); + } + } + + /** + * Block list an user and throw error from token resolver + */ + @Test(groups = "samples", timeOut = TIMEOUT) + public void blockListUserThroughTokenResolver() throws Exception { + String blockListedUserId = "block listed user"; + String errorMessage = "block listed user! 
access denied!"; + + AsyncDocumentClient asyncClientWithTokenResolver = null; + + try { + ConnectionPolicy connectionPolicy = new ConnectionPolicy(); + connectionPolicy.connectionMode(ConnectionMode.DIRECT); + asyncClientWithTokenResolver = new AsyncDocumentClient.Builder() + .withServiceEndpoint(TestConfigurations.HOST) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.SESSION) + .withTokenResolver(getTokenResolverWithBlockList(blockListedUserId, errorMessage)) + .build(); + + // READ a document using a block listed user, passing the 'userId' in the item. + // Token resolver will throw RuntimeException. + RequestOptions options = new RequestOptions(); + ImmutableMap properties = ImmutableMap. builder() + .put(USER_ID, blockListedUserId) + .build(); + + options.setProperties(properties); + Flux> readObservable = asyncClientWithTokenResolver.readCollection(createdCollection.selfLink(), options); + List capturedErrors = Collections + .synchronizedList(new ArrayList<>()); + readObservable.subscribe(response -> {}, throwable -> capturedErrors.add(throwable)); + Thread.sleep(4000); + assertThat(capturedErrors, hasSize(1)); + assertThat(capturedErrors.get(0), instanceOf(RuntimeException.class)); + assertThat(capturedErrors.get(0).getMessage(), equalTo(errorMessage)); + + // READ a document using a valid user, passing the 'userId' in the item. + // Token resolver will pass on the correct token for authentication. + String validUserId = userToReadWriteResourceTokenMap.keySet().iterator().next(); + System.out.println(validUserId); + properties = ImmutableMap. builder() + .put(USER_ID, validUserId) + .build(); + options.setProperties(properties); + readObservable = asyncClientWithTokenResolver.readCollection(createdCollection.selfLink(), options); + List capturedResponse = Collections + .synchronizedList(new ArrayList<>()); + readObservable.subscribe(resourceResponse -> capturedResponse.add(resourceResponse.getResource()), error -> error.printStackTrace()); + Thread.sleep(4000); + assertThat(capturedErrors, hasSize(1)); + assertThat(capturedResponse.get(0).id(), equalTo(createdCollection.id())); + } finally { + Utils.safeClose(asyncClientWithTokenResolver); + } + } + + /** + * For Reading DatabaseAccount on client initialization, use any User's token. + * For subsequent Reads, get the correct read only token based on 'userId'. + */ + private TokenResolver getTokenResolverForRead() { + return (String requestVerb, String resourceIdOrFullName, CosmosResourceType resourceType, Map properties) -> { + if (resourceType.equals(CosmosResourceType.System)) { + //Choose any token it should have the read access on database account + for (String token : userToReadOnlyResourceTokenMap.values()) { + return token; + } + } else { + return userToReadOnlyResourceTokenMap.get(properties.get(USER_ID)); + } + return null; + }; + } + + /** + * For Reading DatabaseAccount on client initialization, use any User's token. + * For subsequent Reads/Writes, get the correct read/write token based on 'userId'. 
+ */ + private TokenResolver getTokenResolverForReadWrite() { + return (String requestVerb, String resourceIdOrFullName, CosmosResourceType resourceType, Map properties) -> { + if (resourceType.equals(CosmosResourceType.System)) { + //Choose any token it should have the read access on database account + for (String token : userToReadWriteResourceTokenMap.values()) { + return token; + } + } else { + return userToReadWriteResourceTokenMap.get(properties.get(USER_ID)); + } + return null; + }; + } + + /** + * For Reading DatabaseAccount on client initialization, use any User's token. + * For subsequent Reads, get the correct read/write token based on 'userId', + * only if user is not block listed. In this scenario, the block listed user id + * is compared to the current user's id, passed into the item for the request. + */ + private TokenResolver getTokenResolverWithBlockList(String blockListedUserId, String errorMessage) { + return (String requestVerb, String resourceIdOrFullName, CosmosResourceType resourceType, Map properties) -> { + if (resourceType == CosmosResourceType.System) { + return userToReadWriteResourceTokenMap.values().iterator().next(); + } else if (!properties.get(USER_ID).toString().equals(blockListedUserId)) { + return userToReadWriteResourceTokenMap.get(properties.get(USER_ID)); + } else { + throw new RuntimeException(errorMessage); + } + }; + } + + @AfterClass(groups = "samples", timeOut = TIMEOUT) + public void shutdown() { + Utils.safeClean(client, createdDatabase); + Utils.safeClose(client); + } +} diff --git a/sdk/cosmos/examples/src/test/java/com/azure/data/cosmos/rx/examples/UniqueIndexAsyncAPITest.java b/sdk/cosmos/examples/src/test/java/com/azure/data/cosmos/rx/examples/UniqueIndexAsyncAPITest.java new file mode 100644 index 0000000000000..f1d48c7f0197f --- /dev/null +++ b/sdk/cosmos/examples/src/test/java/com/azure/data/cosmos/rx/examples/UniqueIndexAsyncAPITest.java @@ -0,0 +1,133 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos.rx.examples; + +import com.azure.data.cosmos.internal.AsyncDocumentClient; +import com.azure.data.cosmos.ConnectionMode; +import com.azure.data.cosmos.ConnectionPolicy; +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.internal.Database; +import com.azure.data.cosmos.internal.Document; +import com.azure.data.cosmos.DocumentClientTest; +import com.azure.data.cosmos.internal.DocumentCollection; +import com.azure.data.cosmos.PartitionKeyDefinition; +import com.azure.data.cosmos.UniqueKey; +import com.azure.data.cosmos.UniqueKeyPolicy; +import com.azure.data.cosmos.internal.ResourceResponse; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; +import io.reactivex.subscribers.TestSubscriber; +import org.hamcrest.Matchers; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; + +import java.util.ArrayList; +import java.util.UUID; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.equalTo; + +public class UniqueIndexAsyncAPITest extends DocumentClientTest { + + private final static int TIMEOUT = 60000; + + private AsyncDocumentClient client; + private Database createdDatabase; + + @Test(groups = "samples", timeOut = TIMEOUT) + public void uniqueIndex() { + DocumentCollection collectionDefinition = new DocumentCollection(); + collectionDefinition.id(UUID.randomUUID().toString()); + UniqueKeyPolicy uniqueKeyPolicy = new UniqueKeyPolicy(); + UniqueKey uniqueKey = new UniqueKey(); + uniqueKey.paths(ImmutableList.of("/name", "/field")); + uniqueKeyPolicy.uniqueKeys(Lists.newArrayList(uniqueKey)); + collectionDefinition.setUniqueKeyPolicy(uniqueKeyPolicy); + PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition(); + ArrayList paths = new ArrayList(); + paths.add("/mypk"); + partitionKeyDef.paths(paths); + collectionDefinition.setPartitionKey(partitionKeyDef); + + DocumentCollection collection = client.createCollection(getDatabaseLink(), collectionDefinition, null).single().block().getResource(); + + Document doc1 = new Document("{ 'name':'Alan Turning', 'field': 'Mathematics', 'other' : 'Logic' }"); + Document doc2 = new Document("{ 'name':'Al-Khwarizmi', 'field': 'Mathematics' , 'other' : 'Algebra '}"); + Document doc3 = new Document("{ 'name':'Alan Turning', 'field': 'Mathematics', 'other' : 'CS' }"); + + client.createDocument(getCollectionLink(collection), doc1, null, false).single().block().getResource(); + client.createDocument(getCollectionLink(collection), doc2, null, false).single().block().getResource(); + + // doc1 got inserted with the same values for 'name' and 'field' + // so inserting a new one with the same values will violate unique index constraint. 
+ Flux> docCreation = + client.createDocument(getCollectionLink(collection), doc3, null, false); + + TestSubscriber> subscriber = new TestSubscriber<>(); + docCreation.subscribe(subscriber); + + subscriber.awaitTerminalEvent(); + subscriber.assertError(CosmosClientException.class); + assertThat(subscriber.errorCount(), Matchers.equalTo(1)); + + // error code for failure is conflict + assertThat(((CosmosClientException) subscriber.getEvents().get(1).get(0)).statusCode(), equalTo(409)); + } + + @BeforeClass(groups = "samples", timeOut = TIMEOUT) + public void setUp() { + + ConnectionPolicy connectionPolicy = new ConnectionPolicy().connectionMode(ConnectionMode.DIRECT); + + this.clientBuilder() + .withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.SESSION); + + this.client = this.clientBuilder().build(); + + DocumentCollection collectionDefinition = new DocumentCollection(); + collectionDefinition.id(UUID.randomUUID().toString()); + + // CREATE database + createdDatabase = Utils.createDatabaseForTest(client); + } + + @AfterClass(groups = "samples", timeOut = TIMEOUT) + public void shutdown() { + Utils.safeClean(client, createdDatabase); + Utils.safeClose(client); + } + + private String getCollectionLink(DocumentCollection collection) { + return "dbs/" + createdDatabase.id() + "/colls/" + collection.id(); + } + + private String getDatabaseLink() { + return "dbs/" + createdDatabase.id(); + } +} diff --git a/sdk/cosmos/examples/src/test/java/com/azure/data/cosmos/rx/examples/Utils.java b/sdk/cosmos/examples/src/test/java/com/azure/data/cosmos/rx/examples/Utils.java new file mode 100644 index 0000000000000..87d3ab1a170ec --- /dev/null +++ b/sdk/cosmos/examples/src/test/java/com/azure/data/cosmos/rx/examples/Utils.java @@ -0,0 +1,124 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.rx.examples; + +import com.azure.data.cosmos.internal.AsyncDocumentClient; +import com.azure.data.cosmos.ConnectionMode; +import com.azure.data.cosmos.ConnectionPolicy; +import com.azure.data.cosmos.internal.Database; +import com.azure.data.cosmos.internal.DatabaseForTest; +import com.azure.data.cosmos.internal.DocumentCollection; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.internal.ResourceResponse; +import com.azure.data.cosmos.RetryOptions; +import com.azure.data.cosmos.SqlQuerySpec; +import com.azure.data.cosmos.internal.TestConfigurations; +import org.testng.annotations.AfterSuite; +import reactor.core.publisher.Flux; + +public class Utils { + + @AfterSuite(groups = "samples") + public void cleanupStaleDatabase() { + ConnectionPolicy connectionPolicy = new ConnectionPolicy(); + connectionPolicy.connectionMode(ConnectionMode.DIRECT); + RetryOptions options = new RetryOptions(); + connectionPolicy.retryOptions(options); + AsyncDocumentClient client = new AsyncDocumentClient.Builder().withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .build(); + safeCleanDatabases(client); + client.close(); + } + + public static String getCollectionLink(Database db, DocumentCollection collection) { + return "dbs/" + db.id() + "/colls/" + collection; + } + + public static Database createDatabaseForTest(AsyncDocumentClient client) { + return DatabaseForTest.create(DatabaseManagerImpl.getInstance(client)).createdDatabase; + } + + private static void safeCleanDatabases(AsyncDocumentClient client) { + if (client != null) { + DatabaseForTest.cleanupStaleTestDatabases(DatabaseManagerImpl.getInstance(client)); + } + } + + public static void safeClean(AsyncDocumentClient client, Database database) { + if (database != null) { + safeClean(client, database.id()); + } + } + + public static void safeClean(AsyncDocumentClient client, String databaseId) { + if (client != null) { + if (databaseId != null) { + try { + client.deleteDatabase("/dbs/" + databaseId, null).single().block(); + } catch (Exception e) { + } + } + } + } + + public static String generateDatabaseId() { + return DatabaseForTest.generateId(); + } + + public static void safeClose(AsyncDocumentClient client) { + if (client != null) { + client.close(); + } + } + + private static class DatabaseManagerImpl implements DatabaseForTest.DatabaseManager { + public static DatabaseManagerImpl getInstance(AsyncDocumentClient client) { + return new DatabaseManagerImpl(client); + } + + private final AsyncDocumentClient client; + + private DatabaseManagerImpl(AsyncDocumentClient client) { + this.client = client; + } + + @Override + public Flux> queryDatabases(SqlQuerySpec query) { + return client.queryDatabases(query, null); + } + + @Override + public Flux> createDatabase(Database databaseDefinition) { + return client.createDatabase(databaseDefinition, null); + } + + @Override + public Flux> deleteDatabase(String id) { + + return client.deleteDatabase("dbs/" + id, null); + } + } +} diff --git a/sdk/cosmos/examples/src/test/resources/log4j.properties b/sdk/cosmos/examples/src/test/resources/log4j.properties new file mode 100644 index 0000000000000..b7947ea7907d1 --- /dev/null +++ b/sdk/cosmos/examples/src/test/resources/log4j.properties @@ -0,0 +1,13 @@ +# this is the log4j configuration for tests + +# Set root logger level to DEBUG and its only appender to A1. 
+log4j.rootLogger=INFO, A1 + +log4j.category.io.netty=INFO +log4j.category.io.reactivex=INFO +# A1 is set to be a ConsoleAppender. +log4j.appender.A1=org.apache.log4j.ConsoleAppender + +# A1 uses PatternLayout. +log4j.appender.A1.layout=org.apache.log4j.PatternLayout +log4j.appender.A1.layout.ConversionPattern=%d %5X{pid} [%t] %-5p %c - %m%n diff --git a/sdk/cosmos/faq/README.md b/sdk/cosmos/faq/README.md new file mode 100644 index 0000000000000..8fa637f1570d8 --- /dev/null +++ b/sdk/cosmos/faq/README.md @@ -0,0 +1,9 @@ + +### FAQ + +#### I am getting this error: + +- ``Request Rate too Large`` Request too large is an error from service indicating that you temporarily went beyond the provisioned throughput. You should retry after the provided +``DocumentClientException#getRetryAfterInMilliseconds()``. + +- ``CollectionPoolExhausted`` this is a SDK side error indicating that the SDK's connection pool is saturated. Consider to retry later, increase the connection pool size or use a semaphore to throttle your workload. diff --git a/sdk/cosmos/pom.xml b/sdk/cosmos/pom.xml new file mode 100644 index 0000000000000..2c3e793e2c91a --- /dev/null +++ b/sdk/cosmos/pom.xml @@ -0,0 +1,414 @@ + + + 4.0.0 + com.microsoft.azure + azure-cosmos-parent + 3.0.0 + pom + Azure Cosmos DB SQL API + Java Async SDK (with Reactive Extension RX support) for Azure Cosmos DB SQL API + https://docs.microsoft.com/en-us/azure/cosmos-db + + sdk + benchmark + examples + + + UTF-8 + UTF-8 + 3.11.1 + 2.5 + 3.8.1 + 1.6 + 1.6 + 1.6 + 3.0.0 + 3.0.0 + 27.0.1-jre + 1.3 + 2.9.8 + 3.1.4 + 1.58 + 1.2.17 + 4.0.5 + 1.10.19 + 4.1.36.Final + 2.0.25.Final + 3.2.2.RELEASE + Californium-SR7 + 2.2.4 + 3.0.0 + 1.7.6 + unit + 6.14.3 + ${project.basedir}/target/collectedArtifactsForRelease + + + + + + unit + + default + unit + + + true + + + + + org.apache.maven.plugins + maven-surefire-plugin + + + + + + + + + fast + + simple,cosmosv3 + + + + + org.apache.maven.plugins + maven-failsafe-plugin + + + + + + + long + + long + + + + + org.apache.maven.plugins + maven-failsafe-plugin + + + + + + + direct + + direct + + + + + org.apache.maven.plugins + maven-failsafe-plugin + + + + + + + multi-master + + multi-master + + + + + org.apache.maven.plugins + maven-failsafe-plugin + + + + + + + examples + + + samples,examples + + + + + org.apache.maven.plugins + maven-failsafe-plugin + + + + + + integration-test + verify + + + + + + + + + + emulator + + emulator + + + + + org.apache.maven.plugins + maven-failsafe-plugin + + + + + + + non-emulator + + non-emulator + + + + + org.apache.maven.plugins + maven-failsafe-plugin + + + + + + + e2e + + e2e + + + + + org.apache.maven.plugins + maven-failsafe-plugin + + + + + + + + + + org.apache.maven.plugins + maven-surefire-plugin + 2.22.0 + + unit + + %regex[.*] + + + + surefire.testng.verbose + 2 + + + + + + org.apache.maven.plugins + maven-failsafe-plugin + 2.22.0 + + + %regex[.*] + + + + surefire.testng.verbose + 2 + + + ${test.groups} + + + + + integration-test + verify + + + + + + + + + maven-javadoc-plugin + 3.0.1 + true + + true + false + ${javadoc.opts} + + **/internal/**/*.java + **/*BridgeInternal.java + + + + + attach-javadocs + + jar + + + + + + org.apache.maven.plugins + maven-source-plugin + 2.2.1 + + + attach-sources + + jar-no-fork + + + + + + org.apache.maven.plugins + maven-compiler-plugin + 3.6.0 + + 1.8 + 1.8 + + + + org.apache.maven.plugins + maven-eclipse-plugin + 2.8 + + + + org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.8 + + + + + 
+ org.apache.maven.plugins + maven-antrun-plugin + 1.8 + + + + default-cli + + + + + + + + + + + + run + + + + + + + + + + org.apache.maven.plugins + maven-surefire-report-plugin + 2.22.0 + + + org.codehaus.mojo + findbugs-maven-plugin + 3.0.4 + + + org.apache.maven.plugins + maven-jxr-plugin + 2.1 + + + + + + + com.microsoft.azure + azure-cosmos + ${project.parent.version} + + + io.projectreactor + reactor-bom + ${reactor-bom.version} + pom + import + + + + + + + MIT License + http://www.opensource.org/licenses/mit-license.php + + + + scm:git:https://github.com/Azure/azure-cosmosdb-java.git + scm:git:https://github.com/Azure/azure-cosmosdb-java.git + https://github.com/Azure/azure-cosmosdb-java.git + + + + Azure Cosmos DB Developer Platform Devs + docdbdevplatdevs@microsoft.com + Microsoft + http://www.microsoft.com/ + + + diff --git a/sdk/cosmos/sdk/CosmosConflict b/sdk/cosmos/sdk/CosmosConflict new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/sdk/cosmos/sdk/pom.xml b/sdk/cosmos/sdk/pom.xml new file mode 100644 index 0000000000000..9481c2952c1b8 --- /dev/null +++ b/sdk/cosmos/sdk/pom.xml @@ -0,0 +1,280 @@ + + + 4.0.0 + + com.microsoft.azure + azure-cosmos-parent + 3.0.0 + + azure-cosmos + Async SDK for SQL API of Azure Cosmos DB Service + Java Async SDK (with Reactive Extension rx support) for Azure Cosmos DB SQL API + jar + + + + org.apache.maven.plugins + maven-compiler-plugin + 3.6.0 + + 1.8 + 1.8 + + + + org.apache.maven.plugins + maven-eclipse-plugin + 2.8 + + + + org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.8 + + + + + + org.apache.maven.plugins + maven-source-plugin + 2.2.1 + + + attach-sources + + jar-no-fork + + + + + + org.apache.maven.plugins + maven-antrun-plugin + 1.8 + false + + + none + default-cli + + + + true + + + + + + + + fast + + + + org.apache.maven.plugins + maven-failsafe-plugin + + + src/test/resources/fast-testng.xml + + + + + + + + + long + + + + org.apache.maven.plugins + maven-failsafe-plugin + + + src/test/resources/long-testng.xml + + + + + + + + + emulator + + + + org.apache.maven.plugins + maven-failsafe-plugin + + + src/test/resources/emulator-testng.xml + + + + + + + + + + + org.apache.maven.plugins + maven-surefire-report-plugin + 2.19.1 + + + org.codehaus.mojo + findbugs-maven-plugin + 3.0.4 + + + org.apache.maven.plugins + maven-jxr-plugin + 2.1 + + + + + + com.fasterxml.jackson.core + jackson-databind + ${jackson-databind.version} + + + com.fasterxml.uuid + java-uuid-generator + ${java-uuid-generator.version} + + + commons-io + commons-io + ${commons-io.version} + + + commons-validator + commons-validator + ${commons-validator.version} + + + io.projectreactor + reactor-core + + + io.netty + netty-codec-http + ${netty.version} + + + io.netty + netty-handler + ${netty.version} + + + io.netty + netty-transport + ${netty.version} + + + org.slf4j + slf4j-api + ${slf4j.version} + + + org.apache.commons + commons-lang3 + ${commons-lang3.version} + + + org.apache.commons + commons-collections4 + 4.2 + + + org.apache.commons + commons-text + ${commons-text.version} + + + org.testng + testng + ${testng.version} + test + + + org.assertj + assertj-core + ${assertj.version} + test + + + org.slf4j + slf4j-log4j12 + ${slf4j.version} + test + + + log4j + log4j + ${log4j.version} + test + + + com.google.guava + guava + ${guava.version} + + + io.projectreactor + reactor-test + test + + + io.reactivex.rxjava2 + rxjava + ${rxjava2.version} + + + 
io.projectreactor.netty + reactor-netty + + + io.projectreactor.addons + reactor-adapter + + + io.dropwizard.metrics + metrics-core + ${metrics.version} + + + org.mockito + mockito-core + ${mockito.version} + test + + + diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/AccessCondition.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/AccessCondition.java new file mode 100644 index 0000000000000..4ea7951980ce8 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/AccessCondition.java @@ -0,0 +1,75 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +/** + * Represents a set of access conditions to be used for operations against the Azure Cosmos DB database service. + */ +public final class AccessCondition { + + private AccessConditionType type = AccessConditionType.IF_MATCH; + private String condition; + + /** + * Gets the condition type. + * + * @return the condition type. + */ + public AccessConditionType type() { + return this.type; + } + + /** + * Sets the condition type. + * + * @param type the condition type to use. + * @return the Access Condition + */ + public AccessCondition type(AccessConditionType type) { + this.type = type; + return this; + } + + /** + * Gets the value of the condition - for AccessConditionType IfMatchs and IfNotMatch, this is the ETag that has to + * be compared to. + * + * @return the condition. + */ + public String condition() { + return this.condition; + } + + /** + * Sets the value of the condition - for AccessConditionType IfMatchs and IfNotMatch, this is the ETag that has to + * be compared to. + * + * @param condition the condition to use. 
+ * @return the Access Condition + */ + public AccessCondition condition(String condition) { + this.condition = condition; + return this; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/AccessConditionType.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/AccessConditionType.java new file mode 100644 index 0000000000000..7dd4538e76c75 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/AccessConditionType.java @@ -0,0 +1,39 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +/** + * Specifies the set of access condition types that can be used for operations in the Azure Cosmos DB database service. + */ +public enum AccessConditionType { + /** + * Check if the resource's ETag value matches the ETag value performed. + */ + IF_MATCH, + + /** + * Check if the resource's ETag value does not match ETag value performed. + */ + IF_NONE_MATCH +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/BadRequestException.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/BadRequestException.java new file mode 100644 index 0000000000000..7785b882f7a6b --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/BadRequestException.java @@ -0,0 +1,81 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.RMResources; +import com.azure.data.cosmos.internal.directconnectivity.HttpUtils; +import com.azure.data.cosmos.internal.http.HttpHeaders; + +import java.net.URI; +import java.util.HashMap; +import java.util.Map; + +/** + * While this class is public, but it is not part of our published public APIs. + * This is meant to be internally used only by our sdk. + */ +public class BadRequestException extends CosmosClientException { + private static final long serialVersionUID = 1L; + + public BadRequestException(String message, Exception innerException) { + super(message, innerException, new HashMap<>(), HttpConstants.StatusCodes.BADREQUEST, null); + } + + public BadRequestException() { + this(RMResources.BadRequest); + } + + public BadRequestException(CosmosError cosmosError, long lsn, String partitionKeyRangeId, Map responseHeaders) { + super(HttpConstants.StatusCodes.BADREQUEST, cosmosError, responseHeaders); + BridgeInternal.setLSN(this, lsn); + BridgeInternal.setPartitionKeyRangeId(this, partitionKeyRangeId); + } + + public BadRequestException(String message) { + this(message, null, null, null); + } + + BadRequestException(String message, HttpHeaders headers, String requestUrlString) { + this(message, null, headers, requestUrlString); + } + + public BadRequestException(String message, HttpHeaders headers, URI requestUri) { + this(message, headers, requestUri != null ? requestUri.toString() : null); + } + + BadRequestException(Exception innerException) { + this(RMResources.BadRequest, innerException, null, null); + } + + BadRequestException(String message, + Exception innerException, + HttpHeaders headers, + String requestUrlString) { + super(String.format("%s: %s", RMResources.BadRequest, message), + innerException, + HttpUtils.asMap(headers), + HttpConstants.StatusCodes.BADREQUEST, + requestUrlString); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/BridgeInternal.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/BridgeInternal.java new file mode 100644 index 0000000000000..f0e29b03accd6 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/BridgeInternal.java @@ -0,0 +1,438 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.Configs; +import com.azure.data.cosmos.internal.Constants; +import com.azure.data.cosmos.internal.DatabaseAccount; +import com.azure.data.cosmos.internal.Document; +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.QueryMetrics; +import com.azure.data.cosmos.internal.ReplicationPolicy; +import com.azure.data.cosmos.internal.ResourceResponse; +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import com.azure.data.cosmos.internal.RxDocumentServiceResponse; +import com.azure.data.cosmos.internal.StoredProcedureResponse; +import com.azure.data.cosmos.internal.Strings; +import com.azure.data.cosmos.internal.directconnectivity.StoreResult; +import com.azure.data.cosmos.internal.query.metrics.ClientSideMetrics; +import com.azure.data.cosmos.internal.routing.PartitionKeyInternal; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.node.ObjectNode; + +import java.net.URI; +import java.time.OffsetDateTime; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentMap; + +import static com.azure.data.cosmos.internal.Constants.QueryExecutionContext.INCREMENTAL_FEED_HEADER_VALUE; + +/** + * This is meant to be used only internally as a bridge access to classes in + * com.azure.data.cosmos + **/ +public class BridgeInternal { + + public static CosmosError createCosmosError(ObjectNode objectNode) { + return new CosmosError(objectNode); + } + + public static CosmosError createCosmosError(String jsonString) { + return new CosmosError(jsonString); + } + + public static Document documentFromObject(Object document, ObjectMapper mapper) { + return Document.FromObject(document, mapper); + } + + public static ResourceResponse toResourceResponse(RxDocumentServiceResponse response, + Class cls) { + return new ResourceResponse(response, cls); + } + + public static FeedResponse toFeedResponsePage(RxDocumentServiceResponse response, + Class cls) { + return new FeedResponse(response.getQueryResponse(cls), response.getResponseHeaders()); + } + + public static FeedResponse toChaneFeedResponsePage(RxDocumentServiceResponse response, + Class cls) { + return new FeedResponse(noChanges(response) ? 
Collections.emptyList() : response.getQueryResponse(cls), + response.getResponseHeaders(), noChanges(response)); + } + + public static StoredProcedureResponse toStoredProcedureResponse(RxDocumentServiceResponse response) { + return new StoredProcedureResponse(response); + } + + public static DatabaseAccount toDatabaseAccount(RxDocumentServiceResponse response) { + DatabaseAccount account = response.getResource(DatabaseAccount.class); + + // read the headers and set to the account + Map responseHeader = response.getResponseHeaders(); + + account.setMaxMediaStorageUsageInMB( + Long.valueOf(responseHeader.get(HttpConstants.HttpHeaders.MAX_MEDIA_STORAGE_USAGE_IN_MB))); + account.setMediaStorageUsageInMB( + Long.valueOf(responseHeader.get(HttpConstants.HttpHeaders.CURRENT_MEDIA_STORAGE_USAGE_IN_MB))); + + return account; + } + + public static Map getFeedHeaders(ChangeFeedOptions options) { + + if (options == null) + return new HashMap<>(); + + Map headers = new HashMap<>(); + + if (options.maxItemCount() != null) { + headers.put(HttpConstants.HttpHeaders.PAGE_SIZE, options.maxItemCount().toString()); + } + + String ifNoneMatchValue = null; + if (options.requestContinuation() != null) { + ifNoneMatchValue = options.requestContinuation(); + } else if (!options.startFromBeginning()) { + ifNoneMatchValue = "*"; + } + // On REST level, change feed is using IF_NONE_MATCH/ETag instead of + // continuation. + if (ifNoneMatchValue != null) { + headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, ifNoneMatchValue); + } + + headers.put(HttpConstants.HttpHeaders.A_IM, INCREMENTAL_FEED_HEADER_VALUE); + + return headers; + } + + public static Map getFeedHeaders(FeedOptions options) { + + if (options == null) + return new HashMap<>(); + + Map headers = new HashMap<>(); + + if (options.maxItemCount() != null) { + headers.put(HttpConstants.HttpHeaders.PAGE_SIZE, options.maxItemCount().toString()); + } + + if (options.requestContinuation() != null) { + headers.put(HttpConstants.HttpHeaders.CONTINUATION, options.requestContinuation()); + } + + if (options != null) { + if (options.sessionToken() != null) { + headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.sessionToken()); + } + + if (options.enableScanInQuery() != null) { + headers.put(HttpConstants.HttpHeaders.ENABLE_SCAN_IN_QUERY, options.enableScanInQuery().toString()); + } + + if (options.emitVerboseTracesInQuery() != null) { + headers.put(HttpConstants.HttpHeaders.EMIT_VERBOSE_TRACES_IN_QUERY, + options.emitVerboseTracesInQuery().toString()); + } + + if (options.enableCrossPartitionQuery() != null) { + headers.put(HttpConstants.HttpHeaders.ENABLE_CROSS_PARTITION_QUERY, + options.enableCrossPartitionQuery().toString()); + } + + if (options.maxDegreeOfParallelism() != 0) { + headers.put(HttpConstants.HttpHeaders.PARALLELIZE_CROSS_PARTITION_QUERY, Boolean.TRUE.toString()); + } + + if (options.responseContinuationTokenLimitInKb() > 0) { + headers.put(HttpConstants.HttpHeaders.RESPONSE_CONTINUATION_TOKEN_LIMIT_IN_KB, + Strings.toString(options.responseContinuationTokenLimitInKb())); + } + + if (options.populateQueryMetrics()) { + headers.put(HttpConstants.HttpHeaders.POPULATE_QUERY_METRICS, + String.valueOf(options.populateQueryMetrics())); + } + } + + return headers; + } + + public static boolean noChanges(FeedResponse page) { + return page.nochanges; + } + + public static boolean noChanges(RxDocumentServiceResponse rsp) { + return rsp.getStatusCode() == HttpConstants.StatusCodes.NOT_MODIFIED; + } + + public static FeedResponse 
createFeedResponse(List results, + Map headers) { + return new FeedResponse<>(results, headers); + } + + public static FeedResponse createFeedResponseWithQueryMetrics(List results, + Map headers, ConcurrentMap queryMetricsMap) { + return new FeedResponse<>(results, headers, queryMetricsMap); + } + + public static E setResourceAddress(E e, String resourceAddress) { + e.resourceAddress = resourceAddress; + return e; + } + + public static long getLSN(E e) { + return e.lsn; + } + + public static String getPartitionKeyRangeId(E e) { + return e.partitionKeyRangeId; + } + + public static String getResourceAddress(E e) { + return e.resourceAddress; + } + + public static E setLSN(E e, long lsn) { + e.lsn = lsn; + return e; + } + + public static E setPartitionKeyRangeId(E e, String partitionKeyRangeId) { + e.partitionKeyRangeId = partitionKeyRangeId; + return e; + } + + public static boolean isEnableMultipleWriteLocations(DatabaseAccount account) { + return account.isEnableMultipleWriteLocations(); + } + + public static boolean getUseMultipleWriteLocations(ConnectionPolicy policy) { + return policy.usingMultipleWriteLocations(); + } + + public static void setUseMultipleWriteLocations(ConnectionPolicy policy, boolean value) { + policy.usingMultipleWriteLocations(value); + } + + public static URI getRequestUri(CosmosClientException cosmosClientException) { + return cosmosClientException.requestUri; + } + + public static void setRequestHeaders(CosmosClientException cosmosClientException, + Map requestHeaders) { + cosmosClientException.requestHeaders = requestHeaders; + } + + public static Map getRequestHeaders( + CosmosClientException cosmosClientException) { + return cosmosClientException.requestHeaders; + } + + public static Map getQueryEngineConfiuration(DatabaseAccount databaseAccount) { + return databaseAccount.getQueryEngineConfiuration(); + } + + public static ReplicationPolicy getReplicationPolicy(DatabaseAccount databaseAccount) { + return databaseAccount.getReplicationPolicy(); + } + + public static ReplicationPolicy getSystemReplicationPolicy(DatabaseAccount databaseAccount) { + return databaseAccount.getSystemReplicationPolicy(); + } + + public static ConsistencyPolicy getConsistencyPolicy(DatabaseAccount databaseAccount) { + return databaseAccount.getConsistencyPolicy(); + } + + public static String getAltLink(Resource resource) { + return resource.altLink(); + } + + public static void setAltLink(Resource resource, String altLink) { + resource.altLink(altLink); + } + + public static void setMaxReplicaSetSize(ReplicationPolicy replicationPolicy, int value) { + replicationPolicy.setMaxReplicaSetSize(value); + } + + public static void putQueryMetricsIntoMap(FeedResponse response, String partitionKeyRangeId, + QueryMetrics queryMetrics) { + response.queryMetricsMap().put(partitionKeyRangeId, queryMetrics); + } + + public static QueryMetrics createQueryMetricsFromDelimitedStringAndClientSideMetrics( + String queryMetricsDelimitedString, ClientSideMetrics clientSideMetrics, String activityId) { + return QueryMetrics.createFromDelimitedStringAndClientSideMetrics(queryMetricsDelimitedString, + clientSideMetrics, activityId); + } + + public static QueryMetrics createQueryMetricsFromCollection(Collection queryMetricsCollection) { + return QueryMetrics.createFromCollection(queryMetricsCollection); + } + + public static ClientSideMetrics getClientSideMetrics(QueryMetrics queryMetrics) { + return queryMetrics.getClientSideMetrics(); + } + + public static String 
getInnerErrorMessage(CosmosClientException cosmosClientException) { + if (cosmosClientException == null) { + return null; + } + return cosmosClientException.innerErrorMessage(); + } + + public static PartitionKeyInternal getNonePartitionKey(PartitionKeyDefinition partitionKeyDefinition) { + return partitionKeyDefinition.getNonePartitionKeyValue(); + } + + public static PartitionKey getPartitionKey(PartitionKeyInternal partitionKeyInternal) { + return new PartitionKey(partitionKeyInternal); + } + + public static void setProperty(JsonSerializable jsonSerializable, String propertyName, T value) { + jsonSerializable.set(propertyName, value); + } + + public static ObjectNode getObject(JsonSerializable jsonSerializable, String propertyName) { + return jsonSerializable.getObject(propertyName); + } + + public static void remove(JsonSerializable jsonSerializable, String propertyName) { + jsonSerializable.remove(propertyName); + } + + public static CosmosStoredProcedureProperties createCosmosStoredProcedureProperties(String jsonString) { + return new CosmosStoredProcedureProperties(jsonString); + } + + public static Object getValue(JsonNode value) { + return JsonSerializable.getValue(value); + } + + public static CosmosClientException setCosmosResponseDiagnostics(CosmosClientException cosmosClientException, CosmosResponseDiagnostics cosmosResponseDiagnostics) { + return cosmosClientException.cosmosResponseDiagnostics(cosmosResponseDiagnostics); + } + + public static CosmosClientException createCosmosClientException(int statusCode) { + return new CosmosClientException(statusCode, null, null, null); + } + + public static CosmosClientException createCosmosClientException(int statusCode, String errorMessage) { + CosmosClientException cosmosClientException = new CosmosClientException(statusCode, errorMessage, null, null); + cosmosClientException.error(new CosmosError()); + cosmosClientException.error().set(Constants.Properties.MESSAGE, errorMessage); + return cosmosClientException; + } + + public static CosmosClientException createCosmosClientException(int statusCode, Exception innerException) { + return new CosmosClientException(statusCode, null, null, innerException); + } + + public static CosmosClientException createCosmosClientException(int statusCode, CosmosError cosmosErrorResource, Map responseHeaders) { + return new CosmosClientException(/* resourceAddress */ null, statusCode, cosmosErrorResource, responseHeaders); + } + + public static CosmosClientException createCosmosClientException(String resourceAddress, int statusCode, CosmosError cosmosErrorResource, Map responseHeaders) { + CosmosClientException cosmosClientException = new CosmosClientException(statusCode, cosmosErrorResource == null ? 
null : cosmosErrorResource.getMessage(), responseHeaders, null); + cosmosClientException.resourceAddress = resourceAddress; + cosmosClientException.error(cosmosErrorResource); + return cosmosClientException; + } + + public static CosmosClientException createCosmosClientException(String message, Exception exception, Map responseHeaders, int statusCode, String resourceAddress) { + CosmosClientException cosmosClientException = new CosmosClientException(statusCode, message, responseHeaders, exception); + cosmosClientException.resourceAddress = resourceAddress; + return cosmosClientException; + } + + public static Configs extractConfigs(CosmosClientBuilder cosmosClientBuilder) { + return cosmosClientBuilder.configs(); + } + + public static CosmosClientBuilder injectConfigs(CosmosClientBuilder cosmosClientBuilder, Configs configs) { + return cosmosClientBuilder.configs(configs); + } + + public static String extractContainerSelfLink(CosmosContainer container) { + return container.getLink(); + } + + public static String extractResourceSelfLink(Resource resource) { return resource.selfLink(); } + + public static void setResourceSelfLink(Resource resource, String selfLink) { resource.selfLink(selfLink); } + + public static void populatePropertyBagJsonSerializable(JsonSerializable jsonSerializable) { jsonSerializable.populatePropertyBag(); } + + public static void setMapper(JsonSerializable jsonSerializable, ObjectMapper om) { + jsonSerializable.setMapper(om); + } + + public static void setTimestamp(Resource resource, OffsetDateTime date) { + resource.timestamp(date); + } + + public static CosmosResponseDiagnostics createCosmosResponseDiagnostics() { + return new CosmosResponseDiagnostics(); + } + + public static void recordResponse(CosmosResponseDiagnostics cosmosResponseDiagnostics, + RxDocumentServiceRequest request, StoreResult storeResult) { + cosmosResponseDiagnostics.clientSideRequestStatistics().recordResponse(request, storeResult); + } + + public static String recordAddressResolutionStart(CosmosResponseDiagnostics cosmosResponseDiagnostics, + URI targetEndpoint) { + return cosmosResponseDiagnostics.clientSideRequestStatistics().recordAddressResolutionStart(targetEndpoint); + } + + public static void recordAddressResolutionEnd(CosmosResponseDiagnostics cosmosResponseDiagnostics, + String identifier) { + cosmosResponseDiagnostics.clientSideRequestStatistics().recordAddressResolutionEnd(identifier); + } + + public static List getContactedReplicas(CosmosResponseDiagnostics cosmosResponseDiagnostics) { + return cosmosResponseDiagnostics.clientSideRequestStatistics().getContactedReplicas(); + } + + public static void setContactedReplicas(CosmosResponseDiagnostics cosmosResponseDiagnostics, List contactedReplicas) { + cosmosResponseDiagnostics.clientSideRequestStatistics().setContactedReplicas(contactedReplicas); + } + + public static Set getFailedReplicas(CosmosResponseDiagnostics cosmosResponseDiagnostics) { + return cosmosResponseDiagnostics.clientSideRequestStatistics().getFailedReplicas(); + } + + public static ConcurrentMap queryMetricsFromFeedResponse(FeedResponse feedResponse) { + return feedResponse.queryMetrics(); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/ChangeFeedOptions.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/ChangeFeedOptions.java new file mode 100644 index 0000000000000..5635320ebc8ae --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/ChangeFeedOptions.java @@ -0,0 +1,221 @@ +/* + * The MIT License (MIT) + * Copyright 
(c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.PartitionKeyRange; + +import java.time.OffsetDateTime; +import java.util.Map; + +/** + * Specifies the options associated with change feed methods (enumeration + * operations) in the Azure Cosmos DB database service. + */ +public final class ChangeFeedOptions { + private String partitionKeyRangeId; + private boolean startFromBeginning; + private OffsetDateTime startDateTime; + private Integer maxItemCount; + private String requestContinuation; + private PartitionKey partitionkey; + private boolean populateQueryMetrics; + private Map properties; + + public ChangeFeedOptions() { + } + + public ChangeFeedOptions(ChangeFeedOptions options) { + this.partitionKeyRangeId = options.partitionKeyRangeId; + this.startFromBeginning = options.startFromBeginning; + this.startDateTime = options.startDateTime; + this.maxItemCount = options.maxItemCount; + this.requestContinuation = options.requestContinuation; + this.partitionkey = options.partitionkey; + this.populateQueryMetrics = options.populateQueryMetrics; + } + + /** + * Get the partition key range id for the current request + *

+ * ChangeFeed requests can be executed against specific partition key ranges. + * This is used to process the change feed in parallel across multiple + * consumers. + *

+ * + * @return a string indicating the partition key range ID + * @see PartitionKeyRange + */ + String partitionKeyRangeId() { + return partitionKeyRangeId; + } + + /** + * Set the partition key range id for the current request + *

+ * ChangeFeed requests can be executed against specific partition key ranges. + * This is used to process the change feed in parallel across multiple + * consumers. + *

+ * + * @param partitionKeyRangeId a string indicating the partition key range ID + * @see PartitionKeyRange + * @return the ChangeFeedOptions. + */ + ChangeFeedOptions partitionKeyRangeId(String partitionKeyRangeId) { + this.partitionKeyRangeId = partitionKeyRangeId; + return this; + } + + /** + * Get whether change feed should start from beginning (true) or from current + * (false). By default it's start from current (false). + * + * @return a boolean value indicating change feed should start from beginning or + * not + */ + public boolean startFromBeginning() { + return startFromBeginning; + } + + /** + * Set whether change feed should start from beginning (true) or from current + * (false). By default it's start from current (false). + * + * @param startFromBeginning a boolean value indicating change feed should start + * from beginning or not + * @return the ChangeFeedOptions. + */ + public ChangeFeedOptions startFromBeginning(boolean startFromBeginning) { + this.startFromBeginning = startFromBeginning; + return this; + } + + /** + * Gets the zoned date time to start looking for changes after. + * + * @return a zoned date time to start looking for changes after, if set or null + * otherwise + */ + public OffsetDateTime startDateTime() { + return startDateTime; + } + + /** + * Sets the zoned date time (exclusive) to start looking for changes after. If + * this is specified, startFromBeginning is ignored. + * + * @param startDateTime a zoned date time to start looking for changes after. + * @return the ChangeFeedOptions. + */ + public ChangeFeedOptions startDateTime(OffsetDateTime startDateTime) { + this.startDateTime = startDateTime; + return this; + } + + /** + * Gets the maximum number of items to be returned in the enumeration + * operation. + * + * @return the max number of items. + */ + public Integer maxItemCount() { + return this.maxItemCount; + } + + /** + * Sets the maximum number of items to be returned in the enumeration + * operation. + * + * @param maxItemCount the max number of items. + * @return the FeedOptionsBase. + */ + public ChangeFeedOptions maxItemCount(Integer maxItemCount) { + this.maxItemCount = maxItemCount; + return this; + } + + /** + * Gets the request continuation token. + * + * @return the request continuation. + */ + public String requestContinuation() { + return this.requestContinuation; + } + + /** + * Sets the request continuation token. + * + * @param requestContinuation + * the request continuation. + * @return the FeedOptionsBase. + */ + public ChangeFeedOptions requestContinuation(String requestContinuation) { + this.requestContinuation = requestContinuation; + return this; + } + + /** + * Gets the partition key used to identify the current request's target + * partition. + * + * @return the partition key. + */ + public PartitionKey partitionKey() { + return this.partitionkey; + } + + /** + * Sets the partition key used to identify the current request's target + * partition. + * + * @param partitionkey + * the partition key value. + * @return the FeedOptionsBase. + */ + public ChangeFeedOptions partitionKey(PartitionKey partitionkey) { + this.partitionkey = partitionkey; + return this; + } + + /** + * Gets the properties + * + * @return Map of request options properties + */ + public Map properties() { + return properties; + } + + /** + * Sets the properties used to identify the request token. + * + * @param properties the properties. + * @return the FeedOptionsBase. 
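+ * A minimal sketch of how these options are typically combined fluently (the values are illustrative
+ * assumptions, not defaults):
+ * {@code
+ * ChangeFeedOptions options = new ChangeFeedOptions()
+ *     .startFromBeginning(true)   // replay the feed instead of starting from "now"
+ *     .maxItemCount(100);         // page size for each enumeration round trip
+ * }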
+ */ + public ChangeFeedOptions properties(Map properties) { + this.properties = properties; + return this; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/ChangeFeedProcessor.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/ChangeFeedProcessor.java new file mode 100644 index 0000000000000..22f4f6072d5bb --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/ChangeFeedProcessor.java @@ -0,0 +1,155 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.changefeed.ChangeFeedObserver; +import com.azure.data.cosmos.internal.changefeed.implementation.ChangeFeedProcessorBuilderImpl; +import reactor.core.publisher.Mono; + +import java.util.List; +import java.util.function.Consumer; + +/** + * Simple host for distributing change feed events across observers and thus allowing these observers scale. + * It distributes the load across its instances and allows dynamic scaling: + * - Partitions in partitioned collections are distributed across instances/observers. + * - New instance takes leases from existing instances to make distribution equal. + * - If an instance dies, the leases are distributed across remaining instances. + * It's useful for scenario when partition count is high so that one host/VM is not capable of processing that many change feed events. + * Client application needs to implement {@link ChangeFeedObserver} and register processor implementation with {@link ChangeFeedProcessor}. + *

+ * It uses an auxiliary document collection for managing the leases for the partitions. + * Every EventProcessorHost instance performs the following two tasks: + * 1) Renew Leases: It keeps track of the leases currently owned by the host and continuously renews them. + * 2) Acquire Leases: Each instance continuously polls all leases to check whether there are any leases it should acquire + * for the system to reach a balanced state. + *
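+ * Once an instance has been built (see the builder example below), it is driven through its reactive
+ * start/stop methods; a minimal sketch, assuming a previously built changeFeedProcessor instance:
+ * {@code
+ * changeFeedProcessor.start().subscribe();   // begin pumping changes to the registered consumer
+ * // ... later, on shutdown:
+ * changeFeedProcessor.stop().subscribe();
+ * }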

+ * {@code + * ChangeFeedProcessor changeFeedProcessor = ChangeFeedProcessor.Builder() + * .hostName(hostName) + * .feedContainer(feedContainer) + * .leaseContainer(leaseContainer) + * .handleChanges(docs -> { + * // Implementation for handling and processing CosmosItemProperties list goes here + * }) + * .build(); + * } + */ +public interface ChangeFeedProcessor { + + /** + * Start listening for changes asynchronously. + * + * @return a representation of the deferred computation of this call. + */ + Mono start(); + + /** + * Stops listening for changes asynchronously. + * + * @return a representation of the deferred computation of this call. + */ + Mono stop(); + + /** + * Helper static method to build {@link ChangeFeedProcessor} instances + * as logical representation of the Azure Cosmos DB database service. + *

+ * {@code + * + * ChangeFeedProcessor.Builder() + * .hostName("SampleHost") + * .feedContainer(feedContainer) + * .leaseContainer(leaseContainer) + * .handleChanges(docs -> { + * // Implementation for handling and processing CosmosItemProperties list goes here + * }) + * .build(); + * } + * + * @return a builder definition instance. + */ + static BuilderDefinition Builder() { + return new ChangeFeedProcessorBuilderImpl(); + } + + /** + * The {@link ChangeFeedProcessor} builder definitions for setting the properties. + */ + interface BuilderDefinition { + /** + * Sets the host name. + * + * @param hostName the name to be used for the host. When using multiple hosts, each host must have a unique name. + * @return current Builder. + */ + BuilderDefinition hostName(String hostName); + + /** + * Sets an existing {@link CosmosContainer} to be used to read from the monitored collection. + * + * @param feedContainer the instance of {@link CosmosContainer} to be used. + * @return current Builder. + */ + BuilderDefinition feedContainer(CosmosContainer feedContainer); + + /** + * Sets the {@link ChangeFeedProcessorOptions} to be used. + *

+ * Unless specifically set the default values that will be used are: + * - maximum items per page or FeedResponse: 100 + * - lease renew interval: 17 seconds + * - lease acquire interval: 13 seconds + * - lease expiration interval: 60 seconds + * - feed poll delay: 5 seconds + * - maximum scale count: unlimited + * + * @param changeFeedProcessorOptions the change feed processor options to use. + * @return current Builder. + */ + BuilderDefinition options(ChangeFeedProcessorOptions changeFeedProcessorOptions); + + /** + * Sets a consumer function which will be called to process changes. + * + * @param consumer the consumer of {@link ChangeFeedObserver} to call for handling the feeds. + * @return current Builder. + */ + BuilderDefinition handleChanges(Consumer> consumer); + + /** + * Sets an existing {@link CosmosContainer} to be used to read from the leases collection. + * + * @param leaseContainer the instance of {@link CosmosContainer} to use. + * @return current Builder. + */ + BuilderDefinition leaseContainer(CosmosContainer leaseContainer); + + /** + * Builds a new instance of the {@link ChangeFeedProcessor} with the specified configuration asynchronously. + * + * @return an instance of {@link ChangeFeedProcessor}. + */ + ChangeFeedProcessor build(); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/ChangeFeedProcessorOptions.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/ChangeFeedProcessorOptions.java new file mode 100644 index 0000000000000..a712913581b3a --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/ChangeFeedProcessorOptions.java @@ -0,0 +1,348 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos; + +import java.time.Duration; +import java.time.OffsetDateTime; + +/** + * Specifies the options associated with {@link ChangeFeedProcessor}. 
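+ * A rough sketch of typical usage: the no-argument constructor already applies the documented defaults
+ * (100 items per page, 17s lease renew, 13s lease acquire, 60s lease expiration, 5s feed poll delay,
+ * unlimited scale count), so a configuration usually overrides only what it needs and is then passed to
+ * the builder through its options(...) setter. The values below are illustrative assumptions:
+ * {@code
+ * ChangeFeedProcessorOptions options = new ChangeFeedProcessorOptions()
+ *     .leasePrefix("host-a-")                 // optional namespace for the lease ids
+ *     .feedPollDelay(Duration.ofSeconds(2))   // poll more often than the 5s default
+ *     .startFromBeginning(true);              // read the whole feed on first start
+ * }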
+ */ +public class ChangeFeedProcessorOptions { + private static Duration DefaultRenewInterval = Duration.ofMillis(0).plusSeconds(17); + private static Duration DefaultAcquireInterval = Duration.ofMillis(0).plusSeconds(13); + private static Duration DefaultExpirationInterval = Duration.ofMillis(0).plusSeconds(60); + private static Duration DefaultFeedPollDelay = Duration.ofMillis(0).plusSeconds(5); + + private Duration leaseRenewInterval; + private Duration leaseAcquireInterval; + private Duration leaseExpirationInterval; + private Duration feedPollDelay; + + private String leasePrefix; + private int maxItemCount; + private String startContinuation; + private OffsetDateTime startTime; + private boolean startFromBeginning; + private int minScaleCount; + private int maxScaleCount; + private boolean discardExistingLeases; + + public ChangeFeedProcessorOptions() { + this.maxItemCount = 100; + this.startFromBeginning = false; + this.leaseRenewInterval = DefaultRenewInterval; + this.leaseAcquireInterval = DefaultAcquireInterval; + this.leaseExpirationInterval = DefaultExpirationInterval; + this.feedPollDelay = DefaultFeedPollDelay; + this.maxScaleCount = 0; // unlimited + } + + /** + * Gets the renew interval for all leases for partitions currently held by {@link ChangeFeedProcessor} instance. + * + * @return the renew interval for all leases for partitions. + */ + public Duration leaseRenewInterval() { + return this.leaseRenewInterval; + } + + /** + * Sets the renew interval for all leases for partitions currently held by {@link ChangeFeedProcessor} instance. + * + * @param leaseRenewInterval the renew interval for all leases for partitions currently held by {@link ChangeFeedProcessor} instance. + * @return the current ChangeFeedProcessorOptions instance. + */ + public ChangeFeedProcessorOptions leaseRenewInterval(Duration leaseRenewInterval) { + this.leaseRenewInterval = leaseRenewInterval; + return this; + } + + /** + * Gets the interval to kick off a task to compute if partitions are distributed evenly among known host instances. + * + * @return the interval to kick off a task to compute if partitions are distributed evenly among known host instances. + */ + public Duration leaseAcquireInterval() { + return this.leaseAcquireInterval; + } + + /** + * Sets he interval to kick off a task to compute if partitions are distributed evenly among known host instances. + * @param leaseAcquireInterval he interval to kick off a task to compute if partitions are distributed evenly among known host instances. + * @return the current ChangeFeedProcessorOptions instance. + */ + public ChangeFeedProcessorOptions leaseAcquireInterval(Duration leaseAcquireInterval) { + this.leaseAcquireInterval = leaseAcquireInterval; + return this; + } + + /** + * Gets the interval for which the lease is taken on a lease representing a partition. + * + *

+ * If the lease is not renewed within this interval, the lease will expire and ownership of the partition will + * move to another {@link ChangeFeedProcessor} instance. + * + * @return the interval for which the lease is taken on a lease representing a partition. + */ + public Duration leaseExpirationInterval() { + return this.leaseExpirationInterval; + } + + /** + * Sets the interval for which the lease is taken on a lease representing a partition. + * + *

+ * If the lease is not renewed within this interval, the lease will expire and ownership of the partition will + * move to another {@link ChangeFeedProcessor} instance. + * + * @param leaseExpirationInterval the interval for which the lease is taken on a lease representing a partition. + * @return the current ChangeFeedProcessorOptions instance. + */ + public ChangeFeedProcessorOptions leaseExpirationInterval(Duration leaseExpirationInterval) { + this.leaseExpirationInterval = leaseExpirationInterval; + return this; + } + + /** + * Gets the delay in between polling a partition for new changes on the feed, after all current changes are drained. + * + * @return the delay in between polling a partition for new changes on the feed. + */ + public Duration feedPollDelay() { + return this.feedPollDelay; + } + + /** + * Sets the delay in between polling a partition for new changes on the feed, after all current changes are drained. + * + * @param feedPollDelay the delay in between polling a partition for new changes on the feed, after all current changes are drained. + * @return the current ChangeFeedProcessorOptions instance. + */ + public ChangeFeedProcessorOptions feedPollDelay(Duration feedPollDelay) { + this.feedPollDelay = feedPollDelay; + return this; + } + + /** + * Gets a prefix to be used as part of the lease ID. + *

+ * This can be used to support multiple {@link ChangeFeedProcessor} instances pointing at the same + * feed while using the same auxiliary collection. + * + * @return a prefix to be used as part of the lease ID. + */ + public String leasePrefix() { + return this.leasePrefix; + } + + /** + * Sets a prefix to be used as part of the lease ID. + * + * @param leasePrefix a prefix to be used as part of the lease ID. + * @return the current ChangeFeedProcessorOptions instance. + */ + public ChangeFeedProcessorOptions leasePrefix(String leasePrefix) { + this.leasePrefix = leasePrefix; + return this; + } + + /** + * Gets the maximum number of items to be returned in the enumeration operation in the Azure Cosmos DB service. + * + * @return the maximum number of items to be returned in the enumeration operation in the Azure Cosmos DB service. + */ + public int maxItemCount() { + return this.maxItemCount; + } + + /** + * Sets the maximum number of items to be returned in the enumeration operation. + * + * @param maxItemCount the maximum number of items to be returned in the enumeration operation. + * @return the current ChangeFeedProcessorOptions instance. + */ + public ChangeFeedProcessorOptions maxItemCount(int maxItemCount) { + this.maxItemCount = maxItemCount; + return this; + } + + /** + * Gets the start request continuation token to start looking for changes after. + *

+ * This is only used when the lease store is not initialized, and it is ignored if a lease for the partition exists and + * has a continuation token. If this is specified, both StartTime and StartFromBeginning are ignored. + * + * @return the start request continuation token to start looking for changes after. + */ + public String startContinuation() { + return this.startContinuation; + } + + /** + * Sets the start request continuation token to start looking for changes after. + *

+ * This is only used when the lease store is not initialized, and it is ignored if a lease for the partition exists and + * has a continuation token. If this is specified, both StartTime and StartFromBeginning are ignored. + * + * @param startContinuation the start request continuation token to start looking for changes after. + * @return the current ChangeFeedProcessorOptions instance. + */ + public ChangeFeedProcessorOptions startContinuation(String startContinuation) { + this.startContinuation = startContinuation; + return this; + } + + /** + * Gets the time (exclusive) to start looking for changes after. + *

+ * This is only used when: + * (1) The lease store is not initialized; the setting is ignored if a lease for the partition exists and has a continuation token. + * (2) StartContinuation is not specified. + * If this is specified, StartFromBeginning is ignored. + * + * @return the time (exclusive) to start looking for changes after. + */ + public OffsetDateTime startTime() { + return this.startTime; + } + + /** + * Sets the time (exclusive) to start looking for changes after (UTC time). + *

+ * This is only used when: + * (1) The lease store is not initialized; the setting is ignored if a lease for the partition exists and has a continuation token. + * (2) StartContinuation is not specified. + * If this is specified, StartFromBeginning is ignored. + * + * @param startTime the time (exclusive) to start looking for changes after. + * @return the current ChangeFeedProcessorOptions instance. + */ + public ChangeFeedProcessorOptions startTime(OffsetDateTime startTime) { + this.startTime = startTime; + return this; + } + + /** + * Gets a value indicating whether change feed in the Azure Cosmos DB service should start from the beginning (true) + * or from current (false). By default it starts from current (false). + *

+ * This is only used when: + * (1) The lease store is not initialized; the setting is ignored if a lease for the partition exists and has a continuation token. + * (2) StartContinuation is not specified. + * (3) StartTime is not specified. + * + * @return a value indicating whether change feed in the Azure Cosmos DB service should start from the beginning. + */ + public boolean startFromBeginning() { + return this.startFromBeginning; + } + + /** + * Sets a value indicating whether change feed in the Azure Cosmos DB service should start from the beginning. + *

+ * This is only used when: + * (1) The lease store is not initialized; the setting is ignored if a lease for the partition exists and has a continuation token. + * (2) StartContinuation is not specified. + * (3) StartTime is not specified. + * + * @param startFromBeginning indicates to start from the beginning if true + * @return the current ChangeFeedProcessorOptions instance. + */ + public ChangeFeedProcessorOptions startFromBeginning(boolean startFromBeginning) { + this.startFromBeginning = startFromBeginning; + return this; + } + + /** + * Gets the minimum partition count for the host. + *

+ * This can be used to increase the number of partitions for the host and thus override equal distribution (which + * is the default) of leases between hosts. + * + * @return the minimum scale count for the host. + */ + public int minScaleCount() { + return this.minScaleCount; + } + + /** + * Sets the minimum partition count for the host. + *

+ * This can be used to increase the number of partitions for the host and thus override equal distribution (which + * is the default) of leases between hosts. + * + * @param minScaleCount the minimum partition count for the host. + * @return the current ChangeFeedProcessorOptions instance. + */ + public ChangeFeedProcessorOptions minScaleCount(int minScaleCount) { + this.minScaleCount = minScaleCount; + return this; + } + + /** + * Gets the maximum number of partitions the host can serve. + *

+ * This can be used property to limit the number of partitions for the host and thus override equal distribution + * (which is the default) of leases between hosts. DEFAULT is 0 (unlimited). + * + * @return the maximum number of partitions the host can serve. + */ + public int maxScaleCount() { + return this.maxScaleCount; + } + + /** + * Sets the maximum number of partitions the host can serve. + * + * @param maxScaleCount the maximum number of partitions the host can serve. + * @return the current ChangeFeedProcessorOptions instance. + */ + public ChangeFeedProcessorOptions maxScaleCount(int maxScaleCount) { + this.maxScaleCount = maxScaleCount; + return this; + } + + /** + * Gets a value indicating whether on start of the host all existing leases should be deleted and the host + * should start from scratch. + * + * @return a value indicating whether on start of the host all existing leases should be deleted and the host should start from scratch. + */ + public boolean discardExistingLeases() { + return this.discardExistingLeases; + } + + /** + * Sets a value indicating whether on start of the host all existing leases should be deleted and the host + * should start from scratch. + * + * @param discardExistingLeases Indicates whether to discard all existing leases if true + * @return the current ChangeFeedProcessorOptions instance. + */ + public ChangeFeedProcessorOptions discardExistingLeases(boolean discardExistingLeases) { + this.discardExistingLeases = discardExistingLeases; + return this; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/ClientSideRequestStatistics.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/ClientSideRequestStatistics.java new file mode 100644 index 0000000000000..39d4d16f3159a --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/ClientSideRequestStatistics.java @@ -0,0 +1,275 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.OperationType; +import com.azure.data.cosmos.internal.ResourceType; +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import com.azure.data.cosmos.internal.Utils; +import com.azure.data.cosmos.internal.directconnectivity.StoreResult; +import org.apache.commons.lang3.StringUtils; + +import java.net.URI; +import java.net.URISyntaxException; +import java.time.Duration; +import java.time.LocalDateTime; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; +import java.time.format.DateTimeFormatter; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Set; + +class ClientSideRequestStatistics { + + private final static int MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING = 10; + + private final static DateTimeFormatter responseTimeFormatter = DateTimeFormatter.ofPattern("dd MMM yyyy HH:mm:ss.SSS").withLocale(Locale.US); + + private ZonedDateTime requestStartTime; + private ZonedDateTime requestEndTime; + + private List responseStatisticsList; + private List supplementalResponseStatisticsList; + private Map addressResolutionStatistics; + + private List contactedReplicas; + private Set failedReplicas; + private Set regionsContacted; + + ClientSideRequestStatistics() { + this.requestStartTime = ZonedDateTime.now(ZoneOffset.UTC); + this.requestEndTime = ZonedDateTime.now(ZoneOffset.UTC); + this.responseStatisticsList = new ArrayList<>(); + this.supplementalResponseStatisticsList = new ArrayList<>(); + this.addressResolutionStatistics = new HashMap<>(); + this.contactedReplicas = new ArrayList<>(); + this.failedReplicas = new HashSet<>(); + this.regionsContacted = new HashSet<>(); + } + + Duration getRequestLatency() { + return Duration.between(requestStartTime, requestEndTime); + } + + private boolean isCPUOverloaded() { + // NOTE: CPUMonitor and CPULoadHistory is not implemented in async SDK yet. 
+ return false; + } + + void recordResponse(RxDocumentServiceRequest request, StoreResult storeResult) { + ZonedDateTime responseTime = ZonedDateTime.now(ZoneOffset.UTC); + + StoreResponseStatistics storeResponseStatistics = new StoreResponseStatistics(); + storeResponseStatistics.requestResponseTime = responseTime; + storeResponseStatistics.storeResult = storeResult; + storeResponseStatistics.requestOperationType = request.getOperationType(); + storeResponseStatistics.requestResourceType = request.getResourceType(); + + URI locationEndPoint = null; + if (request.requestContext.locationEndpointToRoute != null) { + try { + locationEndPoint = request.requestContext.locationEndpointToRoute.toURI(); + } catch (URISyntaxException e) { + throw new IllegalArgumentException(e); + } + } + + synchronized (this) { + if (responseTime.isAfter(this.requestEndTime)) { + this.requestEndTime = responseTime; + } + + if (locationEndPoint != null) { + this.regionsContacted.add(locationEndPoint); + } + + if (storeResponseStatistics.requestOperationType == OperationType.Head || + storeResponseStatistics.requestOperationType == OperationType.HeadFeed) { + this.supplementalResponseStatisticsList.add(storeResponseStatistics); + } else { + this.responseStatisticsList.add(storeResponseStatistics); + } + } + } + + String recordAddressResolutionStart(URI targetEndpoint) { + String identifier = Utils.randomUUID().toString(); + + AddressResolutionStatistics resolutionStatistics = new AddressResolutionStatistics(); + resolutionStatistics.startTime = ZonedDateTime.now(ZoneOffset.UTC); + // Very far in the future + resolutionStatistics.endTime = ZonedDateTime.of(LocalDateTime.MAX, ZoneOffset.UTC); + resolutionStatistics.targetEndpoint = targetEndpoint == null ? "" : targetEndpoint.toString(); + + synchronized (this) { + this.addressResolutionStatistics.put(identifier, resolutionStatistics); + } + + return identifier; + } + + void recordAddressResolutionEnd(String identifier) { + if (StringUtils.isEmpty(identifier)) { + return; + } + ZonedDateTime responseTime = ZonedDateTime.now(ZoneOffset.UTC); + + synchronized (this) { + if (!this.addressResolutionStatistics.containsKey(identifier)) { + throw new IllegalArgumentException("Identifier " + identifier + " does not exist. Please call start before calling end"); + } + + if (responseTime.isAfter(this.requestEndTime)) { + this.requestEndTime = responseTime; + } + + AddressResolutionStatistics resolutionStatistics = this.addressResolutionStatistics.get(identifier); + resolutionStatistics.endTime = responseTime; + } + } + + @Override + public String toString() { + StringBuilder stringBuilder = new StringBuilder(); + + // need to lock in case of concurrent operations. this should be extremely rare since toString() + // should only be called at the end of request. + synchronized (this) { + + // first trace request start time, as well as total non-head/headfeed requests made. + stringBuilder.append("RequestStartTime: ") + .append("\"").append(this.requestStartTime.format(responseTimeFormatter)).append("\"") + .append(", ") + .append("RequestEndTime: ") + .append("\"").append(this.requestEndTime.format(responseTimeFormatter)).append("\"") + .append(", ") + .append("Duration: ") + .append(Duration.between(requestStartTime, requestEndTime).toMillis()) + .append(" ms, ") + .append("NUMBER of regions attempted: ") + .append(this.regionsContacted.isEmpty() ? 
1 : this.regionsContacted.size()) + .append(System.lineSeparator()); + + // take all responses here - this should be limited in number and each one contains relevant information. + for (StoreResponseStatistics storeResponseStatistics : this.responseStatisticsList) { + stringBuilder.append(storeResponseStatistics.toString()).append(System.lineSeparator()); + } + + // take all responses here - this should be limited in number and each one is important. + for (AddressResolutionStatistics value : this.addressResolutionStatistics.values()) { + stringBuilder.append(value.toString()).append(System.lineSeparator()); + } + + // only take last 10 responses from this list - this has potential of having large number of entries. + // since this is for establishing consistency, we can make do with the last responses to paint a meaningful picture. + int supplementalResponseStatisticsListCount = this.supplementalResponseStatisticsList.size(); + int initialIndex = Math.max(supplementalResponseStatisticsListCount - MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING, 0); + if (initialIndex != 0) { + stringBuilder.append(" -- Displaying only the last ") + .append(MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING) + .append(" head/headfeed requests. Total head/headfeed requests: ") + .append(supplementalResponseStatisticsListCount); + } + for (int i = initialIndex; i < supplementalResponseStatisticsListCount; i++) { + stringBuilder.append(this.supplementalResponseStatisticsList.get(i).toString()).append(System.lineSeparator()); + } + } + String requestStatsString = stringBuilder.toString(); + if (!requestStatsString.isEmpty()) { + return System.lineSeparator() + requestStatsString; + } + return StringUtils.EMPTY; + } + + List getContactedReplicas() { + return contactedReplicas; + } + + void setContactedReplicas(List contactedReplicas) { + this.contactedReplicas = contactedReplicas; + } + + Set getFailedReplicas() { + return failedReplicas; + } + + void setFailedReplicas(Set failedReplicas) { + this.failedReplicas = failedReplicas; + } + + Set getRegionsContacted() { + return regionsContacted; + } + + void setRegionsContacted(Set regionsContacted) { + this.regionsContacted = regionsContacted; + } + + private static String formatDateTime(ZonedDateTime dateTime) { + if (dateTime == null) { + return null; + } + return dateTime.format(responseTimeFormatter); + } + + private class StoreResponseStatistics { + + private ZonedDateTime requestResponseTime; + private StoreResult storeResult; + private ResourceType requestResourceType; + private OperationType requestOperationType; + + @Override + public String toString() { + return "StoreResponseStatistics{" + + "requestResponseTime=\"" + formatDateTime(requestResponseTime) + "\"" + + ", storeResult=" + storeResult + + ", requestResourceType=" + requestResourceType + + ", requestOperationType=" + requestOperationType + + '}'; + } + } + + private class AddressResolutionStatistics { + private ZonedDateTime startTime; + private ZonedDateTime endTime; + private String targetEndpoint; + + AddressResolutionStatistics() { + } + + @Override + public String toString() { + return "AddressResolutionStatistics{" + + "startTime=\"" + formatDateTime(startTime) + "\"" + + ", endTime=\"" + formatDateTime(endTime) + "\"" + + ", targetEndpoint='" + targetEndpoint + '\'' + + '}'; + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CommonsBridgeInternal.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CommonsBridgeInternal.java new file mode 100644 index 
0000000000000..00d1f014a5382 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CommonsBridgeInternal.java @@ -0,0 +1,70 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +public class CommonsBridgeInternal { + public static boolean isV2(PartitionKeyDefinition pkd) { + return pkd.version() != null && PartitionKeyDefinitionVersion.V2.val == pkd.version().val; + } + + public static void setV2(PartitionKeyDefinition pkd) { + pkd.version(PartitionKeyDefinitionVersion.V2); + } + + /** + * Gets the partitionKeyRangeId. + * + * @return the partitionKeyRangeId. + */ + public static String partitionKeyRangeIdInternal(FeedOptions options) { + return options.partitionKeyRangeIdInternal(); + } + + /** + * Gets the partitionKeyRangeId. + * + * @return the partitionKeyRangeId. + */ + public static String partitionKeyRangeIdInternal(ChangeFeedOptions options) { + return options.partitionKeyRangeId(); + } + + /** + * Sets the partitionKeyRangeId. + * + * @return the partitionKeyRangeId. + */ + public static FeedOptions partitionKeyRangeIdInternal(FeedOptions options, String partitionKeyRangeId) { + return options.partitionKeyRangeIdInternal(partitionKeyRangeId); + } + + /** + * Sets the partitionKeyRangeId. + * + * @return the partitionKeyRangeId. 
+ */ + public static ChangeFeedOptions partitionKeyRangeIdInternal(ChangeFeedOptions options, String partitionKeyRangeId) { + return options.partitionKeyRangeId(partitionKeyRangeId); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CompositePath.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CompositePath.java new file mode 100644 index 0000000000000..d20e46b4128c1 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CompositePath.java @@ -0,0 +1,106 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.Constants; +import org.apache.commons.lang3.StringUtils; + +/** + * Represents a composite path of the IndexingPolicy in the Azure Cosmos DB database service. + * A composite path is used in a composite index. For example if you want to run a query like + * "SELECT * FROM c ORDER BY c.age, c.height", then you need to add "/age" and "/height" + * as composite paths to your composite index. + */ +public class CompositePath extends JsonSerializable { + /** + * Constructor. + */ + public CompositePath() { + super(); + } + + /** + * Constructor. + * + * @param jsonString the json string that represents the included path. + */ + public CompositePath(String jsonString) { + super(jsonString); + } + + /** + * Gets path. + * + * @return the path. + */ + public String path() { + return super.getString(Constants.Properties.PATH); + } + + /** + * Sets path. + * + * @param path the path. + * @return the CompositePath. + */ + public CompositePath path(String path) { + super.set(Constants.Properties.PATH, path); + return this; + } + + /** + * Gets the sort order for the composite path. + * + * For example if you want to run the query "SELECT * FROM c ORDER BY c.age asc, c.height desc", + * then you need to make the order for "/age" "ascending" and the order for "/height" "descending". + * + * @return the sort order. 
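+ * As a small illustrative sketch, the two composite paths for the example query above could be built as
+ * follows (the variable names are arbitrary):
+ * {@code
+ * CompositePath age = new CompositePath().path("/age").order(CompositePathSortOrder.ASCENDING);
+ * CompositePath height = new CompositePath().path("/height").order(CompositePathSortOrder.DESCENDING);
+ * }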
+ */ + public CompositePathSortOrder order() { + String strValue = super.getString(Constants.Properties.ORDER); + if (!StringUtils.isEmpty(strValue)) { + try { + return CompositePathSortOrder.valueOf(StringUtils.upperCase(super.getString(Constants.Properties.ORDER))); + } catch (IllegalArgumentException e) { + this.getLogger().warn("INVALID indexingMode value {}.", super.getString(Constants.Properties.ORDER)); + return CompositePathSortOrder.ASCENDING; + } + } + return CompositePathSortOrder.ASCENDING; + } + + /** + * Gets the sort order for the composite path. + * + * For example if you want to run the query "SELECT * FROM c ORDER BY c.age asc, c.height desc", + * then you need to make the order for "/age" "ascending" and the order for "/height" "descending". + * + * @param order the sort order. + * @return the CompositePath. + */ + public CompositePath order(CompositePathSortOrder order) { + super.set(Constants.Properties.ORDER, order.toString()); + return this; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CompositePathSortOrder.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CompositePathSortOrder.java new file mode 100644 index 0000000000000..7440421065c5d --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CompositePathSortOrder.java @@ -0,0 +1,48 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +/** + * Represents the sorting order for a path in a composite index, for a + * collection in the Azure Cosmos DB database service. + */ +public enum CompositePathSortOrder { + /** + * ASCENDING sort order for composite paths. + */ + ASCENDING { + public String toString() { + return "ascending"; + } + }, + + /** + * DESCENDING sort order for composite paths. 
+ */ + DESCENDING { + public String toString() { + return "descending"; + } + }, +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/ConflictException.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/ConflictException.java new file mode 100644 index 0000000000000..a7bbd09d2ab37 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/ConflictException.java @@ -0,0 +1,80 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.RMResources; +import com.azure.data.cosmos.internal.directconnectivity.HttpUtils; +import com.azure.data.cosmos.internal.http.HttpHeaders; + +import java.util.Map; + +/** + * While this class is public, but it is not part of our published public APIs. + * This is meant to be internally used only by our sdk. 
+ */ +public class ConflictException extends CosmosClientException { + + private static final long serialVersionUID = 1L; + + ConflictException() { + this(RMResources.EntityAlreadyExists); + } + + public ConflictException(CosmosError cosmosError, long lsn, String partitionKeyRangeId, Map responseHeaders) { + super(HttpConstants.StatusCodes.CONFLICT, cosmosError, responseHeaders); + BridgeInternal.setLSN(this, lsn); + BridgeInternal.setPartitionKeyRangeId(this, partitionKeyRangeId); + } + + ConflictException(String msg) { + super(HttpConstants.StatusCodes.CONFLICT, msg); + } + + ConflictException(String msg, String resourceAddress) { + super(msg, null, null, HttpConstants.StatusCodes.CONFLICT, resourceAddress); + } + + public ConflictException(String message, HttpHeaders headers, String requestUriString) { + this(message, null, headers, requestUriString); + } + + ConflictException(Exception innerException) { + this(RMResources.EntityAlreadyExists, innerException, null, null); + } + + ConflictException(CosmosError cosmosError, Map headers) { + super(HttpConstants.StatusCodes.CONFLICT, cosmosError, headers); + } + + ConflictException(String message, + Exception innerException, + HttpHeaders headers, + String requestUriString) { + super(String.format("%s: %s", RMResources.EntityAlreadyExists, message), + innerException, + HttpUtils.asMap(headers), + HttpConstants.StatusCodes.CONFLICT, + requestUriString); + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/ConflictResolutionMode.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/ConflictResolutionMode.java new file mode 100644 index 0000000000000..5854e31f17e98 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/ConflictResolutionMode.java @@ -0,0 +1,63 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.Conflict; +import com.azure.data.cosmos.internal.StoredProcedure; +import org.apache.commons.lang3.StringUtils; +import org.apache.commons.text.WordUtils; + +public enum ConflictResolutionMode { + /** + * Last writer wins conflict resolution mode + * + * Setting the ConflictResolutionMode to "LAST_WRITER_WINS" indicates that conflict resolution should be done by inspecting a field in the conflicting documents + * and picking the document which has the higher value in that path. 
See {@link ConflictResolutionPolicy#conflictResolutionPath()} for details on how to specify the path + * to be checked for conflict resolution. Also note that Deletes win. + */ + LAST_WRITER_WINS, + + /** + * CUSTOM conflict resolution mode + * + * Setting the ConflictResolutionMode to "CUSTOM" indicates that conflict resolution is custom handled by a user. + * The user could elect to register a user specified {@link StoredProcedure} for handling conflicting resources. + * Should the user not register a user specified StoredProcedure, conflicts will default to being made available as {@link Conflict} resources, + * which the user can inspect and manually resolve. + * See {@link ConflictResolutionPolicy#conflictResolutionProcedure()} for details on how to specify the stored procedure + * to run for conflict resolution. + */ + CUSTOM, + + /** + * INVALID or unknown mode. + */ + INVALID; + + @Override + public String toString() { + return StringUtils.remove(WordUtils.capitalizeFully(this.name(), '_'), '_'); + } +} + diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/ConflictResolutionPolicy.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/ConflictResolutionPolicy.java new file mode 100644 index 0000000000000..9f862e8ed9a16 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/ConflictResolutionPolicy.java @@ -0,0 +1,225 @@ +package com.azure.data.cosmos; + + +import com.azure.data.cosmos.internal.Constants; +import com.azure.data.cosmos.internal.DocumentCollection; +import com.azure.data.cosmos.internal.StoredProcedure; +import com.azure.data.cosmos.internal.Strings; + + +/** + * Represents the conflict resolution policy configuration for specifying how to resolve conflicts + * in case writes from different regions result in conflicts on documents in the collection in the Azure Cosmos DB service. + * + * A collection with custom conflict resolution with no user-registered stored procedure. + *

{@code
+ * DocumentCollection collectionSpec = new DocumentCollection();
+ * collectionSpec.id("Multi-master collection");
+ *
+ * ConflictResolutionPolicy policy = ConflictResolutionPolicy.createCustomPolicy();
+ * collectionSpec.conflictResolutionPolicy(policy);
+ *
+ * DocumentCollection collection = client.createCollection(databaseLink, collectionSpec, null)
+ *         .toBlocking().single().getResource();
+ *
+ * }
+ * 
+ * + * A collection with custom conflict resolution with a user-registered stored procedure. + *
{@code
+ * DocumentCollection collectionSpec = new DocumentCollection();
+ * collectionSpec.id("Multi-master collection");
+ *
+ * ConflictResolutionPolicy policy = ConflictResolutionPolicy.createCustomPolicy(conflictResolutionSprocName);
+ * collectionSpec.conflictResolutionPolicy(policy);
+ *
+ * DocumentCollection collection = client.createCollection(databaseLink, collectionSpec, null)
+ *         .toBlocking().single().getResource();
+ *
+ * }
+ * 
+ *
+ * A collection with last writer wins conflict resolution, based on a path in the conflicting documents.
+ *
{@code
+ * DocumentCollection collectionSpec = new DocumentCollection();
+ * collectionSpec.id("Multi-master collection");
+ *
+ * ConflictResolutionPolicy policy = ConflictResolutionPolicy.createLastWriterWinsPolicy("/path/for/conflict/resolution");
+ * collectionSpec.conflictResolutionPolicy(policy);
+ *
+ * DocumentCollection collection = client.createCollection(databaseLink, collectionSpec, null)
+ *         .toBlocking().single().getResource();
+ *
+ * }
+ * 
+ */ +public class ConflictResolutionPolicy extends JsonSerializable { + + /** + * Creates a LAST_WRITER_WINS {@link ConflictResolutionPolicy} with "/_ts" as the resolution path. + * + * In case of a conflict occurring on a document, the document with the higher integer value in the default path + * {@link Resource#timestamp()}, i.e., "/_ts" will be used. + * + * @return ConflictResolutionPolicy. + */ + public static ConflictResolutionPolicy createLastWriterWinsPolicy() { + ConflictResolutionPolicy policy = new ConflictResolutionPolicy(); + policy.mode(ConflictResolutionMode.LAST_WRITER_WINS); + return policy; + } + + /** + * + * Creates a LAST_WRITER_WINS {@link ConflictResolutionPolicy} with path as the resolution path. + * + * The specified path must be present in each document and must be an integer value. + * In case of a conflict occurring on a document, the document with the higher integer value in the specified path + * will be picked. + * + * @param conflictResolutionPath The path to check values for last-writer wins conflict resolution. + * That path is a rooted path of the property in the document, such as "/name/first". + * @return ConflictResolutionPolicy. + */ + public static ConflictResolutionPolicy createLastWriterWinsPolicy(String conflictResolutionPath) { + ConflictResolutionPolicy policy = new ConflictResolutionPolicy(); + policy.mode(ConflictResolutionMode.LAST_WRITER_WINS); + if (conflictResolutionPath != null) { + policy.conflictResolutionPath(conflictResolutionPath); + } + return policy; + } + + /** + * Creates a CUSTOM {@link ConflictResolutionPolicy} which uses the specified stored procedure + * to perform conflict resolution + * + * This stored procedure may be created after the {@link DocumentCollection} is created and can be changed as required. + * + *
+ * <ul>
+ * <li>In case the stored procedure fails or throws an exception,
+ * the conflict resolution will default to registering conflicts in the conflicts feed</li>
+ * <li>The user can provide the stored procedure id; see {@link Resource#id()}</li>
+ * </ul>
+ * @param conflictResolutionSprocName stored procedure to perform conflict resolution. + * @return ConflictResolutionPolicy. + */ + public static ConflictResolutionPolicy createCustomPolicy(String conflictResolutionSprocName) { + ConflictResolutionPolicy policy = new ConflictResolutionPolicy(); + policy.mode(ConflictResolutionMode.CUSTOM); + if (conflictResolutionSprocName != null) { + policy.conflictResolutionProcedure(conflictResolutionSprocName); + } + return policy; + } + + /** + * Creates a CUSTOM {@link ConflictResolutionPolicy} without any {@link StoredProcedure}. User manually + * should resolve conflicts. + * + * The conflicts will be registered in the conflicts feed and the user should manually resolve them. + * + * @return ConflictResolutionPolicy. + */ + public static ConflictResolutionPolicy createCustomPolicy() { + ConflictResolutionPolicy policy = new ConflictResolutionPolicy(); + policy.mode(ConflictResolutionMode.CUSTOM); + return policy; + } + + /** + * Initializes a new instance of the {@link ConflictResolutionPolicy} class for the Azure Cosmos DB service. + */ + ConflictResolutionPolicy() {} + + public ConflictResolutionPolicy(String jsonString) { + super(jsonString); + } + + /** + * Gets the {@link ConflictResolutionMode} in the Azure Cosmos DB service. + * By default it is {@link ConflictResolutionMode#LAST_WRITER_WINS}. + * + * @return ConflictResolutionMode. + */ + public ConflictResolutionMode mode() { + + String strValue = super.getString(Constants.Properties.MODE); + + if (!Strings.isNullOrEmpty(strValue)) { + try { + return ConflictResolutionMode.valueOf(Strings.fromCamelCaseToUpperCase(super.getString(Constants.Properties.MODE))); + } catch (IllegalArgumentException e) { + this.getLogger().warn("INVALID ConflictResolutionMode value {}.", super.getString(Constants.Properties.MODE)); + return ConflictResolutionMode.INVALID; + } + } + + return ConflictResolutionMode.INVALID; + } + + /** + * Sets the {@link ConflictResolutionMode} in the Azure Cosmos DB service. + * By default it is {@link ConflictResolutionMode#LAST_WRITER_WINS}. + * + * @param mode One of the values of the {@link ConflictResolutionMode} enum. + */ + ConflictResolutionPolicy mode(ConflictResolutionMode mode) { + super.set(Constants.Properties.MODE, mode.toString()); + return this; + } + + /** + * Gets the path which is present in each document in the Azure Cosmos DB service for last writer wins conflict-resolution. + * This path must be present in each document and must be an integer value. + * In case of a conflict occurring on a document, the document with the higher integer value in the specified path will be picked. + * If the path is unspecified, by default the {@link Resource#timestamp()} path will be used. + * + * This value should only be set when using {@link ConflictResolutionMode#LAST_WRITER_WINS} + * + * @return The path to check values for last-writer wins conflict resolution. + * That path is a rooted path of the property in the document, such as "/name/first". + */ + public String conflictResolutionPath() { + return super.getString(Constants.Properties.CONFLICT_RESOLUTION_PATH); + } + + /** + * Sets the path which is present in each document in the Azure Cosmos DB service for last writer wins conflict-resolution. + * This path must be present in each document and must be an integer value. + * In case of a conflict occurring on a document, the document with the higher integer value in the specified path will be picked. 
+ * If the path is unspecified, by default the {@link Resource#timestamp()} path will be used. + * + * This value should only be set when using {@link ConflictResolutionMode#LAST_WRITER_WINS} + * + * @param value The path to check values for last-writer wins conflict resolution. + * That path is a rooted path of the property in the document, such as "/name/first". + */ + ConflictResolutionPolicy conflictResolutionPath(String value) { + super.set(Constants.Properties.CONFLICT_RESOLUTION_PATH, value); + return this; + } + + /** + * Gets the {@link StoredProcedure} which is used for conflict resolution in the Azure Cosmos DB service. + * This stored procedure may be created after the {@link DocumentCollection} is created and can be changed as required. + * + *
+ * <ul>
+ * <li>This value should only be set when using {@link ConflictResolutionMode#CUSTOM}</li>
+ * <li>In case the stored procedure fails or throws an exception,
+ * the conflict resolution will default to registering conflicts in the conflicts feed</li>
+ * <li>The user can provide the stored procedure id; see {@link Resource#id()}</li>
+ * </ul>
+ ** + * @return the stored procedure to perform conflict resolution.] + */ + public String conflictResolutionProcedure() { + return super.getString(Constants.Properties.CONFLICT_RESOLUTION_PROCEDURE); + } + + ConflictResolutionPolicy conflictResolutionProcedure(String value) { + super.set(Constants.Properties.CONFLICT_RESOLUTION_PROCEDURE, value); + return this; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/ConnectionMode.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/ConnectionMode.java new file mode 100644 index 0000000000000..cc6d367d0af50 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/ConnectionMode.java @@ -0,0 +1,57 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +/** + * Represents the connection mode to be used by the client in the Azure Cosmos DB database service. + *

+ * DIRECT and GATEWAY connectivity modes are supported. GATEWAY is the default. + * Refer to <see>http://azure.microsoft.com/documentation/articles/documentdb- + * interactions-with-resources/#connectivity-options</see> for additional + * details. + *
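+ *
+ * As a usage sketch (the policy instance is hypothetical and would normally be handed
+ * to a {@link CosmosClientBuilder}), switching from the default GATEWAY mode to DIRECT
+ * mode could look like this:
+ * <pre>
+ * {@code
+ * ConnectionPolicy policy = new ConnectionPolicy();
+ * policy.connectionMode(ConnectionMode.DIRECT);
+ * }
+ * </pre>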

+ */ +public enum ConnectionMode { + + /** + * Specifies that requests to server resources are made through a gateway proxy using HTTPS. + *

+ * In GATEWAY mode, all requests are made through a gateway proxy. + *

+ */ + GATEWAY, + + /** + * Specifies that requests to server resources are made directly to the data nodes. + *

+ * In DIRECT mode, all requests to server resources within a collection, such as documents, stored procedures + * and user-defined functions, etc., are made directly to the data nodes within the target Cosmos DB cluster + * using either the HTTPS or TCP/SSL transport protocol. + *

+ * Certain operations on account or database level resources, such as databases, collections and users, etc., + * are always routed through the gateway using HTTPS. + *

+ */ + DIRECT +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/ConnectionPolicy.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/ConnectionPolicy.java new file mode 100644 index 0000000000000..524d304acfdfa --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/ConnectionPolicy.java @@ -0,0 +1,380 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +import java.net.InetSocketAddress; +import java.util.Collections; +import java.util.List; + +/** + * Represents the Connection policy associated with a DocumentClient in the Azure Cosmos DB database service. + */ +public final class ConnectionPolicy { + + private static final int DEFAULT_REQUEST_TIMEOUT_IN_MILLIS = 60 * 1000; + // defaultMediaRequestTimeout is based upon the blob client timeout and the + // retry policy. + private static final int DEFAULT_MEDIA_REQUEST_TIMEOUT_IN_MILLIS = 300 * 1000; + private static final int DEFAULT_IDLE_CONNECTION_TIMEOUT_IN_MILLIS = 60 * 1000; + + private static final int DEFAULT_MAX_POOL_SIZE = 1000; + + private static ConnectionPolicy default_policy = null; + private int requestTimeoutInMillis; + private int mediaRequestTimeoutInMillis; + private ConnectionMode connectionMode; + private int maxPoolSize; + private int idleConnectionTimeoutInMillis; + private String userAgentSuffix; + private RetryOptions retryOptions; + private boolean enableEndpointDiscovery = true; + private List preferredLocations; + private boolean usingMultipleWriteLocations; + private InetSocketAddress inetSocketProxyAddress; + private Boolean enableReadRequestsFallback; + + /** + * Constructor. + */ + public ConnectionPolicy() { + this.connectionMode = ConnectionMode.GATEWAY; + this.enableReadRequestsFallback = null; + this.idleConnectionTimeoutInMillis = DEFAULT_IDLE_CONNECTION_TIMEOUT_IN_MILLIS; + this.maxPoolSize = DEFAULT_MAX_POOL_SIZE; + this.mediaRequestTimeoutInMillis = ConnectionPolicy.DEFAULT_MEDIA_REQUEST_TIMEOUT_IN_MILLIS; + this.requestTimeoutInMillis = ConnectionPolicy.DEFAULT_REQUEST_TIMEOUT_IN_MILLIS; + this.retryOptions = new RetryOptions(); + this.userAgentSuffix = ""; + } + + /** + * Gets the default connection policy. + * + * @return the default connection policy. 
+ */ + public static ConnectionPolicy defaultPolicy() { + if (ConnectionPolicy.default_policy == null) { + ConnectionPolicy.default_policy = new ConnectionPolicy(); + } + return ConnectionPolicy.default_policy; + } + + /** + * Gets the request timeout (time to wait for response from network peer) in + * milliseconds. + * + * @return the request timeout in milliseconds. + */ + public int requestTimeoutInMillis() { + return this.requestTimeoutInMillis; + } + + /** + * Sets the request timeout (time to wait for response from network peer) in + * milliseconds. The default is 60 seconds. + * + * @param requestTimeoutInMillis the request timeout in milliseconds. + * @return the ConnectionPolicy. + */ + public ConnectionPolicy requestTimeoutInMillis(int requestTimeoutInMillis) { + this.requestTimeoutInMillis = requestTimeoutInMillis; + return this; + } + + /** + * Gets the connection mode used in the client. + * + * @return the connection mode. + */ + public ConnectionMode connectionMode() { + return this.connectionMode; + } + + /** + * Sets the connection mode used in the client. + * + * @param connectionMode the connection mode. + * @return the ConnectionPolicy. + */ + public ConnectionPolicy connectionMode(ConnectionMode connectionMode) { + this.connectionMode = connectionMode; + return this; + } + + /** + * Gets the value of the connection pool size the client is using. + * + * @return connection pool size. + */ + public int maxPoolSize() { + return this.maxPoolSize; + } + + /** + * Sets the value of the connection pool size, the default + * is 1000. + * + * @param maxPoolSize The value of the connection pool size. + * @return the ConnectionPolicy. + */ + public ConnectionPolicy maxPoolSize(int maxPoolSize) { + this.maxPoolSize = maxPoolSize; + return this; + } + + /** + * Gets the value of the timeout for an idle connection, the default is 60 + * seconds. + * + * @return Idle connection timeout. + */ + public int idleConnectionTimeoutInMillis() { + return this.idleConnectionTimeoutInMillis; + } + + /** + * sets the value of the timeout for an idle connection. After that time, + * the connection will be automatically closed. + * + * @param idleConnectionTimeoutInMillis the timeout for an idle connection in seconds. + * @return the ConnectionPolicy. + */ + public ConnectionPolicy idleConnectionTimeoutInMillis(int idleConnectionTimeoutInMillis) { + this.idleConnectionTimeoutInMillis = idleConnectionTimeoutInMillis; + return this; + } + + /** + * Gets the value of user-agent suffix. + * + * @return the value of user-agent suffix. + */ + public String userAgentSuffix() { + return this.userAgentSuffix; + } + + /** + * sets the value of the user-agent suffix. + * + * @param userAgentSuffix The value to be appended to the user-agent header, this is + * used for monitoring purposes. + * @return the ConnectionPolicy. + */ + public ConnectionPolicy userAgentSuffix(String userAgentSuffix) { + this.userAgentSuffix = userAgentSuffix; + return this; + } + + /** + * Gets the retry policy options associated with the DocumentClient instance. + * + * @return the RetryOptions instance. + */ + public RetryOptions retryOptions() { + return this.retryOptions; + } + + /** + * Sets the retry policy options associated with the DocumentClient instance. + *

+ * Properties in the RetryOptions class allow application to customize the built-in + * retry policies. This property is optional. When it's not set, the SDK uses the + * default values for configuring the retry policies. See RetryOptions class for + * more details. + * + * @param retryOptions the RetryOptions instance. + * @return the ConnectionPolicy. + */ + public ConnectionPolicy retryOptions(RetryOptions retryOptions) { + if (retryOptions == null) { + throw new IllegalArgumentException("retryOptions value must not be null."); + } + + this.retryOptions = retryOptions; + return this; + } + + /** + * Gets the flag to enable endpoint discovery for geo-replicated database accounts. + * + * @return whether endpoint discovery is enabled. + */ + public boolean enableEndpointDiscovery() { + return this.enableEndpointDiscovery; + } + + /** + * Sets the flag to enable endpoint discovery for geo-replicated database accounts. + *

+ * When EnableEndpointDiscovery is true, the SDK will automatically discover the + * current write and read regions to ensure requests are sent to the correct region + * based on the capability of the region and the user's preference. + *

+ * The default value for this property is true indicating endpoint discovery is enabled. + * + * @param enableEndpointDiscovery true if EndpointDiscovery is enabled. + * @return the ConnectionPolicy. + */ + public ConnectionPolicy enableEndpointDiscovery(boolean enableEndpointDiscovery) { + this.enableEndpointDiscovery = enableEndpointDiscovery; + return this; + } + + /** + * Gets the flag to enable writes on any locations (regions) for geo-replicated database accounts in the Azure Cosmos DB service. + * + * When the value of this property is true, the SDK will direct write operations to + * available writable locations of geo-replicated database account. Writable locations + * are ordered by PreferredLocations property. Setting the property value + * to true has no effect until EnableMultipleWriteLocations in DatabaseAccount + * is also set to true. + * + * DEFAULT value is false indicating that writes are only directed to + * first region in PreferredLocations property. + * + * @return flag to enable writes on any locations (regions) for geo-replicated database accounts. + */ + public boolean usingMultipleWriteLocations() { + return this.usingMultipleWriteLocations; + } + + /** + * Gets whether to allow for reads to go to multiple regions configured on an account of Azure Cosmos DB service. + * + * DEFAULT value is null. + * + * If this property is not set, the default is true for all Consistency Levels other than Bounded Staleness, + * The default is false for Bounded Staleness. + * 1. {@link #enableEndpointDiscovery} is true + * 2. the Azure Cosmos DB account has more than one region + * + * @return flag to allow for reads to go to multiple regions configured on an account of Azure Cosmos DB service. + */ + public Boolean enableReadRequestsFallback() { + return this.enableReadRequestsFallback; + } + + /** + * Sets the flag to enable writes on any locations (regions) for geo-replicated database accounts in the Azure Cosmos DB service. + * + * When the value of this property is true, the SDK will direct write operations to + * available writable locations of geo-replicated database account. Writable locations + * are ordered by PreferredLocations property. Setting the property value + * to true has no effect until EnableMultipleWriteLocations in DatabaseAccount + * is also set to true. + * + * DEFAULT value is false indicating that writes are only directed to + * first region in PreferredLocations property. + * + * @param usingMultipleWriteLocations flag to enable writes on any locations (regions) for geo-replicated database accounts. + * @return the ConnectionPolicy. + */ + public ConnectionPolicy usingMultipleWriteLocations(boolean usingMultipleWriteLocations) { + this.usingMultipleWriteLocations = usingMultipleWriteLocations; + return this; + } + + /** + * Sets whether to allow for reads to go to multiple regions configured on an account of Azure Cosmos DB service. + * + * DEFAULT value is null. + * + * If this property is not set, the default is true for all Consistency Levels other than Bounded Staleness, + * The default is false for Bounded Staleness. + * 1. {@link #enableEndpointDiscovery} is true + * 2. the Azure Cosmos DB account has more than one region + * + * @param enableReadRequestsFallback flag to enable reads to go to multiple regions configured on an account of Azure Cosmos DB service. + * @return the ConnectionPolicy. 
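+ *
+ * A minimal sketch of a multi-region configuration (the region name is only an example,
+ * and the resulting policy would still need to be supplied to the client builder):
+ * <pre>
+ * {@code
+ * ConnectionPolicy policy = new ConnectionPolicy();
+ * policy.enableEndpointDiscovery(true);
+ * policy.usingMultipleWriteLocations(true);
+ * policy.enableReadRequestsFallback(true);
+ * policy.preferredLocations(Collections.singletonList("East US"));
+ * }
+ * </pre>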
+ */ + public ConnectionPolicy enableReadRequestsFallback(Boolean enableReadRequestsFallback) { + this.enableReadRequestsFallback = enableReadRequestsFallback; + return this; + } + + /** + * Gets the preferred locations for geo-replicated database accounts + * + * @return the list of preferred location. + */ + public List preferredLocations() { + return this.preferredLocations != null ? preferredLocations : Collections.emptyList(); + } + + /** + * Sets the preferred locations for geo-replicated database accounts. For example, + * "East US" as the preferred location. + *

+ * When EnableEndpointDiscovery is true and PreferredRegions is non-empty, + * the SDK will prefer to use the locations in the collection in the order + * they are specified to perform operations. + *

+ * If EnableEndpointDiscovery is set to false, this property is ignored. + * + * @param preferredLocations the list of preferred locations. + * @return the ConnectionPolicy. + */ + public ConnectionPolicy preferredLocations(List preferredLocations) { + this.preferredLocations = preferredLocations; + return this; + } + + /** + * Gets the InetSocketAddress of proxy server. + * + * @return the value of proxyHost. + */ + public InetSocketAddress proxy() { + return this.inetSocketProxyAddress; + } + + /** + * This will create the InetSocketAddress for proxy server, + * all the requests to cosmoDB will route from this address. + * @param proxyHost The proxy server host. + * @param proxyPort The proxy server port. + * @return the ConnectionPolicy. + */ + public ConnectionPolicy proxy(String proxyHost, int proxyPort) { + this.inetSocketProxyAddress = new InetSocketAddress(proxyHost, proxyPort); + return this; + } + + @Override + public String toString() { + return "ConnectionPolicy{" + + "requestTimeoutInMillis=" + requestTimeoutInMillis + + ", mediaRequestTimeoutInMillis=" + mediaRequestTimeoutInMillis + + ", connectionMode=" + connectionMode + + ", maxPoolSize=" + maxPoolSize + + ", idleConnectionTimeoutInMillis=" + idleConnectionTimeoutInMillis + + ", userAgentSuffix='" + userAgentSuffix + '\'' + + ", retryOptions=" + retryOptions + + ", enableEndpointDiscovery=" + enableEndpointDiscovery + + ", preferredLocations=" + preferredLocations + + ", usingMultipleWriteLocations=" + usingMultipleWriteLocations + + ", inetSocketProxyAddress=" + inetSocketProxyAddress + + '}'; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/ConsistencyLevel.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/ConsistencyLevel.java new file mode 100644 index 0000000000000..e64a5ef9976ec --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/ConsistencyLevel.java @@ -0,0 +1,71 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +import org.apache.commons.lang3.StringUtils; +import org.apache.commons.text.WordUtils; + +/** + * Represents the consistency levels supported for Cosmos DB client operations in the Azure Cosmos DB database service. + *

+ * The requested ConsistencyLevel must match or be weaker than that provisioned for the database account. Consistency + * levels by order of strength are STRONG, BOUNDED_STALENESS, SESSION and EVENTUAL. + */ +public enum ConsistencyLevel { + + /** + * STRONG Consistency guarantees that read operations always return the value that was last written. + */ + STRONG, + + /** + * Bounded Staleness guarantees that reads are not too out-of-date. This can be configured based on number of + * operations (MaxStalenessPrefix) or time (MaxStalenessIntervalInSeconds) + */ + BOUNDED_STALENESS, + + /** + * SESSION Consistency guarantees monotonic reads (you never read old data, then new, then old again), monotonic + * writes (writes are ordered) and read your writes (your writes are immediately visible to your reads) within + * any single session. + */ + SESSION, + + /** + * EVENTUAL Consistency guarantees that reads will return a subset of writes. ALL writes will be eventually be + * available for reads. + */ + EVENTUAL, + + /** + * CONSISTENT_PREFIX Consistency guarantees that reads will return some prefix of all writes with no gaps. ALL writes + * will be eventually be available for reads. + */ + CONSISTENT_PREFIX; + + @Override + public String toString() { + return StringUtils.remove(WordUtils.capitalizeFully(this.name(), '_'), '_'); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/ConsistencyPolicy.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/ConsistencyPolicy.java new file mode 100644 index 0000000000000..2af95869c1db4 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/ConsistencyPolicy.java @@ -0,0 +1,134 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + + +import com.azure.data.cosmos.internal.Constants; +import org.apache.commons.lang3.StringUtils; + +/** + * Encapsulates the properties for consistency policy in the Azure Cosmos DB database service. + */ +public final class ConsistencyPolicy extends JsonSerializable { + private static final ConsistencyLevel DEFAULT_DEFAULT_CONSISTENCY_LEVEL = + ConsistencyLevel.SESSION; + + private static final int DEFAULT_MAX_STALENESS_INTERVAL = 5; + private static final int DEFAULT_MAX_STALENESS_PREFIX = 100; + + + /** + * Constructor. + */ + public ConsistencyPolicy() { + } + + /** + * Constructor. 
+     *
+     * @param jsonString the json string that represents the consistency policy.
+     */
+    ConsistencyPolicy(String jsonString) {
+        super(jsonString);
+    }
+
+    /**
+     * Gets the default consistency level.
+     *
+     * @return the default consistency level.
+     */
+    public ConsistencyLevel defaultConsistencyLevel() {
+        ConsistencyLevel result = ConsistencyPolicy.DEFAULT_DEFAULT_CONSISTENCY_LEVEL;
+        try {
+            result = ConsistencyLevel.valueOf(
+                    StringUtils.upperCase(super.getString(Constants.Properties.DEFAULT_CONSISTENCY_LEVEL)));
+        } catch (IllegalArgumentException e) {
+            // ignore the exception and return the default
+            this.getLogger().warn("Unknown consistency level {}, value ignored.", super.getString(Constants.Properties.DEFAULT_CONSISTENCY_LEVEL));
+        }
+        return result;
+    }
+
+    /**
+     * Sets the default consistency level.
+     *
+     * @param level the consistency level.
+     * @return the ConsistencyPolicy.
+     */
+    public ConsistencyPolicy defaultConsistencyLevel(ConsistencyLevel level) {
+        super.set(Constants.Properties.DEFAULT_CONSISTENCY_LEVEL, level.toString());
+        return this;
+    }
+
+    /**
+     * Gets, for bounded staleness consistency, the maximum allowed staleness in terms of the difference
+     * in sequence numbers (aka version).
+     *
+     * @return the max staleness prefix.
+     */
+    public int maxStalenessPrefix() {
+        Integer value = super.getInt(Constants.Properties.MAX_STALENESS_PREFIX);
+        if (value == null) {
+            return ConsistencyPolicy.DEFAULT_MAX_STALENESS_PREFIX;
+        }
+        return value;
+    }
+
+    /**
+     * Sets, for bounded staleness consistency, the maximum allowed staleness in terms of the difference
+     * in sequence numbers (aka version).
+     *
+     * @param maxStalenessPrefix the max staleness prefix.
+     * @return the ConsistencyPolicy.
+     */
+    public ConsistencyPolicy maxStalenessPrefix(int maxStalenessPrefix) {
+        super.set(Constants.Properties.MAX_STALENESS_PREFIX, maxStalenessPrefix);
+        return this;
+    }
+
+    /**
+     * Gets, for bounded staleness consistency, the maximum allowed staleness in terms of a time interval.
+     *
+     * @return the max staleness interval in seconds.
+     */
+    public int maxStalenessIntervalInSeconds() {
+        Integer value = super.getInt(Constants.Properties.MAX_STALENESS_INTERVAL_IN_SECONDS);
+        if (value == null) {
+            return ConsistencyPolicy.DEFAULT_MAX_STALENESS_INTERVAL;
+        }
+        return value;
+    }
+
+    /**
+     * Sets, for bounded staleness consistency, the maximum allowed staleness in terms of a time interval.
+     *
+     * @param maxStalenessIntervalInSeconds the max staleness interval in seconds.
+     * @return the ConsistencyPolicy.
+     */
+    public ConsistencyPolicy maxStalenessIntervalInSeconds(int maxStalenessIntervalInSeconds) {
+        super.set(Constants.Properties.MAX_STALENESS_INTERVAL_IN_SECONDS, maxStalenessIntervalInSeconds);
+        return this;
+    }
+}
diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosBridgeInternal.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosBridgeInternal.java
new file mode 100644
index 0000000000000..fab004759731b
--- /dev/null
+++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosBridgeInternal.java
@@ -0,0 +1,40 @@
+package com.azure.data.cosmos;
+
+import com.azure.data.cosmos.internal.AsyncDocumentClient;
+import com.azure.data.cosmos.internal.DatabaseAccount;
+import com.azure.data.cosmos.internal.DocumentCollection;
+import reactor.core.publisher.Mono;
+
+/**
+ * DO NOT USE. For internal use only by the SDK. These methods might break at any time. No support will be provided.
+ */ +public class CosmosBridgeInternal { + + public static DocumentCollection toDocumentCollection(CosmosContainerProperties cosmosContainerProperties) { + return new DocumentCollection(cosmosContainerProperties.toJson()); + } + + public static AsyncDocumentClient getAsyncDocumentClient(CosmosClient client) { + return client.getDocClientWrapper(); + } + + public static CosmosDatabase getCosmosDatabaseWithNewClient(CosmosDatabase cosmosDatabase, CosmosClient client) { + return new CosmosDatabase(cosmosDatabase.id(), client); + } + + public static CosmosContainer getCosmosContainerWithNewClient(CosmosContainer cosmosContainer, CosmosDatabase cosmosDatabase, CosmosClient client) { + return new CosmosContainer(cosmosContainer.id(), CosmosBridgeInternal.getCosmosDatabaseWithNewClient(cosmosDatabase, client)); + } + + public static Mono getDatabaseAccount(CosmosClient client) { + return client.getDatabaseAccount(); + } + + public static AsyncDocumentClient getContextClient(CosmosDatabase database) { + return database.getClient().getContextClient(); + } + + public static AsyncDocumentClient getContextClient(CosmosContainer container) { + return container.getDatabase().getClient().getContextClient(); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosClient.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosClient.java new file mode 100644 index 0000000000000..049d4fc01dfed --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosClient.java @@ -0,0 +1,380 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.AsyncDocumentClient; +import com.azure.data.cosmos.internal.Configs; +import com.azure.data.cosmos.internal.Database; +import com.azure.data.cosmos.internal.DatabaseAccount; +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.Permission; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.util.List; + +/** + * Provides a client-side logical representation of the Azure Cosmos database service. + * This asynchronous client is used to configure and execute requests + * against the service. 
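+ *
+ * A minimal usage sketch (the endpoint, key and database id below are placeholders,
+ * not working values):
+ * <pre>
+ * {@code
+ * CosmosClient client = CosmosClient.builder()
+ *         .endpoint("https://<your-account>.documents.azure.com:443/")
+ *         .key("<your-account-key>")
+ *         .consistencyLevel(ConsistencyLevel.SESSION)
+ *         .build();
+ *
+ * client.createDatabaseIfNotExists("myDatabase").block();
+ * client.close();
+ * }
+ * </pre>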
+ */ +public class CosmosClient implements AutoCloseable { + + //Document client wrapper + private final Configs configs; + private final AsyncDocumentClient asyncDocumentClient; + private final String serviceEndpoint; + private final String keyOrResourceToken; + private final ConnectionPolicy connectionPolicy; + private final ConsistencyLevel desiredConsistencyLevel; + private final List permissions; + private final TokenResolver tokenResolver; + + + CosmosClient(CosmosClientBuilder builder) { + this.configs = builder.configs(); + this.serviceEndpoint = builder.endpoint(); + this.keyOrResourceToken = builder.key(); + this.connectionPolicy = builder.connectionPolicy(); + this.desiredConsistencyLevel = builder.consistencyLevel(); + this.permissions = builder.permissions(); + this.tokenResolver = builder.tokenResolver(); + this.asyncDocumentClient = new AsyncDocumentClient.Builder() + .withServiceEndpoint(this.serviceEndpoint) + .withMasterKeyOrResourceToken(this.keyOrResourceToken) + .withConnectionPolicy(this.connectionPolicy) + .withConsistencyLevel(this.desiredConsistencyLevel) + .withConfigs(this.configs) + .withTokenResolver(this.tokenResolver) + .build(); + } + + AsyncDocumentClient getContextClient() { + return this.asyncDocumentClient; + } + + /** + * Instantiate the cosmos client builder to build cosmos client + * @return {@link CosmosClientBuilder} + */ + public static CosmosClientBuilder builder(){ + return new CosmosClientBuilder(); + } + + /** + * Get the service endpoint + * @return the service endpoint + */ + String getServiceEndpoint() { + return serviceEndpoint; + } + + /** + * Gets the key or resource token + * @return get the key or resource token + */ + String getKeyOrResourceToken() { + return keyOrResourceToken; + } + + /** + * Get the connection policy + * @return {@link ConnectionPolicy} + */ + ConnectionPolicy getConnectionPolicy() { + return connectionPolicy; + } + + /** + * Gets the consistency level + * @return the (@link ConsistencyLevel) + */ + ConsistencyLevel getDesiredConsistencyLevel() { + return desiredConsistencyLevel; + } + + /** + * Gets the permission list + * @return the permission list + */ + List getPermissions() { + return permissions; + } + + AsyncDocumentClient getDocClientWrapper(){ + return asyncDocumentClient; + } + + /** + * Gets the configs + * @return the configs + */ + Configs getConfigs() { + return configs; + } + + /** + * Gets the token resolver + * @return the token resolver + */ + TokenResolver getTokenResolver() { + return tokenResolver; + } + + /** + * CREATE a Database if it does not already exist on the service + * + * The {@link Mono} upon successful completion will contain a single cosmos database response with the + * created or existing database. + * @param databaseSettings CosmosDatabaseProperties + * @return a {@link Mono} containing the cosmos database response with the created or existing database or + * an error. + */ + public Mono createDatabaseIfNotExists(CosmosDatabaseProperties databaseSettings) { + return createDatabaseIfNotExistsInternal(getDatabase(databaseSettings.id())); + } + + /** + * CREATE a Database if it does not already exist on the service + * The {@link Mono} upon successful completion will contain a single cosmos database response with the + * created or existing database. 
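+ *
+ * For example (the database id is a placeholder; nothing happens until the returned
+ * {@link Mono} is subscribed to):
+ * <pre>
+ * {@code
+ * client.createDatabaseIfNotExists("inventoryDb")
+ *         .subscribe(response -> System.out.println("database is available"));
+ * }
+ * </pre>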
+ * @param id the id of the database + * @return a {@link Mono} containing the cosmos database response with the created or existing database or + * an error + */ + public Mono createDatabaseIfNotExists(String id) { + return createDatabaseIfNotExistsInternal(getDatabase(id)); + } + + private Mono createDatabaseIfNotExistsInternal(CosmosDatabase database){ + return database.read().onErrorResume(exception -> { + if (exception instanceof CosmosClientException) { + CosmosClientException cosmosClientException = (CosmosClientException) exception; + if (cosmosClientException.statusCode() == HttpConstants.StatusCodes.NOTFOUND) { + return createDatabase(new CosmosDatabaseProperties(database.id()), new CosmosDatabaseRequestOptions()); + } + } + return Mono.error(exception); + }); + } + + /** + * Creates a database. + * + * After subscription the operation will be performed. + * The {@link Mono} upon successful completion will contain a single resource response with the + * created database. + * In case of failure the {@link Mono} will error. + * + * @param databaseSettings {@link CosmosDatabaseProperties} + * @param options {@link CosmosDatabaseRequestOptions} + * @return an {@link Mono} containing the single cosmos database response with the created database or an error. + */ + public Mono createDatabase(CosmosDatabaseProperties databaseSettings, + CosmosDatabaseRequestOptions options) { + if (options == null) { + options = new CosmosDatabaseRequestOptions(); + } + Database wrappedDatabase = new Database(); + wrappedDatabase.id(databaseSettings.id()); + return asyncDocumentClient.createDatabase(wrappedDatabase, options.toRequestOptions()).map(databaseResourceResponse -> + new CosmosDatabaseResponse(databaseResourceResponse, this)).single(); + } + + /** + * Creates a database. + * + * After subscription the operation will be performed. + * The {@link Mono} upon successful completion will contain a single resource response with the + * created database. + * In case of failure the {@link Mono} will error. + * + * @param databaseSettings {@link CosmosDatabaseProperties} + * @return an {@link Mono} containing the single cosmos database response with the created database or an error. + */ + public Mono createDatabase(CosmosDatabaseProperties databaseSettings) { + return createDatabase(databaseSettings, new CosmosDatabaseRequestOptions()); + } + + /** + * Creates a database. + * + * After subscription the operation will be performed. + * The {@link Mono} upon successful completion will contain a single resource response with the + * created database. + * In case of failure the {@link Mono} will error. + * + * @param id id of the database + * @return a {@link Mono} containing the single cosmos database response with the created database or an error. + */ + public Mono createDatabase(String id) { + return createDatabase(new CosmosDatabaseProperties(id), new CosmosDatabaseRequestOptions()); + } + + /** + * Creates a database. + * + * After subscription the operation will be performed. + * The {@link Mono} upon successful completion will contain a single resource response with the + * created database. + * In case of failure the {@link Mono} will error. + * + * @param databaseSettings {@link CosmosDatabaseProperties} + * @param throughput the throughput for the database + * @param options {@link CosmosDatabaseRequestOptions} + * @return an {@link Mono} containing the single cosmos database response with the created database or an error. 
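+ *
+ * A short sketch (the id and throughput value are arbitrary examples):
+ * <pre>
+ * {@code
+ * CosmosDatabaseProperties properties = new CosmosDatabaseProperties("salesDb");
+ * client.createDatabase(properties, 400, new CosmosDatabaseRequestOptions())
+ *         .subscribe();
+ * }
+ * </pre>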
+ */ + public Mono createDatabase(CosmosDatabaseProperties databaseSettings, + int throughput, + CosmosDatabaseRequestOptions options) { + if (options == null) { + options = new CosmosDatabaseRequestOptions(); + } + options.offerThroughput(throughput); + Database wrappedDatabase = new Database(); + wrappedDatabase.id(databaseSettings.id()); + return asyncDocumentClient.createDatabase(wrappedDatabase, options.toRequestOptions()).map(databaseResourceResponse -> + new CosmosDatabaseResponse(databaseResourceResponse, this)).single(); + } + + /** + * Creates a database. + * + * After subscription the operation will be performed. + * The {@link Mono} upon successful completion will contain a single resource response with the + * created database. + * In case of failure the {@link Mono} will error. + * + * @param databaseSettings {@link CosmosDatabaseProperties} + * @param throughput the throughput for the database + * @return an {@link Mono} containing the single cosmos database response with the created database or an error. + */ + public Mono createDatabase(CosmosDatabaseProperties databaseSettings, int throughput) { + CosmosDatabaseRequestOptions options = new CosmosDatabaseRequestOptions(); + options.offerThroughput(throughput); + return createDatabase(databaseSettings, options); + } + + /** + * Creates a database. + * + * After subscription the operation will be performed. + * The {@link Mono} upon successful completion will contain a single resource response with the + * created database. + * In case of failure the {@link Mono} will error. + * + * @param id id of the database + * @param throughput the throughput for the database + * @return a {@link Mono} containing the single cosmos database response with the created database or an error. + */ + public Mono createDatabase(String id, int throughput) { + CosmosDatabaseRequestOptions options = new CosmosDatabaseRequestOptions(); + options.offerThroughput(throughput); + return createDatabase(new CosmosDatabaseProperties(id), options); + } + + /** + * Reads all databases. + * + * After subscription the operation will be performed. + * The {@link Flux} will contain one or several feed response of the read databases. + * In case of failure the {@link Flux} will error. + * + * @param options {@link FeedOptions} + * @return a {@link Flux} containing one or several feed response pages of read databases or an error. + */ + public Flux> readAllDatabases(FeedOptions options) { + return getDocClientWrapper().readDatabases(options) + .map(response-> BridgeInternal.createFeedResponse(CosmosDatabaseProperties.getFromV2Results(response.results()), + response.responseHeaders())); + } + + /** + * Reads all databases. + * + * After subscription the operation will be performed. + * The {@link Flux} will contain one or several feed response of the read databases. + * In case of failure the {@link Flux} will error. + * + * @return a {@link Flux} containing one or several feed response pages of read databases or an error. + */ + public Flux> readAllDatabases() { + return readAllDatabases(new FeedOptions()); + } + + + /** + * Query for databases. + * + * After subscription the operation will be performed. + * The {@link Flux} will contain one or several feed response of the read databases. + * In case of failure the {@link Flux} will error. + * + * @param query the query. + * @param options the feed options. + * @return an {@link Flux} containing one or several feed response pages of read databases or an error. 
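+ *
+ * For illustration (the query text is an arbitrary example):
+ * <pre>
+ * {@code
+ * client.queryDatabases("SELECT * FROM d WHERE d.id = 'salesDb'", new FeedOptions())
+ *         .subscribe(page -> page.results().forEach(db -> System.out.println(db.id())));
+ * }
+ * </pre>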
+ */ + public Flux> queryDatabases(String query, FeedOptions options){ + return queryDatabases(new SqlQuerySpec(query), options); + } + + /** + * Query for databases. + * + * After subscription the operation will be performed. + * The {@link Flux} will contain one or several feed response of the read databases. + * In case of failure the {@link Flux} will error. + * + * @param querySpec the SQL query specification. + * @param options the feed options. + * @return an {@link Flux} containing one or several feed response pages of read databases or an error. + */ + public Flux> queryDatabases(SqlQuerySpec querySpec, FeedOptions options){ + return getDocClientWrapper().queryDatabases(querySpec, options) + .map(response-> BridgeInternal.createFeedResponse( + CosmosDatabaseProperties.getFromV2Results(response.results()), + response.responseHeaders())); + } + + Mono getDatabaseAccount() { + return asyncDocumentClient.getDatabaseAccount().single(); + } + + /** + * Gets a database object without making a service call. + * + * @param id name of the database + * @return {@link CosmosDatabase} + */ + public CosmosDatabase getDatabase(String id) { + return new CosmosDatabase(id, this); + } + + /** + * Close this {@link CosmosClient} instance and cleans up the resources. + */ + @Override + public void close() { + asyncDocumentClient.close(); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosClientBuilder.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosClientBuilder.java new file mode 100644 index 0000000000000..f077c6570096f --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosClientBuilder.java @@ -0,0 +1,228 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.Configs; +import com.azure.data.cosmos.internal.Permission; + +import java.util.List; + +/** + * Helper class to build {@link CosmosClient} instances + * as logical representation of the Azure Cosmos database service. + * + *

+ * {@code
+ * ConnectionPolicy connectionPolicy = new ConnectionPolicy();
+ * connectionPolicy.connectionMode(ConnectionMode.DIRECT);
+ * CosmosClient client = CosmosClient.builder()
+ *         .endpoint(serviceEndpoint)
+ *         .key(key)
+ *         .connectionPolicy(connectionPolicy)
+ *         .consistencyLevel(ConsistencyLevel.SESSION)
+ *         .build();
+ * }
+ * </pre>
+ */ +public class CosmosClientBuilder { + + private Configs configs = new Configs(); + private String serviceEndpoint; + private String keyOrResourceToken; + private ConnectionPolicy connectionPolicy; + private ConsistencyLevel desiredConsistencyLevel; + private List permissions; + private TokenResolver tokenResolver; + + CosmosClientBuilder() { + } + + /** + * Gets the token resolver + * @return the token resolver + */ + public TokenResolver tokenResolver() { + return tokenResolver; + } + + /** + * Sets the token resolver + * @param tokenResolver + * @return current builder + */ + public CosmosClientBuilder tokenResolver(TokenResolver tokenResolver) { + this.tokenResolver = tokenResolver; + return this; + } + + /** + * Gets the Azure Cosmos DB endpoint the SDK will connect to + * @return the endpoint + */ + public String endpoint() { + return serviceEndpoint; + } + + /** + * Sets the Azure Cosmos DB endpoint the SDK will connect to + * @param endpoint the service endpoint + * @return current Builder + */ + public CosmosClientBuilder endpoint(String endpoint) { + this.serviceEndpoint = endpoint; + return this; + } + + /** + * Gets either a master or readonly key used to perform authentication + * for accessing resource. + * @return the key + */ + public String key() { + return keyOrResourceToken; + } + + /** + * Sets either a master or readonly key used to perform authentication + * for accessing resource. + * + * @param key master or readonly key + * @return current Builder. + */ + public CosmosClientBuilder key(String key) { + this.keyOrResourceToken = key; + return this; + } + + /** + * Sets a resource token used to perform authentication + * for accessing resource. + * @return the resourceToken + */ + public String resourceToken() { + return keyOrResourceToken; + } + + /** + * Sets a resource token used to perform authentication + * for accessing resource. + * + * @param resourceToken resourceToken for authentication + * @return current Builder. + */ + public CosmosClientBuilder resourceToken(String resourceToken) { + this.keyOrResourceToken = resourceToken; + return this; + } + + /** + * Gets the permission list, which contains the + * resource tokens needed to access resources. + * @return the permission list + */ + public List permissions() { + return permissions; + } + + /** + * Sets the permission list, which contains the + * resource tokens needed to access resources. + * + * @param permissions Permission list for authentication. + * @return current Builder. 
+ */ + public CosmosClientBuilder permissions(List permissions) { + this.permissions = permissions; + return this; + } + + /** + * Gets the {@link ConsistencyLevel} to be used + * @return the consistency level + */ + public ConsistencyLevel consistencyLevel() { + return this.desiredConsistencyLevel; + } + + /** + * Sets the {@link ConsistencyLevel} to be used + * @param desiredConsistencyLevel {@link ConsistencyLevel} + * @return current Builder + */ + public CosmosClientBuilder consistencyLevel(ConsistencyLevel desiredConsistencyLevel) { + this.desiredConsistencyLevel = desiredConsistencyLevel; + return this; + } + + /** + * Gets the {@link ConnectionPolicy} to be used + * @return the connection policy + */ + public ConnectionPolicy connectionPolicy() { + return connectionPolicy; + } + + /** + * Sets the {@link ConnectionPolicy} to be used + * @param connectionPolicy {@link ConnectionPolicy} + * @return current Builder + */ + public CosmosClientBuilder connectionPolicy(ConnectionPolicy connectionPolicy) { + this.connectionPolicy = connectionPolicy; + return this; + } + + /** + * Builds a cosmos client instance with the provided properties + * @return CosmosClient + */ + public CosmosClient build() { + + ifThrowIllegalArgException(this.serviceEndpoint == null, "cannot build client without service endpoint"); + ifThrowIllegalArgException( + this.keyOrResourceToken == null && (permissions == null || permissions.isEmpty()), + "cannot build client without key or resource token"); + + return new CosmosClient(this); + } + + Configs configs() { + return configs; + } + + /** + * Sets the {@link Configs} to be used + * @param configs the configs + * @return current builder + */ + CosmosClientBuilder configs(Configs configs) { + this.configs = configs; + return this; + } + + private void ifThrowIllegalArgException(boolean value, String error) { + if (value) { + throw new IllegalArgumentException(error); + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosClientException.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosClientException.java new file mode 100644 index 0000000000000..cd3b216dc6c4c --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosClientException.java @@ -0,0 +1,291 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE.
+ */ + +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.Constants; +import com.azure.data.cosmos.internal.HttpConstants; +import org.apache.commons.lang3.StringUtils; + +import java.net.URI; +import java.util.HashMap; +import java.util.Map; + +/** + * This class defines a custom exception type for all operations on + * DocumentClient in the Azure Cosmos DB database service. Applications are + * expected to catch CosmosClientException and handle errors as appropriate when + * calling methods on DocumentClient. + *
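+ * <p>
+ * A minimal error-handling sketch (illustrative only; {@code container} and {@code item} are assumed to exist):
+ * <pre>
+ * {@code
+ * container.createItem(item)
+ *          .doOnError(throwable -> {
+ *              if (throwable instanceof CosmosClientException) {
+ *                  CosmosClientException cosmosClientException = (CosmosClientException) throwable;
+ *                  System.out.println(cosmosClientException.statusCode());
+ *              }
+ *          })
+ *          .subscribe();
+ * }
+ * </pre>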

+ * Errors coming from the service during normal execution are converted to + * CosmosClientException before returning to the application with the following + * exception: + *

+ * When a BE error is encountered during a QueryIterable<T> iteration, an + * IllegalStateException is thrown instead of CosmosClientException. + *

+ * When a transport level error happens that request is not able to reach the + * service, an IllegalStateException is thrown instead of CosmosClientException. + */ +public class CosmosClientException extends Exception { + private static final long serialVersionUID = 1L; + + private final int statusCode; + private final Map responseHeaders; + + private CosmosResponseDiagnostics cosmosResponseDiagnostics; + private CosmosError cosmosError; + + long lsn; + String partitionKeyRangeId; + Map requestHeaders; + URI requestUri; + String resourceAddress; + + CosmosClientException(int statusCode, String message, Map responseHeaders, Throwable cause) { + super(message, cause, /* enableSuppression */ true, /* writableStackTrace */ false); + this.statusCode = statusCode; + this.responseHeaders = responseHeaders == null ? new HashMap<>() : new HashMap<>(responseHeaders); + } + + /** + * Creates a new instance of the CosmosClientException class. + * + * @param statusCode the http status code of the response. + */ + CosmosClientException(int statusCode) { + this(statusCode, null, null, null); + } + + /** + * Creates a new instance of the CosmosClientException class. + * + * @param statusCode the http status code of the response. + * @param errorMessage the error message. + */ + CosmosClientException(int statusCode, String errorMessage) { + this(statusCode, errorMessage, null, null); + this.cosmosError = new CosmosError(); + cosmosError.set(Constants.Properties.MESSAGE, errorMessage); + } + + /** + * Creates a new instance of the CosmosClientException class. + * + * @param statusCode the http status code of the response. + * @param innerException the original exception. + */ + CosmosClientException(int statusCode, Exception innerException) { + this(statusCode, null, null, innerException); + } + + /** + * Creates a new instance of the CosmosClientException class. + * + * @param statusCode the http status code of the response. + * @param cosmosErrorResource the error resource object. + * @param responseHeaders the response headers. + */ + CosmosClientException(int statusCode, CosmosError cosmosErrorResource, Map responseHeaders) { + this(/* resourceAddress */ null, statusCode, cosmosErrorResource, responseHeaders); + } + + /** + * Creates a new instance of the CosmosClientException class. + * + * @param resourceAddress the address of the resource the request is associated with. + * @param statusCode the http status code of the response. + * @param cosmosErrorResource the error resource object. + * @param responseHeaders the response headers. + */ + + CosmosClientException(String resourceAddress, int statusCode, CosmosError cosmosErrorResource, Map responseHeaders) { + this(statusCode, cosmosErrorResource == null ? null : cosmosErrorResource.getMessage(), responseHeaders, null); + this.resourceAddress = resourceAddress; + this.cosmosError = cosmosErrorResource; + } + + /** + * Creates a new instance of the CosmosClientException class. + * + * @param message the string message. + * @param statusCode the http status code of the response. + * @param exception the exception object. + * @param responseHeaders the response headers. + * @param resourceAddress the address of the resource the request is associated with. 
+ */ + CosmosClientException(String message, Exception exception, Map responseHeaders, int statusCode, String resourceAddress) { + this(statusCode, message, responseHeaders, exception); + this.resourceAddress = resourceAddress; + } + + @Override + public String getMessage() { + if (cosmosResponseDiagnostics == null) { + return innerErrorMessage(); + } + return innerErrorMessage() + ", " + cosmosResponseDiagnostics.toString(); + } + + /** + * Gets the activity ID associated with the request. + * + * @return the activity ID. + */ + public String message() { + if (this.responseHeaders != null) { + return this.responseHeaders.get(HttpConstants.HttpHeaders.ACTIVITY_ID); + } + + return null; + } + + /** + * Gets the http status code. + * + * @return the status code. + */ + public int statusCode() { + return this.statusCode; + } + + /** + * Gets the sub status code. + * + * @return the status code. + */ + public int subStatusCode() { + int code = HttpConstants.SubStatusCodes.UNKNOWN; + if (this.responseHeaders != null) { + String subStatusString = this.responseHeaders.get(HttpConstants.HttpHeaders.SUB_STATUS); + if (StringUtils.isNotEmpty(subStatusString)) { + try { + code = Integer.parseInt(subStatusString); + } catch (NumberFormatException e) { + // If value cannot be parsed as Integer, return Unknown. + } + } + } + + return code; + } + + /** + * Gets the error code associated with the exception. + * + * @return the error. + */ + public CosmosError error() { + return this.cosmosError; + } + + void error(CosmosError cosmosError) { + this.cosmosError = cosmosError; + } + + /** + * Gets the recommended time interval after which the client can retry failed + * requests + * + * @return the recommended time interval after which the client can retry failed + * requests. + */ + public long retryAfterInMilliseconds() { + long retryIntervalInMilliseconds = 0; + + if (this.responseHeaders != null) { + String header = this.responseHeaders.get(HttpConstants.HttpHeaders.RETRY_AFTER_IN_MILLISECONDS); + + if (StringUtils.isNotEmpty(header)) { + try { + retryIntervalInMilliseconds = Long.parseLong(header); + } catch (NumberFormatException e) { + // If the value cannot be parsed as long, return 0. + } + } + } + + // + // In the absence of explicit guidance from the backend, don't introduce + // any unilateral retry delays here. + return retryIntervalInMilliseconds; + } + + /** + * Gets the response headers as key-value pairs + * + * @return the response headers + */ + public Map responseHeaders() { + return this.responseHeaders; + } + + /** + * Gets the resource address associated with this exception. + * + * @return the resource address associated with this exception. + */ + String getResourceAddress() { + return this.resourceAddress; + } + + /** + * Gets the Cosmos Response Diagnostic Statistics associated with this exception. + * + * @return Cosmos Response Diagnostic Statistics associated with this exception. 
+ */ + public CosmosResponseDiagnostics cosmosResponseDiagnostics() { + return cosmosResponseDiagnostics; + } + + CosmosClientException cosmosResponseDiagnostics(CosmosResponseDiagnostics cosmosResponseDiagnostics) { + this.cosmosResponseDiagnostics = cosmosResponseDiagnostics; + return this; + } + + @Override + public String toString() { + return getClass().getSimpleName() + "{" + "error=" + cosmosError + ", resourceAddress='" + resourceAddress + '\'' + + ", statusCode=" + statusCode + ", message=" + getMessage() + ", causeInfo=" + causeInfo() + + ", responseHeaders=" + responseHeaders + ", requestHeaders=" + requestHeaders + '}'; + } + + String innerErrorMessage() { + String innerErrorMessage = super.getMessage(); + if (cosmosError != null) { + innerErrorMessage = cosmosError.getMessage(); + if (innerErrorMessage == null) { + innerErrorMessage = String.valueOf(cosmosError.get("Errors")); + } + } + return innerErrorMessage; + } + + private String causeInfo() { + Throwable cause = getCause(); + if (cause != null) { + return String.format("[class: %s, message: %s]", cause.getClass(), cause.getMessage()); + } + return null; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosConflict.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosConflict.java new file mode 100644 index 0000000000000..4b4b6adbf9755 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosConflict.java @@ -0,0 +1,127 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.RequestOptions; +import reactor.core.publisher.Mono; + +import static com.azure.data.cosmos.internal.Paths.CONFLICTS_PATH_SEGMENT; + +/** + * Read and delete conflicts + */ +public class CosmosConflict { + + private CosmosContainer container; + private String id; + + /** + * Constructor + * + * @param id the conflict id + * @param container the container + */ + CosmosConflict(String id, CosmosContainer container) { + this.id = id; + this.container = container; + } + + /** + * Get the id of the {@link CosmosConflict} + * + * @return the id of the {@link CosmosConflict} + */ + public String id() { + return id; + } + + /** + * Set the id of the {@link CosmosConflict} + * + * @param id the id of the {@link CosmosConflict} + * @return the same {@link CosmosConflict} that had the id set + */ + CosmosConflict id(String id) { + this.id = id; + return this; + } + + /** + * Reads a conflict. + *

+ * After subscription the operation will be performed. The {@link Mono} upon + * successful completion will contain a single resource response with the read + * conflict. In case of failure the {@link Mono} will error. + * + * @param options the request options. + * @return a {@link Mono} containing the single resource response with the read + * conflict or an error. + */ + public Mono read(CosmosConflictRequestOptions options) { + if (options == null) { + options = new CosmosConflictRequestOptions(); + } + RequestOptions requestOptions = options.toRequestOptions(); + return this.container.getDatabase().getDocClientWrapper().readConflict(getLink(), requestOptions) + .map(response -> new CosmosConflictResponse(response, container)).single(); + + } + + /** + * Deletes a conflict. + *

+ * After subscription the operation will be performed. The {@link Mono} upon + * successful completion will contain a single resource response for the + * deleted conflict. In case of failure the {@link Mono} will error. + * + * @param options the request options. + * @return a {@link Mono} containing the single resource response for the + * deleted conflict or an error. + */ + public Mono delete(CosmosConflictRequestOptions options) { + if (options == null) { + options = new CosmosConflictRequestOptions(); + } + RequestOptions requestOptions = options.toRequestOptions(); + return this.container.getDatabase().getDocClientWrapper().deleteConflict(getLink(), requestOptions) + .map(response -> new CosmosConflictResponse(response, container)).single(); + } + + String URIPathSegment() { + return CONFLICTS_PATH_SEGMENT; + } + + String parentLink() { + return this.container.getLink(); + } + + String getLink() { + StringBuilder builder = new StringBuilder(); + builder.append(parentLink()); + builder.append("/"); + builder.append(URIPathSegment()); + builder.append("/"); + builder.append(id()); + return builder.toString(); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosConflictProperties.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosConflictProperties.java new file mode 100644 index 0000000000000..8c33bd1ab47f8 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosConflictProperties.java @@ -0,0 +1,102 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.Conflict; +import com.azure.data.cosmos.internal.Constants; +import com.azure.data.cosmos.internal.Strings; + +import java.lang.reflect.InvocationTargetException; +import java.util.List; +import java.util.stream.Collectors; + +public class CosmosConflictProperties extends Resource { + + /** + * Initialize a conflict object. + */ + CosmosConflictProperties() { + super(); + } + + /** + * Initialize a conflict object from json string. + * + * @param jsonString the json string that represents the conflict. + */ + CosmosConflictProperties(String jsonString) { + super(jsonString); + } + + /** + * Gets the operation kind. + * + * @return the operation kind. + */ + public String operationKind() { + return super.getString(Constants.Properties.OPERATION_TYPE); + } + + /** + * Gets the type of the conflicting resource.
+ * + * @return the resource type. + */ + public String resourceType() { + return super.getString(Constants.Properties.RESOURCE_TYPE); + } + + /** + * Gets the resource ID for the conflict in the Azure Cosmos DB service. + * @return resource Id for the conflict. + */ + String sourceResourceId() { + return super.getString(Constants.Properties.SOURCE_RESOURCE_ID); + } + + /** + * Gets the conflicting resource in the Azure Cosmos DB service. + * @param the type of the object. + * @param klass The returned type of conflicting resource. + * @return The conflicting resource. + */ + public T getResource(Class klass) { + String resourceAsString = super.getString(Constants.Properties.CONTENT); + + if (!Strings.isNullOrEmpty(resourceAsString)) { + try { + return klass.getConstructor(String.class).newInstance(resourceAsString); + } catch (InstantiationException | IllegalAccessException | IllegalArgumentException + | InvocationTargetException | NoSuchMethodException | SecurityException e) { + throw new IllegalStateException("Failed to instantiate class object.", e); + } + } else { + return null; + } + } + + static List getFromV2Results(List results) { + return results.stream().map(conflict -> new CosmosConflictProperties(conflict.toJson())) + .collect(Collectors.toList()); + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosConflictRequestOptions.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosConflictRequestOptions.java new file mode 100644 index 0000000000000..3271fccd84e7e --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosConflictRequestOptions.java @@ -0,0 +1,55 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.RequestOptions; + +public class CosmosConflictRequestOptions { + private AccessCondition accessCondition; + + /** + * Gets the conditions associated with the request. + * + * @return the access condition. + */ + public AccessCondition accessCondition() { + return accessCondition; + } + + /** + * Sets the conditions associated with the request. + * + * @param accessCondition the access condition. 
+ * @return the current request options + */ + public CosmosConflictRequestOptions accessCondition(AccessCondition accessCondition) { + this.accessCondition = accessCondition; + return this; + } + + RequestOptions toRequestOptions() { + RequestOptions requestOptions = new RequestOptions(); + requestOptions.setAccessCondition(accessCondition); + return requestOptions; + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosConflictResponse.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosConflictResponse.java new file mode 100644 index 0000000000000..6ece0c92f4a85 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosConflictResponse.java @@ -0,0 +1,62 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.Conflict; +import com.azure.data.cosmos.internal.ResourceResponse; + +public class CosmosConflictResponse extends CosmosResponse { + private CosmosContainer container; + private CosmosConflict conflictClient; + + CosmosConflictResponse(ResourceResponse response, CosmosContainer container) { + super(response); + this.container = container; + if(response.getResource() == null){ + super.resourceSettings(null); + }else{ + super.resourceSettings(new CosmosConflictProperties(response.getResource().toJson())); + conflictClient = new CosmosConflict(response.getResource().id(), container); + } + } + + CosmosContainer getContainer() { + return container; + } + + /** + * Get conflict client + * @return the cosmos conflict client + */ + public CosmosConflict conflict() { + return conflictClient; + } + + /** + * Get conflict properties object representing the resource on the server + * @return the conflict properties + */ + public CosmosConflictProperties properties() { + return resourceSettings(); + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosContainer.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosContainer.java new file mode 100644 index 0000000000000..c2a55589f499e --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosContainer.java @@ -0,0 +1,500 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.Offer; +import com.azure.data.cosmos.internal.Paths; +import com.azure.data.cosmos.internal.RequestOptions; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import static com.azure.data.cosmos.Resource.validateResource; + +/** + * Provides methods for reading, deleting, and replacing existing Containers. 
+ * Provides methods for interacting with child resources (Items, Scripts, Conflicts) + */ +public class CosmosContainer { + + private CosmosDatabase database; + private String id; + private CosmosScripts scripts; + + CosmosContainer(String id, CosmosDatabase database) { + this.id = id; + this.database = database; + } + + /** + * Get the id of the {@link CosmosContainer} + * + * @return the id of the {@link CosmosContainer} + */ + public String id() { + return id; + } + + /** + * Set the id of the {@link CosmosContainer} + * + * @param id the id of the {@link CosmosContainer} + * @return the same {@link CosmosContainer} that had the id set + */ + CosmosContainer id(String id) { + this.id = id; + return this; + } + + /** + * Reads the document container + * + * After subscription the operation will be performed. The {@link Mono} upon + * successful completion will contain a single cosmos container response with + * the read container. In case of failure the {@link Mono} will error. + * + * @return an {@link Mono} containing the single cosmos container response with + * the read container or an error. + */ + public Mono read() { + return read(new CosmosContainerRequestOptions()); + } + + /** + * Reads the document container by the container link. + * + * After subscription the operation will be performed. The {@link Mono} upon + * successful completion will contain a single cosmos container response with + * the read container. In case of failure the {@link Mono} will error. + * + * @param options The cosmos container request options. + * @return an {@link Mono} containing the single cosmos container response with + * the read container or an error. + */ + public Mono read(CosmosContainerRequestOptions options) { + if (options == null) { + options = new CosmosContainerRequestOptions(); + } + return database.getDocClientWrapper().readCollection(getLink(), options.toRequestOptions()) + .map(response -> new CosmosContainerResponse(response, database)).single(); + } + + /** + * Deletes the item container + * + * After subscription the operation will be performed. The {@link Mono} upon + * successful completion will contain a single cosmos container response for the + * deleted database. In case of failure the {@link Mono} will error. + * + * @param options the request options. + * @return an {@link Mono} containing the single cosmos container response for + * the deleted database or an error. + */ + public Mono delete(CosmosContainerRequestOptions options) { + if (options == null) { + options = new CosmosContainerRequestOptions(); + } + return database.getDocClientWrapper().deleteCollection(getLink(), options.toRequestOptions()) + .map(response -> new CosmosContainerResponse(response, database)).single(); + } + + /** + * Deletes the item container + * + * After subscription the operation will be performed. The {@link Mono} upon + * successful completion will contain a single cosmos container response for the + * deleted container. In case of failure the {@link Mono} will error. + * + * @return an {@link Mono} containing the single cosmos container response for + * the deleted container or an error. + */ + public Mono delete() { + return delete(new CosmosContainerRequestOptions()); + } + + /** + * Replaces a document container. + * + * After subscription the operation will be performed. The {@link Mono} upon + * successful completion will contain a single cosmos container response with + * the replaced document container. In case of failure the {@link Mono} will + * error. 
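+ * <p>
+ * A possible usage sketch (illustrative only; {@code container} is an existing {@link CosmosContainer}):
+ * <pre>
+ * {@code
+ * container.read()
+ *          .flatMap(response -> container.replace(response.properties()))
+ *          .subscribe();
+ * }
+ * </pre>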
+ * + * @param containerSettings the item container properties + * @return an {@link Mono} containing the single cosmos container response with + * the replaced document container or an error. + */ + public Mono replace(CosmosContainerProperties containerSettings) { + return replace(containerSettings, null); + } + + /** + * Replaces a document container. + * + * After subscription the operation will be performed. The {@link Mono} upon + * successful completion will contain a single cosmos container response with + * the replaced document container. In case of failure the {@link Mono} will + * error. + * + * @param containerSettings the item container properties + * @param options the cosmos container request options. + * @return an {@link Mono} containing the single cosmos container response with + * the replaced document container or an error. + */ + public Mono replace(CosmosContainerProperties containerSettings, + CosmosContainerRequestOptions options) { + validateResource(containerSettings); + if (options == null) { + options = new CosmosContainerRequestOptions(); + } + return database.getDocClientWrapper() + .replaceCollection(containerSettings.getV2Collection(), options.toRequestOptions()) + .map(response -> new CosmosContainerResponse(response, database)).single(); + } + + /* CosmosItem operations */ + + /** + * Creates a cosmos item. + * + * After subscription the operation will be performed. The {@link Mono} upon + * successful completion will contain a single resource response with the + * created cosmos item. In case of failure the {@link Mono} will error. + * + * @param item the cosmos item represented as a POJO or cosmos item object. + * @return an {@link Mono} containing the single resource response with the + * created cosmos item or an error. + */ + public Mono createItem(Object item) { + return createItem(item, new CosmosItemRequestOptions()); + } + + /** + * Creates a cosmos item. + * + * After subscription the operation will be performed. The {@link Mono} upon + * successful completion will contain a single resource response with the + * created cosmos item. In case of failure the {@link Mono} will error. + * + * @param item the cosmos item represented as a POJO or cosmos item object. + * @param options the request options. + * @return an {@link Mono} containing the single resource response with the + * created cosmos item or an error. + */ + public Mono createItem(Object item, CosmosItemRequestOptions options) { + if (options == null) { + options = new CosmosItemRequestOptions(); + } + RequestOptions requestOptions = options.toRequestOptions(); + return database.getDocClientWrapper() + .createDocument(getLink(), CosmosItemProperties.fromObject(item), requestOptions, true) + .map(response -> new CosmosItemResponse(response, requestOptions.getPartitionKey(), this)).single(); + } + + /** + * Upserts an item. + * + * After subscription the operation will be performed. The {@link Mono} upon + * successful completion will contain a single resource response with the + * upserted item. In case of failure the {@link Mono} will error. + * + * @param item the item represented as a POJO or Item object to upsert. + * @return an {@link Mono} containing the single resource response with the + * upserted document or an error. + */ + public Mono upsertItem(Object item) { + return upsertItem(item, null); + } + + /** + * Upserts a cosmos item. + * + * After subscription the operation will be performed. 
The {@link Mono} upon + * successful completion will contain a single resource response with the + * upserted item. In case of failure the {@link Mono} will error. + * + * @param item the item represented as a POJO or Item object to upsert. + * @param options the request options. + * @return an {@link Mono} containing the single resource response with the + * upserted document or an error. + */ + public Mono upsertItem(Object item, CosmosItemRequestOptions options) { + if (options == null) { + options = new CosmosItemRequestOptions(); + } + RequestOptions requestOptions = options.toRequestOptions(); + + return this.getDatabase().getDocClientWrapper() + .upsertDocument(this.getLink(), CosmosItemProperties.fromObject(item), options.toRequestOptions(), true) + .map(response -> new CosmosItemResponse(response, requestOptions.getPartitionKey(), this)).single(); + } + + /** + * Reads all cosmos items in the container. + * + * After subscription the operation will be performed. The {@link Flux} will + * contain one or several feed response of the read cosmos items. In case of + * failure the {@link Flux} will error. + * + * @return an {@link Flux} containing one or several feed response pages of the + * read cosmos items or an error. + */ + public Flux> readAllItems() { + return readAllItems(new FeedOptions()); + } + + /** + * Reads all cosmos items in a container. + * + * After subscription the operation will be performed. The {@link Flux} will + * contain one or several feed response of the read cosmos items. In case of + * failure the {@link Flux} will error. + * + * @param options the feed options. + * @return an {@link Flux} containing one or several feed response pages of the + * read cosmos items or an error. + */ + public Flux> readAllItems(FeedOptions options) { + return getDatabase().getDocClientWrapper().readDocuments(getLink(), options).map( + response -> BridgeInternal.createFeedResponse(CosmosItemProperties.getFromV2Results(response.results()), + response.responseHeaders())); + } + + /** + * Query for documents in a items in a container + * + * After subscription the operation will be performed. The {@link Flux} will + * contain one or several feed response of the obtained items. In case of + * failure the {@link Flux} will error. + * + * @param query the query. + * @return an {@link Flux} containing one or several feed response pages of the + * obtained items or an error. + */ + public Flux> queryItems(String query) { + return queryItems(new SqlQuerySpec(query), null); + } + + /** + * Query for documents in a items in a container + * + * After subscription the operation will be performed. The {@link Flux} will + * contain one or several feed response of the obtained items. In case of + * failure the {@link Flux} will error. + * + * @param query the query. + * @param options the feed options. + * @return an {@link Flux} containing one or several feed response pages of the + * obtained items or an error. + */ + public Flux> queryItems(String query, FeedOptions options) { + return queryItems(new SqlQuerySpec(query), options); + } + + /** + * Query for documents in a items in a container + * + * After subscription the operation will be performed. The {@link Flux} will + * contain one or several feed response of the obtained items. In case of + * failure the {@link Flux} will error. + * + * @param querySpec the SQL query specification. + * @return an {@link Flux} containing one or several feed response pages of the + * obtained items or an error. 
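+ * <p>
+ * For example (illustrative sketch; {@code container} is assumed to exist):
+ * <pre>
+ * {@code
+ * container.queryItems(new SqlQuerySpec("SELECT * FROM c"))
+ *          .subscribe(page -> page.results().forEach(item -> System.out.println(item.id())));
+ * }
+ * </pre>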
+ */ + public Flux> queryItems(SqlQuerySpec querySpec) { + return queryItems(querySpec, null); + } + + /** + * Query for documents in a items in a container + * + * After subscription the operation will be performed. The {@link Flux} will + * contain one or several feed response of the obtained items. In case of + * failure the {@link Flux} will error. + * + * @param querySpec the SQL query specification. + * @param options the feed options. + * @return an {@link Flux} containing one or several feed response pages of the + * obtained items or an error. + */ + public Flux> queryItems(SqlQuerySpec querySpec, FeedOptions options) { + return getDatabase().getDocClientWrapper().queryDocuments(getLink(), querySpec, options) + .map(response -> BridgeInternal.createFeedResponseWithQueryMetrics( + CosmosItemProperties.getFromV2Results(response.results()), response.responseHeaders(), + response.queryMetrics())); + } + + /** + * Query for documents in a items in a container + * + * After subscription the operation will be performed. The {@link Flux} will + * contain one or several feed response of the obtained items. In case of + * failure the {@link Flux} will error. + * + * @param changeFeedOptions the feed options. + * @return an {@link Flux} containing one or several feed response pages of the + * obtained items or an error. + */ + public Flux> queryChangeFeedItems(ChangeFeedOptions changeFeedOptions) { + return getDatabase().getDocClientWrapper().queryDocumentChangeFeed(getLink(), changeFeedOptions) + .map(response -> new FeedResponse( + CosmosItemProperties.getFromV2Results(response.results()), response.responseHeaders(), false)); + } + + /** + * Gets a CosmosItem object without making a service call + * + * @param id id of the item + * @param partitionKey the partition key + * @return a cosmos item + */ + public CosmosItem getItem(String id, Object partitionKey) { + return new CosmosItem(id, partitionKey, this); + } + + public CosmosScripts getScripts() { + if (this.scripts == null) { + this.scripts = new CosmosScripts(this); + } + return this.scripts; + } + + /** + * Lists all the conflicts in the container + * + * @param options the feed options + * @return a {@link Flux} containing one or several feed response pages of the + * obtained conflicts or an error. + */ + public Flux> readAllConflicts(FeedOptions options) { + return database.getDocClientWrapper().readConflicts(getLink(), options) + .map(response -> BridgeInternal.createFeedResponse( + CosmosConflictProperties.getFromV2Results(response.results()), response.responseHeaders())); + } + + /** + * Queries all the conflicts in the container + * + * @param query the query + * @return a {@link Flux} containing one or several feed response pages of the + * obtained conflicts or an error. + */ + public Flux> queryConflicts(String query) { + return queryConflicts(query, null); + } + + /** + * Queries all the conflicts in the container + * + * @param query the query + * @param options the feed options + * @return a {@link Flux} containing one or several feed response pages of the + * obtained conflicts or an error. 
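+ * <p>
+ * For example (illustrative sketch; {@code container} is assumed to exist):
+ * <pre>
+ * {@code
+ * container.queryConflicts("SELECT * FROM c", new FeedOptions())
+ *          .subscribe(page -> page.results().forEach(
+ *              conflictProperties -> System.out.println(conflictProperties.resourceType())));
+ * }
+ * </pre>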
+ */ + public Flux> queryConflicts(String query, FeedOptions options) { + return database.getDocClientWrapper().queryConflicts(getLink(), query, options) + .map(response -> BridgeInternal.createFeedResponse( + CosmosConflictProperties.getFromV2Results(response.results()), response.responseHeaders())); + } + + /** + * Gets a CosmosConflict object without making a service call + * + * @param id id of the cosmos conflict + * @return a cosmos conflict + */ + public CosmosConflict getConflict(String id) { + return new CosmosConflict(id, this); + } + + /** + * Gets the throughput of the container + * + * @return a {@link Mono} containing throughput or an error. + */ + public Mono readProvisionedThroughput() { + return this.read().flatMap(cosmosContainerResponse -> database.getDocClientWrapper() + .queryOffers("select * from c where c.offerResourceId = '" + + cosmosContainerResponse.resourceSettings().resourceId() + "'", new FeedOptions()) + .single()).flatMap(offerFeedResponse -> { + if (offerFeedResponse.results().isEmpty()) { + return Mono.error(BridgeInternal.createCosmosClientException(HttpConstants.StatusCodes.BADREQUEST, + "No offers found for the resource")); + } + return database.getDocClientWrapper().readOffer(offerFeedResponse.results().get(0).selfLink()) + .single(); + }).map(cosmosOfferResponse -> cosmosOfferResponse.getResource().getThroughput()); + } + + /** + * Sets throughput provisioned for a container in measurement of + * Requests-per-Unit in the Azure Cosmos service. + * + * @param requestUnitsPerSecond the cosmos container throughput, expressed in + * Request Units per second + * @return a {@link Mono} containing throughput or an error. + */ + public Mono replaceProvisionedThroughput(int requestUnitsPerSecond) { + return this.read().flatMap(cosmosContainerResponse -> database.getDocClientWrapper() + .queryOffers("select * from c where c.offerResourceId = '" + + cosmosContainerResponse.resourceSettings().resourceId() + "'", new FeedOptions()) + .single()).flatMap(offerFeedResponse -> { + if (offerFeedResponse.results().isEmpty()) { + return Mono.error(BridgeInternal.createCosmosClientException(HttpConstants.StatusCodes.BADREQUEST, + "No offers found for the resource")); + } + Offer offer = offerFeedResponse.results().get(0); + offer.setThroughput(requestUnitsPerSecond); + return database.getDocClientWrapper().replaceOffer(offer).single(); + }).map(offerResourceResponse -> offerResourceResponse.getResource().getThroughput()); + } + + /** + * Gets the parent Database + * + * @return the {@link CosmosDatabase} + */ + public CosmosDatabase getDatabase() { + return database; + } + + String URIPathSegment() { + return Paths.COLLECTIONS_PATH_SEGMENT; + } + + String parentLink() { + return database.getLink(); + } + + String getLink() { + StringBuilder builder = new StringBuilder(); + builder.append(parentLink()); + builder.append("/"); + builder.append(URIPathSegment()); + builder.append("/"); + builder.append(id()); + return builder.toString(); + } + +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosContainerProperties.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosContainerProperties.java new file mode 100644 index 0000000000000..244bf3422db76 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosContainerProperties.java @@ -0,0 +1,216 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and 
associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.Constants; +import com.azure.data.cosmos.internal.DocumentCollection; +import com.azure.data.cosmos.internal.ResourceResponse; + +import java.util.ArrayList; +import java.util.List; +import java.util.stream.Collectors; + +/** + * Represents a item container in the Azure Cosmos DB database service. A cosmos container is a named logical container + * for cosmos items. + *
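+ * <p>
+ * A minimal construction sketch (the container name and partition key path below are illustrative):
+ * <pre>
+ * {@code
+ * CosmosContainerProperties containerProperties =
+ *     new CosmosContainerProperties("myContainer", "/myPartitionKey");
+ * }
+ * </pre>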

+ * A database may contain zero or more named containers and each container consists of zero or more JSON items. + * Being schema-free, the items in a container do not need to share the same structure or fields. Since containers + * are application resources, they can be authorized using either the master key or resource keys. + */ +public class CosmosContainerProperties extends Resource { + + private IndexingPolicy indexingPolicy; + private UniqueKeyPolicy uniqueKeyPolicy; + private PartitionKeyDefinition partitionKeyDefinition; + + /** + * Constructor + * @param id id of the Container + * @param partitionKeyPath partition key path + */ + public CosmosContainerProperties(String id, String partitionKeyPath) { + super.id(id); + PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition(); + ArrayList paths = new ArrayList<>(); + paths.add(partitionKeyPath); + partitionKeyDef.paths(paths); + partitionKeyDefinition(partitionKeyDef); + } + + /** + * Constructor + * @param id id of the container + * @param partitionKeyDefinition the {@link PartitionKeyDefinition} + */ + public CosmosContainerProperties(String id, PartitionKeyDefinition partitionKeyDefinition) { + super.id(id); + partitionKeyDefinition(partitionKeyDefinition); + } + + CosmosContainerProperties(ResourceResponse response) { + super(response.getResource().toJson()); + } + + // Converting document collection to CosmosContainerProperties + CosmosContainerProperties(DocumentCollection collection){ + super(collection.toJson()); + } + + static List getFromV2Results(List results){ + return results.stream().map(CosmosContainerProperties::new).collect(Collectors.toList()); + } + + /** + * Gets the container's indexing policy. + * + * @return the indexing policy. + */ + public IndexingPolicy indexingPolicy() { + if (this.indexingPolicy == null) { + if (super.has(Constants.Properties.INDEXING_POLICY)) { + this.indexingPolicy = super.getObject(Constants.Properties.INDEXING_POLICY, IndexingPolicy.class); + } else { + this.indexingPolicy = new IndexingPolicy(); + } + } + + return this.indexingPolicy; + } + + /** + * Sets the container's indexing policy + * + * @param indexingPolicy {@link IndexingPolicy} the indexing policy + * @return the CosmosContainerProperties. + */ + public CosmosContainerProperties indexingPolicy(IndexingPolicy indexingPolicy) { + if (indexingPolicy == null) { + throw new IllegalArgumentException("IndexingPolicy cannot be null."); + } + this.indexingPolicy = indexingPolicy; + super.set(Constants.Properties.INDEXING_POLICY, indexingPolicy); + return this; + } + + /** + * Gets the containers unique key policy + * + * @return the unique key policy + */ + public UniqueKeyPolicy uniqueKeyPolicy() { + + // Thread safe lazy initialization for case when collection is cached (and is basically readonly). + if (this.uniqueKeyPolicy == null) { + this.uniqueKeyPolicy = super.getObject(Constants.Properties.UNIQUE_KEY_POLICY, UniqueKeyPolicy.class); + + if (this.uniqueKeyPolicy == null) { + this.uniqueKeyPolicy = new UniqueKeyPolicy(); + } + } + + return this.uniqueKeyPolicy; + } + + /** + * Sets the Containers unique key policy + * + * @param uniqueKeyPolicy the unique key policy + * @return the CosmosContainerProperties. 
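+ * <p>
+ * For example (illustrative sketch; {@code containerProperties} is assumed to exist):
+ * <pre>
+ * {@code
+ * containerProperties.indexingPolicy(new IndexingPolicy())
+ *                    .uniqueKeyPolicy(new UniqueKeyPolicy());
+ * }
+ * </pre>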
+ */ + public CosmosContainerProperties uniqueKeyPolicy(UniqueKeyPolicy uniqueKeyPolicy) { + if (uniqueKeyPolicy == null) { + throw new IllegalArgumentException("uniqueKeyPolicy cannot be null."); + } + + this.uniqueKeyPolicy = uniqueKeyPolicy; + super.set(Constants.Properties.UNIQUE_KEY_POLICY, uniqueKeyPolicy); + return this; + } + + /** + * Gets the containers's partition key definition. + * + * @return the partition key definition. + */ + public PartitionKeyDefinition partitionKeyDefinition() { + if (this.partitionKeyDefinition == null) { + + if (super.has(Constants.Properties.PARTITION_KEY)) { + this.partitionKeyDefinition = super.getObject(Constants.Properties.PARTITION_KEY, PartitionKeyDefinition.class); + } else { + this.partitionKeyDefinition = new PartitionKeyDefinition(); + } + } + + return this.partitionKeyDefinition; + } + + /** + * Sets the containers's partition key definition. + * + * @param partitionKeyDefinition the partition key definition. + * @return the CosmosContainerProperties. + */ + public CosmosContainerProperties partitionKeyDefinition(PartitionKeyDefinition partitionKeyDefinition) { + if (partitionKeyDefinition == null) { + throw new IllegalArgumentException("partitionKeyDefinition cannot be null."); + } + + this.partitionKeyDefinition = partitionKeyDefinition; + return this; + } + + /** + * Gets the conflictResolutionPolicy that is used for resolving conflicting writes + * on documents in different regions, in a collection in the Azure Cosmos DB service. + * + * @return ConflictResolutionPolicy + */ + public ConflictResolutionPolicy conflictResolutionPolicy() { + return super.getObject(Constants.Properties.CONFLICT_RESOLUTION_POLICY, ConflictResolutionPolicy.class); + } + + /** + * Sets the conflictResolutionPolicy that is used for resolving conflicting writes + * on documents in different regions, in a collection in the Azure Cosmos DB service. + * + * @param value ConflictResolutionPolicy to be used. + * @return the CosmosContainerProperties. 
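+ * <p>
+ * An illustrative sketch (assumes the SDK exposes a last-writer-wins factory such as
+ * {@code ConflictResolutionPolicy.createLastWriterWinsPolicy()}, which is not shown in this diff):
+ * <pre>
+ * {@code
+ * containerProperties.conflictResolutionPolicy(ConflictResolutionPolicy.createLastWriterWinsPolicy());
+ * }
+ * </pre>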
+ */ + public CosmosContainerProperties conflictResolutionPolicy(ConflictResolutionPolicy value) { + if (value == null) { + throw new IllegalArgumentException("CONFLICT_RESOLUTION_POLICY cannot be null."); + } + + super.set(Constants.Properties.CONFLICT_RESOLUTION_POLICY, value); + return this; + } + + DocumentCollection getV2Collection(){ + DocumentCollection collection = new DocumentCollection(this.toJson()); + collection.setPartitionKey(this.partitionKeyDefinition()); + collection.setIndexingPolicy(this.indexingPolicy()); + return collection; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosContainerRequestOptions.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosContainerRequestOptions.java new file mode 100644 index 0000000000000..c2222cca36b5b --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosContainerRequestOptions.java @@ -0,0 +1,150 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.RequestOptions; + +/** + * Encapsulates options that can be specified for a request issued to cosmos container. + */ +public class CosmosContainerRequestOptions { + private Integer offerThroughput; + private boolean populateQuotaInfo; + private ConsistencyLevel consistencyLevel; + private String sessionToken; + private AccessCondition accessCondition; + + /** + * Gets the throughput in the form of Request Units per second when creating a cosmos container. + * + * @return the throughput value. + */ + Integer offerThroughput() { + return offerThroughput; + } + + /** + * Sets the throughput in the form of Request Units per second when creating a cosmos container. + * + * @param offerThroughput the throughput value. + * @return the current request options + */ + CosmosContainerRequestOptions offerThroughput(Integer offerThroughput) { + this.offerThroughput = offerThroughput; + return this; + } + + /** + * Gets the PopulateQuotaInfo setting for cosmos container read requests in the Azure Cosmos DB database service. + * PopulateQuotaInfo is used to enable/disable getting cosmos container quota related stats for document + * collection read requests. 
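+ * <p>
+ * For example (illustrative sketch; {@code container} is assumed to exist):
+ * <pre>
+ * {@code
+ * CosmosContainerRequestOptions options = new CosmosContainerRequestOptions()
+ *     .populateQuotaInfo(true);
+ * container.read(options).subscribe();
+ * }
+ * </pre>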
+ * + * @return true if PopulateQuotaInfo is enabled + */ + public boolean populateQuotaInfo() { + return populateQuotaInfo; + } + + /** + * Sets the PopulateQuotaInfo setting for cosmos container read requests in the Azure Cosmos DB database service. + * PopulateQuotaInfo is used to enable/disable getting cosmos container quota related stats for document + * collection read requests. + * + * @param populateQuotaInfo a boolean value indicating whether PopulateQuotaInfo is enabled or not + * @return the current request options + */ + public CosmosContainerRequestOptions populateQuotaInfo(boolean populateQuotaInfo) { + this.populateQuotaInfo = populateQuotaInfo; + return this; + } + + /** + * Gets the consistency level required for the request. + * + * @return the consistency level. + */ + public ConsistencyLevel consistencyLevel() { + return consistencyLevel; + } + + /** + * Sets the consistency level required for the request. + * + * @param consistencyLevel the consistency level. + * @return the current request options + */ + public CosmosContainerRequestOptions consistencyLevel(ConsistencyLevel consistencyLevel) { + this.consistencyLevel = consistencyLevel; + return this; + } + + /** + * Gets the token for use with session consistency. + * + * @return the session token. + */ + public String sessionToken() { + return sessionToken; + } + + /** + * Sets the token for use with session consistency. + * + * @param sessionToken the session token. + * @return the current request options + */ + public CosmosContainerRequestOptions sessionToken(String sessionToken) { + this.sessionToken = sessionToken; + return this; + } + + /** + * Gets the conditions associated with the request. + * + * @return the access condition. + */ + public AccessCondition accessCondition() { + return accessCondition; + } + + /** + * Sets the conditions associated with the request. + * + * @param accessCondition the access condition. + * @return the current request options + */ + public CosmosContainerRequestOptions accessCondition(AccessCondition accessCondition) { + this.accessCondition = accessCondition; + return this; + } + + RequestOptions toRequestOptions() { + RequestOptions options = new RequestOptions(); + options.setAccessCondition(accessCondition); + options.setOfferThroughput(offerThroughput); + options.setPopulateQuotaInfo(populateQuotaInfo); + options.setSessionToken(sessionToken); + options.setConsistencyLevel(consistencyLevel); + return options; + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosContainerResponse.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosContainerResponse.java new file mode 100644 index 0000000000000..50c82ee30f3f2 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosContainerResponse.java @@ -0,0 +1,75 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.DocumentCollection; +import com.azure.data.cosmos.internal.ResourceResponse; + +public class CosmosContainerResponse extends CosmosResponse { + + private CosmosContainer container; + + CosmosContainerResponse(ResourceResponse response, CosmosDatabase database) { + super(response); + if(response.getResource() == null){ + super.resourceSettings(null); + }else{ + super.resourceSettings(new CosmosContainerProperties(response)); + container = new CosmosContainer(resourceSettings().id(), database); + } + } + + /** + * Gets the progress of an index transformation, if one is underway. + * + * @return the progress of an index transformation. + */ + public long indexTransformationProgress() { + return resourceResponseWrapper.getIndexTransformationProgress(); + } + + /** + * Gets the progress of lazy indexing. + * + * @return the progress of lazy indexing. + */ + long lazyIndexingProgress() { + return resourceResponseWrapper.getLazyIndexingProgress(); + } + + /** + * Gets the container properties + * @return the cosmos container properties + */ + public CosmosContainerProperties properties() { + return resourceSettings(); + } + + /** + * Gets the Container object + * @return the Cosmos container object + */ + public CosmosContainer container() { + return container; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosDatabase.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosDatabase.java new file mode 100644 index 0000000000000..b37885710fefd --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosDatabase.java @@ -0,0 +1,627 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.AsyncDocumentClient; +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.Offer; +import com.azure.data.cosmos.internal.Paths; +import org.apache.commons.lang3.StringUtils; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import static com.azure.data.cosmos.Resource.validateResource; + +/** + * Performs read and delete operations on a database, updates database throughput, and performs operations on child resources + */ +public class CosmosDatabase { + private CosmosClient client; + private String id; + + CosmosDatabase(String id, CosmosClient client) { + this.id = id; + this.client = client; + } + + /** + * Get the id of the CosmosDatabase + * + * @return the id of the CosmosDatabase + */ + public String id() { + return id; + } + + /** + * Set the id of the CosmosDatabase + * + * @param id the id of the CosmosDatabase + * @return the same CosmosDatabase that had the id set + */ + CosmosDatabase id(String id) { + this.id = id; + return this; + } + + /** + * Reads a database. + * + * After subscription the operation will be performed. The {@link Mono} upon + * successful completion will contain a single cosmos database response with the + * read database. In case of failure the {@link Mono} will error. + * + * @return an {@link Mono} containing the single cosmos database response with + * the read database or an error. + */ + public Mono read() { + return read(new CosmosDatabaseRequestOptions()); + } + + /** + * Reads a database. + * + * After subscription the operation will be performed. The {@link Mono} upon + * successful completion will contain a single cosmos database response with the + * read database. In case of failure the {@link Mono} will error. + * + * @param options the request options. + * @return an {@link Mono} containing the single cosmos database response with + * the read database or an error. + */ + public Mono read(CosmosDatabaseRequestOptions options) { + return getDocClientWrapper().readDatabase(getLink(), options.toRequestOptions()) + .map(response -> new CosmosDatabaseResponse(response, getClient())).single(); + } + + /** + * Deletes a database. + * + * After subscription the operation will be performed. The {@link Mono} upon + * successful completion will contain a cosmos database response with the + * deleted database. In case of failure the {@link Mono} will error. + * + * @return an {@link Mono} containing the single cosmos database response + */ + public Mono delete() { + return delete(new CosmosDatabaseRequestOptions()); + } + + /** + * Deletes a database. + *

+ * After subscription the operation will be performed. The {@link Mono} upon + * successful completion will contain a cosmos database response with the + * deleted database. In case of failure the {@link Mono} will error. + * + * @param options the request options + * @return an {@link Mono} containing the single cosmos database response + */ + public Mono delete(CosmosDatabaseRequestOptions options) { + return getDocClientWrapper().deleteDatabase(getLink(), options.toRequestOptions()) + .map(response -> new CosmosDatabaseResponse(response, getClient())).single(); + } + + /* CosmosContainer operations */ + + /** + * Creates a document container. + * + * After subscription the operation will be performed. The {@link Mono} upon + * successful completion will contain a cosmos container response with the + * created collection. In case of failure the {@link Mono} will error. + * + * @param containerSettings the container properties. + * @return an {@link Flux} containing the single cosmos container response with + * the created container or an error. + */ + public Mono createContainer(CosmosContainerProperties containerSettings) { + return createContainer(containerSettings, new CosmosContainerRequestOptions()); + } + + /** + * Creates a document container. + * + * After subscription the operation will be performed. The {@link Mono} upon + * successful completion will contain a cosmos container response with the + * created collection. In case of failure the {@link Mono} will error. + * + * @param containerSettings the container properties. + * @param throughput the throughput for the container + * @return an {@link Flux} containing the single cosmos container response with + * the created container or an error. + */ + public Mono createContainer(CosmosContainerProperties containerSettings, int throughput) { + validateResource(containerSettings); + CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); + options.offerThroughput(throughput); + return createContainer(containerSettings, options); + } + + /** + * Creates a document container. + * + * After subscription the operation will be performed. The {@link Mono} upon + * successful completion will contain a cosmos container response with the + * created collection. In case of failure the {@link Mono} will error. + * + * @param containerSettings the containerSettings. + * @param options the cosmos container request options + * @return an {@link Flux} containing the cosmos container response with the + * created container or an error. + */ + public Mono createContainer(CosmosContainerProperties containerSettings, + CosmosContainerRequestOptions options) { + validateResource(containerSettings); + return getDocClientWrapper() + .createCollection(this.getLink(), containerSettings.getV2Collection(), options.toRequestOptions()) + .map(response -> new CosmosContainerResponse(response, this)).single(); + } + + /** + * Creates a document container. + * + * After subscription the operation will be performed. The {@link Mono} upon + * successful completion will contain a cosmos container response with the + * created collection. In case of failure the {@link Mono} will error. + * + * @param containerSettings the containerSettings. + * @param throughput the throughput for the container + * @param options the cosmos container request options + * @return an {@link Flux} containing the cosmos container response with the + * created container or an error. 
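// Editorial illustration (not part of this diff): creating a container with dedicated
// throughput, assuming a CosmosDatabase instance named "database" obtained from a CosmosClient.
database.createContainer(new CosmosContainerProperties("orders", "/customerId"), 400)
        .subscribe(response -> System.out.println("Created: " + response.properties().id()));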
+ */ + public Mono createContainer(CosmosContainerProperties containerSettings, + int throughput, + CosmosContainerRequestOptions options) { + options.offerThroughput(throughput); + return createContainer(containerSettings, options); + } + + /** + * Creates a document container. + * + * After subscription the operation will be performed. The {@link Mono} upon + * successful completion will contain a cosmos container response with the + * created collection. In case of failure the {@link Mono} will error. + * + * @param id the cosmos container id + * @param partitionKeyPath the partition key path + * @return an {@link Flux} containing the cosmos container response with the + * created container or an error. + */ + public Mono createContainer(String id, String partitionKeyPath) { + return createContainer(new CosmosContainerProperties(id, partitionKeyPath)); + } + + /** + * Creates a document container. + * + * After subscription the operation will be performed. The {@link Mono} upon + * successful completion will contain a cosmos container response with the + * created collection. In case of failure the {@link Mono} will error. + * + * @param id the cosmos container id + * @param partitionKeyPath the partition key path + * @param throughput the throughput for the container + * @return an {@link Flux} containing the cosmos container response with the + * created container or an error. + */ + public Mono createContainer(String id, String partitionKeyPath, int throughput) { + CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); + options.offerThroughput(throughput); + return createContainer(new CosmosContainerProperties(id, partitionKeyPath), options); + } + + /** + * Creates a document container if it does not exist on the service. + *

+ * After subscription the operation will be performed. The {@link Mono} upon + * successful completion will contain a cosmos container response with the + * created or existing collection. In case of failure the {@link Mono} will + * error. + * + * @param containerSettings the container properties + * @return a {@link Mono} containing the cosmos container response with the + * created or existing container or an error. + */ + public Mono createContainerIfNotExists(CosmosContainerProperties containerSettings) { + CosmosContainer container = getContainer(containerSettings.id()); + return createContainerIfNotExistsInternal(containerSettings, container, null); + } + + /** + * Creates a document container if it does not exist on the service. + *

+ * After subscription the operation will be performed. The {@link Mono} upon + * successful completion will contain a cosmos container response with the + * created or existing collection. In case of failure the {@link Mono} will + * error. + * + * @param containerSettings the container properties + * @param throughput the throughput for the container + * @return a {@link Mono} containing the cosmos container response with the + * created or existing container or an error. + */ + public Mono createContainerIfNotExists(CosmosContainerProperties containerSettings, int throughput) { + CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); + options.offerThroughput(throughput); + CosmosContainer container = getContainer(containerSettings.id()); + return createContainerIfNotExistsInternal(containerSettings, container, options); + } + + /** + * Creates a document container if it does not exist on the service. + * + * After subscription the operation will be performed. The {@link Mono} upon + * successful completion will contain a cosmos container response with the + * created collection. In case of failure the {@link Mono} will error. + * + * @param id the cosmos container id + * @param partitionKeyPath the partition key path + * @return an {@link Flux} containing the cosmos container response with the + * created container or an error. + */ + public Mono createContainerIfNotExists(String id, String partitionKeyPath) { + CosmosContainer container = getContainer(id); + return createContainerIfNotExistsInternal(new CosmosContainerProperties(id, partitionKeyPath), container, null); + } + + /** + * Creates a document container if it does not exist on the service. + * + * After subscription the operation will be performed. The {@link Mono} upon + * successful completion will contain a cosmos container response with the + * created collection. In case of failure the {@link Mono} will error. + * + * @param id the cosmos container id + * @param partitionKeyPath the partition key path + * @param throughput the throughput for the container + * @return an {@link Flux} containing the cosmos container response with the + * created container or an error. + */ + public Mono createContainerIfNotExists(String id, String partitionKeyPath, int throughput) { + CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); + options.offerThroughput(throughput); + CosmosContainer container = getContainer(id); + return createContainerIfNotExistsInternal(new CosmosContainerProperties(id, partitionKeyPath), container, options); + } + + private Mono createContainerIfNotExistsInternal( + CosmosContainerProperties containerSettings, CosmosContainer container, CosmosContainerRequestOptions options) { + return container.read(options).onErrorResume(exception -> { + if (exception instanceof CosmosClientException) { + CosmosClientException cosmosClientException = (CosmosClientException) exception; + if (cosmosClientException.statusCode() == HttpConstants.StatusCodes.NOTFOUND) { + return createContainer(containerSettings, options); + } + } + return Mono.error(exception); + }); + } + + /** + * Reads all cosmos containers. + * + * After subscription the operation will be performed. The {@link Flux} will + * contain one or several feed response of the read containers. In case of + * failure the {@link Flux} will error. + * + * @param options {@link FeedOptions} + * @return a {@link Flux} containing one or several feed response pages of read + * containers or an error. 
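// Editorial illustration (not part of this diff): idempotent container creation, assuming a
// CosmosDatabase instance named "database". On a 404 from the initial read the container is
// created; otherwise the existing container's response is returned, as implemented in
// createContainerIfNotExistsInternal above.
database.createContainerIfNotExists("events", "/deviceId", 1000)
        .subscribe(response -> System.out.println("Container ready: " + response.properties().id()));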
+ */ + public Flux> readAllContainers(FeedOptions options) { + return getDocClientWrapper().readCollections(getLink(), options) + .map(response -> BridgeInternal.createFeedResponse( + CosmosContainerProperties.getFromV2Results(response.results()), response.responseHeaders())); + } + + /** + * Reads all cosmos containers. + * + * After subscription the operation will be performed. The {@link Flux} will + * contain one or several feed response of the read containers. In case of + * failure the {@link Flux} will error. + * + * @return a {@link Flux} containing one or several feed response pages of read + * containers or an error. + */ + public Flux> readAllContainers() { + return readAllContainers(new FeedOptions()); + } + + /** + * Query for cosmos containers in a cosmos database. + * + * After subscription the operation will be performed. The {@link Flux} will + * contain one or several feed response of the obtained containers. In case of + * failure the {@link Flux} will error. + * + * @param query the query + * @return a {@link Flux} containing one or several feed response pages of the + * obtained containers or an error. + */ + public Flux> queryContainers(String query) { + return queryContainers(new SqlQuerySpec(query)); + } + + /** + * Query for cosmos containers in a cosmos database. + * + * After subscription the operation will be performed. The {@link Flux} will + * contain one or several feed response of the obtained containers. In case of + * failure the {@link Flux} will error. + * + * @param query the query. + * @param options the feed options. + * @return a {@link Flux} containing one or several feed response pages of the + * obtained containers or an error. + */ + public Flux> queryContainers(String query, FeedOptions options) { + return queryContainers(new SqlQuerySpec(query), options); + } + + /** + * Query for cosmos containers in a cosmos database. + * + * After subscription the operation will be performed. The {@link Flux} will + * contain one or several feed response of the obtained containers. In case of + * failure the {@link Flux} will error. + * + * @param querySpec the SQL query specification. + * @return a {@link Flux} containing one or several feed response pages of the + * obtained containers or an error. + */ + public Flux> queryContainers(SqlQuerySpec querySpec) { + return queryContainers(querySpec, null); + } + + /** + * Query for cosmos containers in a cosmos database. + * + * After subscription the operation will be performed. The {@link Flux} will + * contain one or several feed response of the obtained containers. In case of + * failure the {@link Flux} will error. + * + * @param querySpec the SQL query specification. + * @param options the feed options. + * @return a {@link Flux} containing one or several feed response pages of the + * obtained containers or an error. + */ + public Flux> queryContainers(SqlQuerySpec querySpec, FeedOptions options) { + return getDocClientWrapper().queryCollections(getLink(), querySpec, options) + .map(response -> BridgeInternal.createFeedResponse( + CosmosContainerProperties.getFromV2Results(response.results()), response.responseHeaders())); + } + + /** + * Gets a CosmosContainer object without making a service call + * + * @param id id of the container + * @return Cosmos Container + */ + public CosmosContainer getContainer(String id) { + return new CosmosContainer(id, this); + } + + /** User operations **/ + + /** + * Creates a user After subscription the operation will be performed. 
The + * {@link Mono} upon successful completion will contain a single resource + * response with the created user. In case of failure the {@link Mono} will + * error. + * + * @param settings the cosmos user properties + * @return an {@link Mono} containing the single resource response with the + * created cosmos user or an error. + */ + public Mono createUser(CosmosUserProperties settings) { + return getDocClientWrapper().createUser(this.getLink(), settings.getV2User(), null) + .map(response -> new CosmosUserResponse(response, this)).single(); + } + + + /** + * Upsert a user. Upsert will create a new user if it doesn't exist, or replace + * the existing one if it does. After subscription the operation will be + * performed. The {@link Mono} upon successful completion will contain a single + * resource response with the created user. In case of failure the {@link Mono} + * will error. + * + * @param settings the cosmos user properties + * @return an {@link Mono} containing the single resource response with the + * upserted user or an error. + */ + public Mono upsertUser(CosmosUserProperties settings) { + return getDocClientWrapper().upsertUser(this.getLink(), settings.getV2User(), null) + .map(response -> new CosmosUserResponse(response, this)).single(); + } + + /** + * Reads all cosmos users in a database. + * + * After subscription the operation will be performed. The {@link Flux} will + * contain one or several feed response of the read cosmos users. In case of + * failure the {@link Flux} will error. + * + * @return an {@link Flux} containing one or several feed response pages of the + * read cosmos users or an error. + */ + public Flux> readAllUsers() { + return readAllUsers(new FeedOptions()); + } + + /** + * Reads all cosmos users in a database. + * + * After subscription the operation will be performed. The {@link Flux} will + * contain one or several feed response of the read cosmos users. In case of + * failure the {@link Flux} will error. + * + * @param options the feed options. + * @return an {@link Flux} containing one or several feed response pages of the + * read cosmos users or an error. + */ + public Flux> readAllUsers(FeedOptions options) { + return getDocClientWrapper().readUsers(getLink(), options).map(response -> BridgeInternal.createFeedResponse( + CosmosUserProperties.getFromV2Results(response.results()), response.responseHeaders())); + } + + /** + * Query for cosmos users in a database. + * + * After subscription the operation will be performed. The {@link Flux} will + * contain one or several feed response of the obtained users. In case of + * failure the {@link Flux} will error. + * + * @param query query as string + * @return a {@link Flux} containing one or several feed response pages of the + * obtained users or an error. + */ + public Flux> queryUsers(String query) { + return queryUsers(query, null); + } + + /** + * Query for cosmos users in a database. + * + * After subscription the operation will be performed. The {@link Flux} will + * contain one or several feed response of the obtained users. In case of + * failure the {@link Flux} will error. + * + * @param query query as string + * @param options the feed options + * @return a {@link Flux} containing one or several feed response pages of the + * obtained users or an error. + */ + public Flux> queryUsers(String query, FeedOptions options) { + return queryUsers(new SqlQuerySpec(query), options); + } + + /** + * Query for cosmos users in a database. + * + * After subscription the operation will be performed. 
The {@link Flux} will + * contain one or several feed response of the obtained users. In case of + * failure the {@link Flux} will error. + * + * @param querySpec the SQL query specification. + * @return a {@link Flux} containing one or several feed response pages of the + * obtained users or an error. + */ + public Flux> queryUsers(SqlQuerySpec querySpec) { + return queryUsers(querySpec, null); + } + + /** + * Query for cosmos users in a database. + * + * After subscription the operation will be performed. The {@link Flux} will + * contain one or several feed response of the obtained users. In case of + * failure the {@link Flux} will error. + * + * @param querySpec the SQL query specification. + * @param options the feed options. + * @return a {@link Flux} containing one or several feed response pages of the + * obtained users or an error. + */ + public Flux> queryUsers(SqlQuerySpec querySpec, FeedOptions options) { + return getDocClientWrapper().queryUsers(getLink(), querySpec, options) + .map(response -> BridgeInternal.createFeedResponseWithQueryMetrics( + CosmosUserProperties.getFromV2Results(response.results()), response.responseHeaders(), + response.queryMetrics())); + } + + public CosmosUser getUser(String id) { + return new CosmosUser(id, this); + } + + /** + * Gets the throughput of the database + * + * @return a {@link Mono} containing throughput or an error. + */ + public Mono readProvisionedThroughput() { + return this.read().flatMap(cosmosDatabaseResponse -> getDocClientWrapper() + .queryOffers("select * from c where c.offerResourceId = '" + + cosmosDatabaseResponse.resourceSettings().resourceId() + "'", new FeedOptions()) + .single().flatMap(offerFeedResponse -> { + if (offerFeedResponse.results().isEmpty()) { + return Mono.error(BridgeInternal.createCosmosClientException(HttpConstants.StatusCodes.BADREQUEST, + "No offers found for the resource")); + } + return getDocClientWrapper().readOffer(offerFeedResponse.results().get(0).selfLink()).single(); + }).map(cosmosContainerResponse1 -> cosmosContainerResponse1.getResource().getThroughput())); + } + + /** + * Sets throughput provisioned for a container in measurement of + * Requests-per-Unit in the Azure Cosmos service. + * + * @param requestUnitsPerSecond the cosmos container throughput, expressed in + * Request Units per second + * @return a {@link Mono} containing throughput or an error. 
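// Editorial illustration (not part of this diff): reading the current throughput and then
// replacing it with the method documented below, assuming a CosmosDatabase instance named
// "database".
database.readProvisionedThroughput()
        .doOnNext(current -> System.out.println("Current RU/s: " + current))
        .then(database.replaceProvisionedThroughput(800))
        .subscribe(updated -> System.out.println("Updated RU/s: " + updated));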
+ */ + public Mono replaceProvisionedThroughput(int requestUnitsPerSecond) { + return this.read().flatMap(cosmosDatabaseResponse -> this.getDocClientWrapper() + .queryOffers("select * from c where c.offerResourceId = '" + + cosmosDatabaseResponse.resourceSettings().resourceId() + "'", new FeedOptions()) + .single().flatMap(offerFeedResponse -> { + if (offerFeedResponse.results().isEmpty()) { + return Mono.error(BridgeInternal.createCosmosClientException(HttpConstants.StatusCodes.BADREQUEST, + "No offers found for the resource")); + } + Offer offer = offerFeedResponse.results().get(0); + offer.setThroughput(requestUnitsPerSecond); + return this.getDocClientWrapper().replaceOffer(offer).single(); + }).map(offerResourceResponse -> offerResourceResponse.getResource().getThroughput())); + } + + CosmosClient getClient() { + return client; + } + + AsyncDocumentClient getDocClientWrapper() { + return client.getDocClientWrapper(); + } + + String URIPathSegment() { + return Paths.DATABASES_PATH_SEGMENT; + } + + String parentLink() { + return StringUtils.EMPTY; + } + + String getLink() { + StringBuilder builder = new StringBuilder(); + builder.append(parentLink()); + builder.append("/"); + builder.append(URIPathSegment()); + builder.append("/"); + builder.append(id()); + return builder.toString(); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosDatabaseProperties.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosDatabaseProperties.java new file mode 100644 index 0000000000000..8153fb579bb7d --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosDatabaseProperties.java @@ -0,0 +1,61 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.Database; +import com.azure.data.cosmos.internal.ResourceResponse; + +import java.util.List; +import java.util.stream.Collectors; + +/** + * Represents a CosmosDatabase in the Azure Cosmos database service. A cosmos database manages users, permissions and a set of containers + *

+ * Each Azure Cosmos DB Service is able to support multiple independent named databases, with the database being the + * logical container for data. Each Database consists of one or more cosmos containers, each of which in turn contains one or + * more cosmos items. Since databases are an administrative resource, the Service Key will be required in + * order to access and successfully complete any action using the User APIs. + */ +public class CosmosDatabaseProperties extends Resource { + + /** + * Constructor + * @param id id of the database + */ + public CosmosDatabaseProperties(String id) { + super.id(id); + } + + CosmosDatabaseProperties(ResourceResponse response) { + super(response.getResource().toJson()); + } + + // Converting the internal Database representation to CosmosDatabaseProperties + CosmosDatabaseProperties(Database database){ + super(database.toJson()); + } + + static List getFromV2Results(List results){ + return results.stream().map(CosmosDatabaseProperties::new).collect(Collectors.toList()); + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosDatabaseRequestOptions.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosDatabaseRequestOptions.java new file mode 100644 index 0000000000000..ca1b8cc0ecd77 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosDatabaseRequestOptions.java @@ -0,0 +1,80 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.RequestOptions; + +/** + * Encapsulates options that can be specified for a request issued to cosmos database. + */ +public class CosmosDatabaseRequestOptions{ + private Integer offerThroughput; + private AccessCondition accessCondition; + + /** + * Gets the conditions associated with the request. + * + * @return the access condition. + */ + public AccessCondition accessCondition() { + return accessCondition; + } + + /** + * Sets the conditions associated with the request. + * + * @param accessCondition the access condition. + * @return the current request options + */ + public CosmosDatabaseRequestOptions accessCondition(AccessCondition accessCondition) { + this.accessCondition = accessCondition; + return this; + } + + /** + * Gets the throughput in the form of Request Units per second when creating a cosmos database. + * + * @return the throughput value.
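// Editorial illustration (not part of this diff): passing database-level request options,
// assuming a CosmosDatabase instance named "database" and a pre-built AccessCondition named
// "ifMatchCondition" (its construction is outside the scope of this change).
CosmosDatabaseRequestOptions options = new CosmosDatabaseRequestOptions()
        .accessCondition(ifMatchCondition);   // conditional delete based on the access condition
database.delete(options).subscribe();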
+ */ + Integer offerThroughput() { + return offerThroughput; + } + + /** + * Sets the throughput in the form of Request Units per second when creating a cosmos database. + * + * @param offerThroughput the throughput value. + * @return the current request options + */ + CosmosDatabaseRequestOptions offerThroughput(Integer offerThroughput) { + this.offerThroughput = offerThroughput; + return this; + } + + RequestOptions toRequestOptions() { + RequestOptions options = new RequestOptions(); + options.setAccessCondition(accessCondition); + options.setOfferThroughput(offerThroughput); + return options; + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosDatabaseResponse.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosDatabaseResponse.java new file mode 100644 index 0000000000000..b1b2240c289fe --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosDatabaseResponse.java @@ -0,0 +1,77 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.Database; +import com.azure.data.cosmos.internal.ResourceResponse; + +public class CosmosDatabaseResponse extends CosmosResponse{ + private CosmosDatabase database; + + CosmosDatabaseResponse(ResourceResponse response, CosmosClient client) { + super(response); + if(response.getResource() == null){ + super.resourceSettings(null); + }else{ + super.resourceSettings(new CosmosDatabaseProperties(response)); + database = new CosmosDatabase(resourceSettings().id(), client); + } + } + + /** + * Gets the CosmosDatabase object + * + * @return {@link CosmosDatabase} + */ + public CosmosDatabase database() { + return database; + } + + /** + * Gets the cosmos database properties + * + * @return the cosmos database properties + */ + public CosmosDatabaseProperties properties() { + return resourceSettings(); + } + + /** + * Gets the Max Quota. + * + * @return the database quota. + */ + public long databaseQuota(){ + return resourceResponseWrapper.getDatabaseQuota(); + } + + /** + * Gets the current Usage. + * + * @return the current database usage. 
+ */ + public long databaseUsage(){ + return resourceResponseWrapper.getDatabaseUsage(); + } + +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosError.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosError.java new file mode 100644 index 0000000000000..15f0351df0912 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosError.java @@ -0,0 +1,148 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.Constants; +import com.fasterxml.jackson.databind.node.ObjectNode; + +/** + * Encapsulates error related details in the Azure Cosmos DB database service. + */ +public class CosmosError extends Resource { + /** + * Initialize a new instance of the Error object. + */ + public CosmosError() { + super(); + } + + /** + * Initialize a new instance of the Error object from a JSON string. + * + * @param objectNode the {@link ObjectNode} that represents the error. + */ + CosmosError(ObjectNode objectNode) { + super(objectNode); + } + + /** + * Initialize a new instance of the Error object from a JSON string. + * + * @param jsonString the jsonString that represents the error. + */ + CosmosError(String jsonString) { + super(jsonString); + } + + /** + * Initialize a new instance of the Error object. + * + * @param errorCode the error code. + * @param message the error message. + */ + public CosmosError(String errorCode, String message) { + this(errorCode, message, null); + } + + /** + * Initialize a new instance of the Error object. + * + * @param errorCode + * the error code. + * @param message + * the error message. + * @param additionalErrorInfo + * additional error info. + */ + public CosmosError(String errorCode, String message, String additionalErrorInfo) { + super(); + this.setCode(errorCode); + this.setMessage(message); + this.setAdditionalErrorInfo(additionalErrorInfo); + } + + /** + * Gets the error code. + * + * @return the error code. + */ + public String getCode() { + return super.getString(Constants.Properties.CODE); + } + + /** + * Sets the error code. + * + * @param code the error code. + */ + private void setCode(String code) { + super.set(Constants.Properties.CODE, code); + } + + /** + * Gets the error message. + * + * @return the error message. 
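// Editorial illustration (not part of this diff): the public CosmosError constructor and
// getters defined in this file.
CosmosError cosmosError = new CosmosError("NotFound", "The requested resource was not found.");
System.out.println(cosmosError.getCode() + ": " + cosmosError.getMessage());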
+ */ + public String getMessage() { + return super.getString(Constants.Properties.MESSAGE); + } + + /** + * Sets the error message. + * + * @param message the error message. + */ + private void setMessage(String message) { + super.set(Constants.Properties.MESSAGE, message); + } + + /** + * Gets the error details. + * + * @return the error details. + */ + public String getErrorDetails() { + return super.getString(Constants.Properties.ERROR_DETAILS); + } + + /** + * Sets the partitioned query execution info. + * + * @param additionalErrorInfo + * the partitioned query execution info. + */ + private void setAdditionalErrorInfo(String additionalErrorInfo) { + super.set(Constants.Properties.ADDITIONAL_ERROR_INFO, additionalErrorInfo); + } + + /** + * Gets the partitioned query execution info. + * + * @return the partitioned query execution info. + */ + public String getPartitionedQueryExecutionInfo() { + return super.getString(Constants.Properties.ADDITIONAL_ERROR_INFO); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosItem.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosItem.java new file mode 100644 index 0000000000000..74eeaa27a576c --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosItem.java @@ -0,0 +1,186 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.Document; +import com.azure.data.cosmos.internal.Paths; +import com.azure.data.cosmos.internal.RequestOptions; +import reactor.core.publisher.Mono; + +public class CosmosItem { + private Object partitionKey; + private CosmosContainer container; + private String id; + + CosmosItem(String id, Object partitionKey, CosmosContainer container) { + this.id = id; + this.partitionKey = partitionKey; + this.container = container; + } + + /** + * Get the id of the {@link CosmosItem} + * @return the id of the {@link CosmosItem} + */ + public String id() { + return id; + } + + /** + * Set the id of the {@link CosmosItem} + * @param id the id of the {@link CosmosItem} + * @return the same {@link CosmosItem} that had the id set + */ + CosmosItem id(String id) { + this.id = id; + return this; + } + + /** + * Reads an item. + * + * After subscription the operation will be performed. 
+ * The {@link Mono} upon successful completion will contain a cosmos item response with the read item. + * In case of failure the {@link Mono} will error. + * + * @return an {@link Mono} containing the cosmos item response with the read item or an error + */ + public Mono read() { + return read(new CosmosItemRequestOptions(partitionKey)); + } + + /** + * Reads an item. + * + * After subscription the operation will be performed. + * The {@link Mono} upon successful completion will contain a cosmos item response with the read item. + * In case of failure the {@link Mono} will error. + * + * @param options the request cosmosItemRequestOptions + * @return an {@link Mono} containing the cosmos item response with the read item or an error + */ + public Mono read(CosmosItemRequestOptions options) { + if (options == null) { + options = new CosmosItemRequestOptions(); + } + RequestOptions requestOptions = options.toRequestOptions(); + return container.getDatabase().getDocClientWrapper() + .readDocument(getLink(), requestOptions) + .map(response -> new CosmosItemResponse(response, requestOptions.getPartitionKey(), container)) + .single(); + } + + /** + * Replaces an item with the passed in item. + * + * After subscription the operation will be performed. + * The {@link Mono} upon successful completion will contain a single cosmos item response with the replaced item. + * In case of failure the {@link Mono} will error. + * + * @param item the item to replace (containing the document id). + * @return an {@link Mono} containing the cosmos item resource response with the replaced item or an error. + */ + public Mono replace(Object item){ + return replace(item, new CosmosItemRequestOptions(partitionKey)); + } + + /** + * Replaces an item with the passed in item. + * + * After subscription the operation will be performed. + * The {@link Mono} upon successful completion will contain a single cosmos item response with the replaced item. + * In case of failure the {@link Mono} will error. + * + * @param item the item to replace (containing the document id). + * @param options the request cosmosItemRequestOptions + * @return an {@link Mono} containing the cosmos item resource response with the replaced item or an error. + */ + public Mono replace(Object item, CosmosItemRequestOptions options){ + Document doc = CosmosItemProperties.fromObject(item); + if (options == null) { + options = new CosmosItemRequestOptions(); + } + RequestOptions requestOptions = options.toRequestOptions(); + return container.getDatabase() + .getDocClientWrapper() + .replaceDocument(getLink(), doc, requestOptions) + .map(response -> new CosmosItemResponse(response, requestOptions.getPartitionKey(), container)) + .single(); + } + + /** + * Deletes the item. + * + * After subscription the operation will be performed. + * The {@link Mono} upon successful completion will contain a single cosmos item response for the deleted item. + * In case of failure the {@link Mono} will error. + * @return an {@link Mono} containing the cosmos item resource response. + */ + public Mono delete() { + return delete(new CosmosItemRequestOptions(partitionKey)); + } + + /** + * Deletes the item. + * + * After subscription the operation will be performed. + * The {@link Mono} upon successful completion will contain a single cosmos item response for the deleted item. + * In case of failure the {@link Mono} will error. + * + * @param options the request options + * @return an {@link Mono} containing the cosmos item resource response.
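// Editorial illustration (not part of this diff): chaining the read, replace, and delete
// operations above on a CosmosItem handle; "item" is assumed to come from a CosmosItemResponse
// and "updatedOrder" is an assumed application POJO carrying the same document id.
item.read()
        .flatMap(readResponse -> item.replace(updatedOrder))
        .flatMap(replaceResponse -> item.delete())
        .subscribe();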
+ */ + public Mono delete(CosmosItemRequestOptions options){ + if (options == null) { + options = new CosmosItemRequestOptions(); + } + RequestOptions requestOptions = options.toRequestOptions(); + return container.getDatabase() + .getDocClientWrapper() + .deleteDocument(getLink(), requestOptions) + .map(response -> new CosmosItemResponse(response, requestOptions.getPartitionKey(), container)) + .single(); + } + + void setContainer(CosmosContainer container) { + this.container = container; + } + + String URIPathSegment() { + return Paths.DOCUMENTS_PATH_SEGMENT; + } + + String parentLink() { + return this.container.getLink(); + } + + String getLink() { + StringBuilder builder = new StringBuilder(); + builder.append(parentLink()); + builder.append("/"); + builder.append(URIPathSegment()); + builder.append("/"); + builder.append(id()); + return builder.toString(); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosItemProperties.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosItemProperties.java new file mode 100644 index 0000000000000..c610554cf3b58 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosItemProperties.java @@ -0,0 +1,92 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.Document; +import com.azure.data.cosmos.internal.Utils; +import com.fasterxml.jackson.databind.ObjectMapper; + +import java.io.IOException; +import java.util.List; +import java.util.stream.Collectors; + +public class CosmosItemProperties extends Resource { + + private static final ObjectMapper mapper = Utils.getSimpleObjectMapper(); + + /** + * Initialize an empty CosmosItemProperties object. + */ + public CosmosItemProperties() { + } + + /** + * Sets the id + * + * @param id the name of the resource. + * @return the cosmos item properties with id set + */ + public CosmosItemProperties id(String id) { + super.id(id); + return this; + } + + /** + * Initialize a CosmosItemProperties object from json string. + * + * @param jsonString the json string that represents the document object. 
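// Editorial illustration (not part of this diff): building item properties from raw JSON and
// reading them back as a typed object. "Order" is an assumed application POJO, and getObject
// (declared further down in this file) throws IOException on malformed input, so call it from
// code that declares or handles that exception.
CosmosItemProperties itemProperties =
        new CosmosItemProperties("{\"id\":\"order-1\",\"total\":42}");
Order order = itemProperties.getObject(Order.class);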
+ */ + public CosmosItemProperties(String jsonString) { + super(jsonString); + } + + /** + * fromObject returns Document for compatibility with V2 sdk + * + * @param cosmosItem + * @return + */ + static Document fromObject(Object cosmosItem) { + Document typedItem; + if (cosmosItem instanceof CosmosItemProperties) { + typedItem = new Document(((CosmosItemProperties) cosmosItem).toJson()); + } else { + try { + return new Document(CosmosItemProperties.mapper.writeValueAsString(cosmosItem)); + } catch (IOException e) { + throw new IllegalArgumentException("Can't serialize the object into the json string", e); + } + } + return typedItem; + } + + static List getFromV2Results(List results) { + return results.stream().map(document -> new CosmosItemProperties(document.toJson())) + .collect(Collectors.toList()); + } + + public T getObject(Class klass) throws IOException { + return (T) mapper.readValue(this.toJson(), klass); + } + +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosItemRequestOptions.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosItemRequestOptions.java new file mode 100644 index 0000000000000..9d2363e0aae16 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosItemRequestOptions.java @@ -0,0 +1,212 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.RequestOptions; + +import java.util.List; + +/** + * Encapsulates options that can be specified for a request issued to cosmos Item. + */ +public class CosmosItemRequestOptions { + private ConsistencyLevel consistencyLevel; + private IndexingDirective indexingDirective; + private List preTriggerInclude; + private List postTriggerInclude; + private String sessionToken; + private PartitionKey partitionKey; + private AccessCondition accessCondition; + + /** + * Constructor + */ + public CosmosItemRequestOptions(){ + super(); + } + + /** + * Constructor + * @param partitionKey the partition key + */ + public CosmosItemRequestOptions(Object partitionKey){ + super(); + if (partitionKey instanceof PartitionKey) { + partitionKey((PartitionKey) partitionKey); + } else { + partitionKey(new PartitionKey(partitionKey)); + } + } + + /** + * Gets the conditions associated with the request. + * + * @return the access condition. 
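// Editorial illustration (not part of this diff): request options scoped to a single logical
// partition. The fluent setters appear later in this file; ConsistencyLevel.SESSION and the
// "savedSessionToken" captured from an earlier response are assumptions.
CosmosItemRequestOptions itemOptions =
        new CosmosItemRequestOptions(new PartitionKey("customer-42"))
                .consistencyLevel(ConsistencyLevel.SESSION)
                .sessionToken(savedSessionToken);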
+ */ + public AccessCondition accessCondition() { + return accessCondition; + } + + /** + * Sets the conditions associated with the request. + * + * @param accessCondition the access condition. + * @return the current request options + */ + public CosmosItemRequestOptions accessCondition(AccessCondition accessCondition) { + this.accessCondition = accessCondition; + return this; + } + + /** + * Gets the consistency level required for the request. + * + * @return the consistency level. + */ + public ConsistencyLevel consistencyLevel() { + return consistencyLevel; + } + + /** + * Sets the consistency level required for the request. + * + * @param consistencyLevel the consistency level. + * @return the CosmosItemRequestOptions. + */ + public CosmosItemRequestOptions consistencyLevel(ConsistencyLevel consistencyLevel) { + this.consistencyLevel = consistencyLevel; + return this; + } + + /** + * Gets the indexing directive (index, do not index etc). + * + * @return the indexing directive. + */ + public IndexingDirective indexingDirective() { + return indexingDirective; + } + + /** + * Sets the indexing directive (index, do not index etc). + * + * @param indexingDirective the indexing directive. + * @return the CosmosItemRequestOptions. + */ + public CosmosItemRequestOptions indexingDirective(IndexingDirective indexingDirective) { + this.indexingDirective = indexingDirective; + return this; + } + + /** + * Gets the triggers to be invoked before the operation. + * + * @return the triggers to be invoked before the operation. + */ + public List preTriggerInclude() { + return preTriggerInclude; + } + + /** + * Sets the triggers to be invoked before the operation. + * + * @param preTriggerInclude the triggers to be invoked before the operation. + * @return the CosmosItemRequestOptions. + */ + public CosmosItemRequestOptions preTriggerInclude(List preTriggerInclude) { + this.preTriggerInclude = preTriggerInclude; + return this; + } + + /** + * Gets the triggers to be invoked after the operation. + * + * @return the triggers to be invoked after the operation. + */ + public List postTriggerInclude() { + return postTriggerInclude; + } + + /** + * Sets the triggers to be invoked after the operation. + * + * @param postTriggerInclude the triggers to be invoked after the operation. + * @return the CosmosItemRequestOptions. + */ + public CosmosItemRequestOptions postTriggerInclude(List postTriggerInclude) { + this.postTriggerInclude = postTriggerInclude; + return this; + } + + /** + * Gets the token for use with session consistency. + * + * @return the session token. + */ + public String sessionToken() { + return sessionToken; + } + + /** + * Sets the token for use with session consistency. + * + * @param sessionToken the session token. + * @return the CosmosItemRequestOptions. + */ + public CosmosItemRequestOptions sessionToken(String sessionToken) { + this.sessionToken = sessionToken; + return this; + } + + /** + * Sets the partition key + * @param partitionKey the partition key + * @return the CosmosItemRequestOptions. + */ + public CosmosItemRequestOptions partitionKey(PartitionKey partitionKey) { + this.partitionKey = partitionKey; + return this; + } + + /** + * Gets the partition key + * @return the partition key + */ + public PartitionKey partitionKey() { + return partitionKey; + } + + RequestOptions toRequestOptions() { + //TODO: Should we set any default values instead of nulls? 
+        RequestOptions requestOptions = new RequestOptions();
+        requestOptions.setAccessCondition(accessCondition);
+        requestOptions.setConsistencyLevel(consistencyLevel());
+        requestOptions.setIndexingDirective(indexingDirective);
+        requestOptions.setPreTriggerInclude(preTriggerInclude);
+        requestOptions.setPostTriggerInclude(postTriggerInclude);
+        requestOptions.setSessionToken(sessionToken);
+        requestOptions.setPartitionKey(partitionKey);
+        return requestOptions;
+    }
+}
\ No newline at end of file
diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosItemResponse.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosItemResponse.java
new file mode 100644
index 0000000000000..f25d0d2361789
--- /dev/null
+++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosItemResponse.java
@@ -0,0 +1,56 @@
+/*
+ * The MIT License (MIT)
+ * Copyright (c) 2018 Microsoft Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.Document; +import com.azure.data.cosmos.internal.ResourceResponse; + +public class CosmosItemResponse extends CosmosResponse{ + private CosmosItem itemClient; + + CosmosItemResponse(ResourceResponse response, PartitionKey partitionKey, CosmosContainer container) { + super(response); + if(response.getResource() == null){ + super.resourceSettings(null); + }else{ + super.resourceSettings(new CosmosItemProperties(response.getResource().toJson())); + itemClient = new CosmosItem(response.getResource().id(),partitionKey, container); + } + } + + /** + * Gets the itemSettings + * @return the itemSettings + */ + public CosmosItemProperties properties() { + return resourceSettings(); + } + + /** + * Gets the CosmosItem + * @return the cosmos item + */ + public CosmosItem item() { + return itemClient; + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosPermission.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosPermission.java new file mode 100644 index 0000000000000..590c95cc23b5d --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosPermission.java @@ -0,0 +1,134 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.Paths; +import com.azure.data.cosmos.internal.RequestOptions; +import reactor.core.publisher.Mono; + +public class CosmosPermission { + + private final CosmosUser cosmosUser; + private String id; + + CosmosPermission(String id, CosmosUser user){ + this.id = id; + this.cosmosUser = user; + } + + /** + * Get the id of the {@link CosmosPermission} + * @return the id of the {@link CosmosPermission} + */ + public String id() { + return id; + } + + /** + * Set the id of the {@link CosmosPermission} + * @param id the id of the {@link CosmosPermission} + * @return the same {@link CosmosPermission} that had the id set + */ + CosmosPermission id(String id) { + this.id = id; + return this; + } + + /** + * Reads a permission. + *
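+     * A usage sketch (the {@code permission} handle is assumed to have been obtained from a
+     * {@link CosmosUser}, and the empty {@code RequestOptions} instance is purely illustrative):
+     * <pre>
+     * {@code
+     * permission.read(new RequestOptions())
+     *     .subscribe(response -> System.out.println(response.properties().id()));
+     * }
+     * </pre>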

+ * After subscription the operation will be performed. + * The {@link Mono} upon successful completion will contain a single resource response with the read permission. + * In case of failure the {@link Mono} will error. + * + * @param options the request options. + * @return an {@link Mono} containing the single resource response with the read permission or an error. + */ + public Mono read(RequestOptions options) { + + return cosmosUser.getDatabase() + .getDocClientWrapper() + .readPermission(getLink(),options) + .map(response -> new CosmosPermissionResponse(response, cosmosUser)) + .single(); + } + + /** + * Replaces a permission. + *

+ * After subscription the operation will be performed. + * The {@link Mono} upon successful completion will contain a single resource response with the replaced permission. + * In case of failure the {@link Mono} will error. + * + * @param permissionSettings the permission properties to use. + * @param options the request options. + * @return an {@link Mono} containing the single resource response with the replaced permission or an error. + */ + public Mono replace(CosmosPermissionProperties permissionSettings, RequestOptions options) { + + return cosmosUser.getDatabase() + .getDocClientWrapper() + .replacePermission(permissionSettings.getV2Permissions(), options) + .map(response -> new CosmosPermissionResponse(response, cosmosUser)) + .single(); + } + + /** + * Deletes a permission. + *
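+     * For example (the {@code permission} handle is assumed; passing {@code null} options is
+     * allowed and falls back to defaults, as the implementation below shows):
+     * <pre>
+     * {@code
+     * permission.delete(null)
+     *     .subscribe(response -> System.out.println("deleted, status " + response.statusCode()));
+     * }
+     * </pre>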

+ * After subscription the operation will be performed. + * The {@link Mono} upon successful completion will contain a single resource response for the deleted permission. + * In case of failure the {@link Mono} will error. + * + * @param options the request options. + * @return an {@link Mono} containing the single resource response for the deleted permission or an error. + */ + public Mono delete(CosmosPermissionRequestOptions options) { + if(options == null){ + options = new CosmosPermissionRequestOptions(); + } + return cosmosUser.getDatabase() + .getDocClientWrapper() + .deletePermission(getLink(), options.toRequestOptions()) + .map(response -> new CosmosPermissionResponse(response, cosmosUser)) + .single(); + } + + String URIPathSegment() { + return Paths.PERMISSIONS_PATH_SEGMENT; + } + + String parentLink() { + return cosmosUser.getLink(); + } + + String getLink() { + StringBuilder builder = new StringBuilder(); + builder.append(parentLink()); + builder.append("/"); + builder.append(URIPathSegment()); + builder.append("/"); + builder.append(id()); + return builder.toString(); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosPermissionProperties.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosPermissionProperties.java new file mode 100644 index 0000000000000..dce8f70a96799 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosPermissionProperties.java @@ -0,0 +1,138 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.Constants; +import com.azure.data.cosmos.internal.Permission; +import org.apache.commons.lang3.StringUtils; + +import java.util.List; +import java.util.stream.Collectors; + +public class CosmosPermissionProperties extends Resource { + + public static List getFromV2Results(List results) { + return results.stream().map(permission -> new CosmosPermissionProperties(permission.toJson())).collect(Collectors.toList()); + } + + /** + * Initialize a permission object. + */ + public CosmosPermissionProperties() { + super(); + } + + /** + * Sets the id + * + * @param id the name of the resource. + * @return the current {@link CosmosPermissionProperties} object + */ + public CosmosPermissionProperties id(String id) { + super.id(id); + return this; + } + + /** + * Initialize a permission object from json string. 
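+     * Callers normally build the equivalent state through the public fluent setters instead; in the
+     * sketch below the id, the permission mode constant, and the {@code containerSelfLink} variable
+     * are all assumed for illustration:
+     * <pre>
+     * {@code
+     * CosmosPermissionProperties properties = new CosmosPermissionProperties()
+     *     .id("readOnlyPermission")
+     *     .permissionMode(PermissionMode.READ)
+     *     .resourceLink(containerSelfLink);
+     * }
+     * </pre>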
+ * + * @param jsonString the json string that represents the permission. + */ + CosmosPermissionProperties(String jsonString) { + super(jsonString); + } + + /** + * Gets the self-link of resource to which the permission applies. + * + * @return the resource link. + */ + public String resourceLink() { + return super.getString(Constants.Properties.RESOURCE_LINK); + } + + /** + * Sets the self-link of resource to which the permission applies. + * + * @param resourceLink the resource link. + * @return the current {@link CosmosPermissionProperties} object + */ + public CosmosPermissionProperties resourceLink(String resourceLink) { + super.set(Constants.Properties.RESOURCE_LINK, resourceLink); + return this; + } + + /** + * Gets the permission mode. + * + * @return the permission mode. + */ + public PermissionMode permissionMode() { + String value = super.getString(Constants.Properties.PERMISSION_MODE); + return PermissionMode.valueOf(StringUtils.upperCase(value)); + } + + /** + * Sets the permission mode. + * + * @param permissionMode the permission mode. + * @return the current {@link CosmosPermissionProperties} object + */ + public CosmosPermissionProperties permissionMode(PermissionMode permissionMode) { + this.set(Constants.Properties.PERMISSION_MODE, + permissionMode.toString().toLowerCase()); + return this; + } + + //TODO: need value from JsonSerializable +// /** +// * Gets the resource partition key associated with this permission object. +// * +// * @return the partition key. +// */ +// public PartitionKey getResourcePartitionKey() { +// PartitionKey key = null; +// Object value = super.get(Constants.Properties.RESOURCE_PARTITION_KEY); +// if (value != null) { +// ArrayNode arrayValue = (ArrayNode) value; +// key = new PartitionKey(value(arrayValue.get(0))); +// } +// +// return key; +// } + + /** + * Sets the resource partition key associated with this permission object. + * + * @param partitionKey the partition key. + * @return the current {@link CosmosPermissionProperties} object + */ + public CosmosPermissionProperties resourcePartitionKey(PartitionKey partitionKey) { + super.set(Constants.Properties.RESOURCE_PARTITION_KEY, partitionKey.getInternalPartitionKey().toJson()); + return this; + } + + Permission getV2Permissions() { + return new Permission(this.toJson()); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosPermissionRequestOptions.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosPermissionRequestOptions.java new file mode 100644 index 0000000000000..d3cb39f886523 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosPermissionRequestOptions.java @@ -0,0 +1,60 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.RequestOptions; + +/** + * Contains the request options of CosmosPermission + */ +public class CosmosPermissionRequestOptions { + //TODO: Need to add respective options + private AccessCondition accessCondition; + + /** + * Gets the conditions associated with the request. + * + * @return the access condition. + */ + public AccessCondition accessCondition() { + return accessCondition; + } + + /** + * Sets the conditions associated with the request. + * + * @param accessCondition the access condition. + * @return the current request options + */ + public CosmosPermissionRequestOptions accessCondition(AccessCondition accessCondition) { + this.accessCondition = accessCondition; + return this; + } + + RequestOptions toRequestOptions() { + //TODO: Should we set any default values instead of nulls? + RequestOptions requestOptions = new RequestOptions(); + requestOptions.setAccessCondition(accessCondition); + return requestOptions; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosPermissionResponse.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosPermissionResponse.java new file mode 100644 index 0000000000000..8227ac9419c10 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosPermissionResponse.java @@ -0,0 +1,58 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.Permission; +import com.azure.data.cosmos.internal.ResourceResponse; + +public class CosmosPermissionResponse extends CosmosResponse { + CosmosPermission permissionClient; + + CosmosPermissionResponse(ResourceResponse response, CosmosUser cosmosUser) { + super(response); + if(response.getResource() == null){ + super.resourceSettings(null); + }else{ + super.resourceSettings(new CosmosPermissionProperties(response.getResource().toJson())); + permissionClient = new CosmosPermission(response.getResource().id(), cosmosUser); + } + } + + /** + * Get the permission properties + * + * @return the permission properties + */ + public CosmosPermissionProperties properties() { + return super.resourceSettings(); + } + + /** + * Gets the CosmosPermission + * + * @return the cosmos permission + */ + public CosmosPermission permission() { + return permissionClient; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosResourceType.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosResourceType.java new file mode 100644 index 0000000000000..dce4aa8475c12 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosResourceType.java @@ -0,0 +1,54 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +/** + * Resource types in the Azure Cosmos DB database service. 
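+ * Each constant carries a numeric code that can be read back via {@code value()}; for example,
+ * {@code CosmosResourceType.Document.value()} evaluates to {@code 2}.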
+ */ +public enum CosmosResourceType { + + System(-100), + Attachment(3), + DocumentCollection(1), + Conflict(107), + Database(0), + Document(2), + Index(104), + Offer(113), + Permission(5), + StoredProcedure(109), + Trigger(110), + User(4), + UserDefinedFunction(111); + + final private int value; + + CosmosResourceType(int value) { + this.value = value; + } + + public int value() { + return this.value; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosResponse.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosResponse.java new file mode 100644 index 0000000000000..fe227d5bd771b --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosResponse.java @@ -0,0 +1,137 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.ResourceResponse; +import com.azure.data.cosmos.internal.StoredProcedureResponse; + +import java.time.Duration; +import java.util.Map; + +public class CosmosResponse { + private T resourceSettings; + ResourceResponse resourceResponseWrapper; + + CosmosResponse(ResourceResponse resourceResponse){ + this.resourceResponseWrapper = resourceResponse; + } + + CosmosResponse(T resourceSettings){ + this.resourceSettings = resourceSettings; + } + + // Only used in CosmosStoredProcedureResponse compatibility with StoredProcedureResponse + CosmosResponse(StoredProcedureResponse response) { + } + + T resourceSettings() { + return resourceSettings; + } + + CosmosResponse resourceSettings(T resourceSettings){ + this.resourceSettings = resourceSettings; + return this; + } + + /** + * Gets the maximum size limit for this entity (in megabytes (MB) for server resources and in count for master + * resources). + * + * @return the max resource quota. + */ + public String maxResourceQuota() { + return resourceResponseWrapper.getMaxResourceQuota(); + } + + /** + * Gets the current size of this entity (in megabytes (MB) for server resources and in count for master resources) + * + * @return the current resource quota usage. + */ + public String currentResourceQuotaUsage() { + return resourceResponseWrapper.getCurrentResourceQuotaUsage(); + } + + /** + * Gets the Activity ID for the request. + * + * @return the activity id. 
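+     * Together with {@link #requestCharge()} and {@link #requestLatency()} this is convenient for
+     * request logging, for example (the {@code response} variable is assumed):
+     * <pre>
+     * {@code
+     * System.out.println(response.activityId()
+     *     + " charge=" + response.requestCharge()
+     *     + " latency=" + response.requestLatency());
+     * }
+     * </pre>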
+     */
+    public String activityId() {
+        return resourceResponseWrapper.getActivityId();
+    }
+
+    /**
+     * Gets the request charge, expressed in request units, consumed by the operation.
+     *
+     * @return the request charge.
+     */
+    public double requestCharge() {
+        return resourceResponseWrapper.getRequestCharge();
+    }
+
+    /**
+     * Gets the HTTP status code associated with the response.
+     *
+     * @return the status code.
+     */
+    public int statusCode() {
+        return resourceResponseWrapper.getStatusCode();
+    }
+
+    /**
+     * Gets the token used for managing client's consistency requirements.
+     *
+     * @return the session token.
+     */
+    public String sessionToken() {
+        return resourceResponseWrapper.getSessionToken();
+    }
+
+    /**
+     * Gets the headers associated with the response.
+     *
+     * @return the response headers.
+     */
+    public Map<String, String> responseHeaders() {
+        return resourceResponseWrapper.getResponseHeaders();
+    }
+
+    /**
+     * Gets the diagnostics information for the current request to the Azure Cosmos DB service.
+     *
+     * @return diagnostics information for the current request to the Azure Cosmos DB service.
+     */
+    public CosmosResponseDiagnostics cosmosResponseDiagnosticsString() {
+        return resourceResponseWrapper.getCosmosResponseDiagnostics();
+    }
+
+    /**
+     * Gets the end-to-end request latency for the current request to the Azure Cosmos DB service.
+     *
+     * @return end-to-end request latency for the current request to the Azure Cosmos DB service.
+     */
+    public Duration requestLatency() {
+        return resourceResponseWrapper.getRequestLatency();
+    }
+}
diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosResponseDiagnostics.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosResponseDiagnostics.java
new file mode 100644
index 0000000000000..3346298fba3a0
--- /dev/null
+++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosResponseDiagnostics.java
@@ -0,0 +1,63 @@
+/*
+ * The MIT License (MIT)
+ * Copyright (c) 2018 Microsoft Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */ +package com.azure.data.cosmos; + +import java.time.Duration; + +/** + * This class represents response diagnostic statistics associated with a request to Azure Cosmos DB + */ +public class CosmosResponseDiagnostics { + + private ClientSideRequestStatistics clientSideRequestStatistics; + + CosmosResponseDiagnostics() { + this.clientSideRequestStatistics = new ClientSideRequestStatistics(); + } + + ClientSideRequestStatistics clientSideRequestStatistics() { + return clientSideRequestStatistics; + } + + CosmosResponseDiagnostics clientSideRequestStatistics(ClientSideRequestStatistics clientSideRequestStatistics) { + this.clientSideRequestStatistics = clientSideRequestStatistics; + return this; + } + + /** + * Retrieves Response Diagnostic String + * @return Response Diagnostic String + */ + @Override + public String toString() { + return this.clientSideRequestStatistics.toString(); + } + + /** + * Retrieves latency related to the completion of the request + * @return request completion latency + */ + public Duration requestLatency() { + return this.clientSideRequestStatistics.getRequestLatency(); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosScripts.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosScripts.java new file mode 100644 index 0000000000000..3891d36e843da --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosScripts.java @@ -0,0 +1,310 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.StoredProcedure; +import com.azure.data.cosmos.internal.Trigger; +import com.azure.data.cosmos.internal.UserDefinedFunction; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +public class CosmosScripts { + private final CosmosContainer container; + private final CosmosDatabase database; + + CosmosScripts(CosmosContainer container) { + this.container = container; + this.database = container.getDatabase(); + } + /* CosmosStoredProcedure operations */ + + /** + * Creates a cosmos stored procedure. + * + * After subscription the operation will be performed. + * The {@link Mono} upon successful completion will contain a single cosmos stored procedure response with the + * created cosmos stored procedure. + * In case of failure the {@link Mono} will error. + * + * @param properties the cosmos stored procedure properties. 
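+     *                   (for example {@code new CosmosStoredProcedureProperties("helloSproc",
+     *                   "function() { getContext().getResponse().setBody('Hello');}")}; the id and
+     *                   body shown here are illustrative only)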
+ * @return an {@link Mono} containing the single cosmos stored procedure resource response or an error. + */ + public Mono createStoredProcedure(CosmosStoredProcedureProperties properties){ + return this.createStoredProcedure(properties, new CosmosStoredProcedureRequestOptions()); + } + + /** + * Creates a cosmos stored procedure. + * + * After subscription the operation will be performed. + * The {@link Mono} upon successful completion will contain a single cosmos stored procedure response with the + * created cosmos stored procedure. + * In case of failure the {@link Mono} will error. + * + * @param properties the cosmos stored procedure properties. + * @param options the stored procedure request options. + * @return an {@link Mono} containing the single cosmos stored procedure resource response or an error. + */ + public Mono createStoredProcedure(CosmosStoredProcedureProperties properties, + CosmosStoredProcedureRequestOptions options){ + if(options == null){ + options = new CosmosStoredProcedureRequestOptions(); + } + StoredProcedure sProc = new StoredProcedure(); + sProc.id(properties.id()); + sProc.setBody(properties.body()); + return database.getDocClientWrapper() + .createStoredProcedure(container.getLink(), sProc, options.toRequestOptions()) + .map(response -> new CosmosStoredProcedureResponse(response, this.container)) + .single(); + } + + /** + * Reads all cosmos stored procedures in a container. + * + * After subscription the operation will be performed. + * The {@link Flux} will contain one or several feed response pages of the read cosmos stored procedure properties. + * In case of failure the {@link Flux} will error. + * + * @param options the feed options. + * @return an {@link Flux} containing one or several feed response pages of the read cosmos stored procedures + * properties or an error. + */ + public Flux> readAllStoredProcedures(FeedOptions options){ + return database.getDocClientWrapper() + .readStoredProcedures(container.getLink(), options) + .map(response -> BridgeInternal.createFeedResponse(CosmosStoredProcedureProperties.getFromV2Results(response.results()), + response.responseHeaders())); + } + + /** + * Query for stored procedures in a container. + * + * After subscription the operation will be performed. + * The {@link Flux} will contain one or several feed response pages of the obtained stored procedures. + * In case of failure the {@link Flux} will error. + * + * @param query the the query. + * @param options the feed options. + * @return an {@link Flux} containing one or several feed response pages of the obtained stored procedures or + * an error. + */ + public Flux> queryStoredProcedures(String query, + FeedOptions options){ + return queryStoredProcedures(new SqlQuerySpec(query), options); + } + + /** + * Query for stored procedures in a container. + * + * After subscription the operation will be performed. + * The {@link Flux} will contain one or several feed response pages of the obtained stored procedures. + * In case of failure the {@link Flux} will error. + * + * @param querySpec the SQL query specification. + * @param options the feed options. + * @return an {@link Flux} containing one or several feed response pages of the obtained stored procedures or + * an error. 
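+     * A usage sketch ({@code scripts} is an assumed {@link CosmosScripts} handle and the feed
+     * options shown are illustrative):
+     * <pre>
+     * {@code
+     * scripts.queryStoredProcedures(new SqlQuerySpec("SELECT * FROM root r"), new FeedOptions())
+     *     .subscribe(page -> page.results().forEach(sproc -> System.out.println(sproc.id())));
+     * }
+     * </pre>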
+ */ + public Flux> queryStoredProcedures(SqlQuerySpec querySpec, + FeedOptions options){ + return database.getDocClientWrapper() + .queryStoredProcedures(container.getLink(), querySpec,options) + .map(response -> BridgeInternal.createFeedResponse( CosmosStoredProcedureProperties.getFromV2Results(response.results()), + response.responseHeaders())); + } + + /** + * Gets a CosmosStoredProcedure object without making a service call + * @param id id of the stored procedure + * @return a cosmos stored procedure + */ + public CosmosStoredProcedure getStoredProcedure(String id){ + return new CosmosStoredProcedure(id, this.container); + } + + + /* UDF Operations */ + + /** + * Creates a cosmos user defined function. + * + * After subscription the operation will be performed. + * The {@link Mono} upon successful completion will contain a single cosmos user defined function response. + * In case of failure the {@link Mono} will error. + * + * @param properties the cosmos user defined function properties + * @return an {@link Mono} containing the single resource response with the created user defined function or an error. + */ + public Mono createUserDefinedFunction(CosmosUserDefinedFunctionProperties properties){ + UserDefinedFunction udf = new UserDefinedFunction(); + udf.id(properties.id()); + udf.setBody(properties.body()); + + return database.getDocClientWrapper() + .createUserDefinedFunction(container.getLink(), udf, null) + .map(response -> new CosmosUserDefinedFunctionResponse(response, this.container)).single(); + } + + /** + * Reads all cosmos user defined functions in the container + * + * After subscription the operation will be performed. + * The {@link Flux} will contain one or several feed response pages of the read user defined functions. + * In case of failure the {@link Flux} will error. + * + * @param options the feed options. + * @return an {@link Flux} containing one or several feed response pages of the read user defined functions or an error. + */ + public Flux> readAllUserDefinedFunctions(FeedOptions options){ + return database.getDocClientWrapper() + .readUserDefinedFunctions(container.getLink(), options) + .map(response -> BridgeInternal.createFeedResponse(CosmosUserDefinedFunctionProperties.getFromV2Results(response.results()), + response.responseHeaders())); + } + + /** + * Query for user defined functions in the container. + * + * After subscription the operation will be performed. + * The {@link Flux} will contain one or several feed response pages of the obtained user defined functions. + * In case of failure the {@link Flux} will error. + * + * @param query the query. + * @param options the feed options. + * @return an {@link Flux} containing one or several feed response pages of the obtained user defined functions or an error. + */ + public Flux> queryUserDefinedFunctions(String query, + FeedOptions options){ + return queryUserDefinedFunctions(new SqlQuerySpec(query), options); + } + + /** + * Query for user defined functions in the container. + * + * After subscription the operation will be performed. + * The {@link Flux} will contain one or several feed response pages of the obtained user defined functions. + * In case of failure the {@link Flux} will error. + * + * @param querySpec the SQL query specification. + * @param options the feed options. + * @return an {@link Flux} containing one or several feed response pages of the obtained user defined functions or an error. 
+ */ + public Flux> queryUserDefinedFunctions(SqlQuerySpec querySpec, + FeedOptions options){ + return database.getDocClientWrapper() + .queryUserDefinedFunctions(container.getLink(),querySpec, options) + .map(response -> BridgeInternal.createFeedResponse(CosmosUserDefinedFunctionProperties.getFromV2Results(response.results()), + response.responseHeaders())); + } + + /** + * Gets a CosmosUserDefinedFunction object without making a service call + * @param id id of the user defined function + * @return a cosmos user defined function + */ + public CosmosUserDefinedFunction getUserDefinedFunction(String id){ + return new CosmosUserDefinedFunction(id, this.container); + } + + /* Trigger Operations */ + /** + * Creates a Cosmos trigger. + * + * After subscription the operation will be performed. + * The {@link Mono} upon successful completion will contain a cosmos trigger response + * In case of failure the {@link Mono} will error. + * + * @param properties the cosmos trigger properties + * @return an {@link Mono} containing the single resource response with the created trigger or an error. + */ + public Mono createTrigger(CosmosTriggerProperties properties){ + Trigger trigger = new Trigger(properties.toJson()); + + return database.getDocClientWrapper() + .createTrigger(container.getLink(), trigger, null) + .map(response -> new CosmosTriggerResponse(response, this.container)) + .single(); + } + + /** + * Reads all triggers in a container + * + * After subscription the operation will be performed. + * The {@link Flux} will contain one or several feed response pages of the read cosmos trigger properties. + * In case of failure the {@link Flux} will error. + * + * @param options the feed options. + * @return an {@link Flux} containing one or several feed response pages of the read cosmos rigger properties or an error. + */ + public Flux> readAllTriggers(FeedOptions options){ + return database.getDocClientWrapper() + .readTriggers(container.getLink(), options) + .map(response -> BridgeInternal.createFeedResponse(CosmosTriggerProperties.getFromV2Results(response.results()), + response.responseHeaders())); + } + + /** + * Query for triggers in the container + * + * After subscription the operation will be performed. + * The {@link Flux} will contain one or several feed response pages of the obtained triggers. + * In case of failure the {@link Flux} will error. + * + * @param query the query. + * @param options the feed options. + * @return an {@link Flux} containing one or several feed response pages of the obtained triggers or an error. + */ + public Flux> queryTriggers(String query, FeedOptions options){ + return queryTriggers(new SqlQuerySpec(query), options); + } + + /** + * Query for triggers in the container + * + * After subscription the operation will be performed. + * The {@link Flux} will contain one or several feed response pages of the obtained triggers. + * In case of failure the {@link Flux} will error. + * + * @param querySpec the SQL query specification. + * @param options the feed options. + * @return an {@link Flux} containing one or several feed response pages of the obtained triggers or an error. 
+ */ + public Flux> queryTriggers(SqlQuerySpec querySpec, + FeedOptions options){ + return database.getDocClientWrapper() + .queryTriggers(container.getLink(), querySpec, options) + .map(response -> BridgeInternal.createFeedResponse(CosmosTriggerProperties.getFromV2Results(response.results()), + response.responseHeaders())); + } + + /** + * Gets a CosmosTrigger object without making a service call + * @param id id of the cosmos trigger + * @return a cosmos trigger + */ + public CosmosTrigger getTrigger(String id){ + return new CosmosTrigger(id, this.container); + } + +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosStoredProcedure.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosStoredProcedure.java new file mode 100644 index 0000000000000..14a11f121eb32 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosStoredProcedure.java @@ -0,0 +1,200 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.Paths; +import com.azure.data.cosmos.internal.StoredProcedure; +import reactor.core.publisher.Mono; + +public class CosmosStoredProcedure { + + private CosmosContainer cosmosContainer; + private String id; + + CosmosStoredProcedure(String id, CosmosContainer cosmosContainer) { + this.id = id; + this.cosmosContainer = cosmosContainer; + } + + /** + * Get the id of the {@link CosmosStoredProcedure} + * @return the id of the {@link CosmosStoredProcedure} + */ + public String id() { + return id; + } + + /** + * Set the id of the {@link CosmosStoredProcedure} + * @param id the id of the {@link CosmosStoredProcedure} + * @return the same {@link CosmosStoredProcedure} that had the id set + */ + CosmosStoredProcedure id(String id) { + this.id = id; + return this; + } + + /** + * Read a stored procedure by the stored procedure link. + *
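+     * For example (the {@code storedProcedure} handle is assumed, e.g. obtained via
+     * {@link CosmosScripts#getStoredProcedure(String)}):
+     * <pre>
+     * {@code
+     * storedProcedure.read()
+     *     .subscribe(response -> System.out.println(response.properties().body()));
+     * }
+     * </pre>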

+ * After subscription the operation will be performed. + * The {@link Mono} upon successful completion will contain a single resource response with the read stored + * procedure. + * In case of failure the {@link Mono} will error. + * + * @return an {@link Mono} containing the single resource response with the read stored procedure or an error. + */ + public Mono read() { + return read(null); + } + + /** + * Read a stored procedure by the stored procedure link. + *

+ * After subscription the operation will be performed. + * The {@link Mono} upon successful completion will contain a single resource response with the read stored + * procedure. + * In case of failure the {@link Mono} will error. + * + * @param options the request options. + * @return an {@link Mono} containing the single resource response with the read stored procedure or an error. + */ + public Mono read(CosmosStoredProcedureRequestOptions options) { + if(options == null) { + options = new CosmosStoredProcedureRequestOptions(); + } + return cosmosContainer.getDatabase().getDocClientWrapper().readStoredProcedure(getLink(), options.toRequestOptions()) + .map(response -> new CosmosStoredProcedureResponse(response, cosmosContainer)).single(); + } + + /** + * Deletes a stored procedure by the stored procedure link. + *

+ * After subscription the operation will be performed. + * The {@link Mono} upon successful completion will contain a single resource response for the deleted stored procedure. + * In case of failure the {@link Mono} will error. + * + * @return an {@link Mono} containing the single resource response for the deleted stored procedure or an error. + */ + public Mono delete() { + return delete(null); + } + + /** + * Deletes a stored procedure by the stored procedure link. + *

+ * After subscription the operation will be performed. + * The {@link Mono} upon successful completion will contain a single resource response for the deleted stored procedure. + * In case of failure the {@link Mono} will error. + * + * @param options the request options. + * @return an {@link Mono} containing the single resource response for the deleted stored procedure or an error. + */ + public Mono delete(CosmosStoredProcedureRequestOptions options) { + if(options == null) { + options = new CosmosStoredProcedureRequestOptions(); + } + return cosmosContainer.getDatabase() + .getDocClientWrapper() + .deleteStoredProcedure(getLink(), options.toRequestOptions()) + .map(response -> new CosmosResponse(response.getResource())) + .single(); + } + + /** + * Executes a stored procedure by the stored procedure link. + *
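+     * For example, executing with an explicit partition key and printing the raw response body
+     * (the {@code storedProcedure} handle, the partition key value, and the parameter value are
+     * assumed for illustration):
+     * <pre>
+     * {@code
+     * CosmosStoredProcedureRequestOptions options = new CosmosStoredProcedureRequestOptions()
+     *     .partitionKey(new PartitionKey("partitionKeyValue"));
+     * storedProcedure.execute(new Object[]{"input"}, options)
+     *     .subscribe(response -> System.out.println(response.responseAsString()));
+     * }
+     * </pre>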

+ * After subscription the operation will be performed. + * The {@link Mono} upon successful completion will contain a single resource response with the stored procedure response. + * In case of failure the {@link Mono} will error. + * + * @param procedureParams the array of procedure parameter values. + * @param options the request options. + * @return an {@link Mono} containing the single resource response with the stored procedure response or an error. + */ + public Mono execute(Object[] procedureParams, CosmosStoredProcedureRequestOptions options) { + if(options == null) { + options = new CosmosStoredProcedureRequestOptions(); + } + return cosmosContainer.getDatabase() + .getDocClientWrapper() + .executeStoredProcedure(getLink(), options.toRequestOptions(), procedureParams) + .map(response -> new CosmosStoredProcedureResponse(response, cosmosContainer)) + .single(); + } + + /** + * Replaces a stored procedure. + *
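+     * For example, swapping in a new body while keeping the same id (the body shown is
+     * illustrative only):
+     * <pre>
+     * {@code
+     * storedProcedure.replace(new CosmosStoredProcedureProperties(storedProcedure.id(),
+     *         "function() { getContext().getResponse().setBody('updated');}"))
+     *     .subscribe(response -> System.out.println(response.properties().id()));
+     * }
+     * </pre>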

+ * After subscription the operation will be performed. + * The {@link Mono} upon successful completion will contain a single resource response with the replaced stored procedure. + * In case of failure the {@link Mono} will error. + * + * @param storedProcedureSettings the stored procedure properties + * @return an {@link Mono} containing the single resource response with the replaced stored procedure or an error. + */ + public Mono replace(CosmosStoredProcedureProperties storedProcedureSettings) { + return replace(storedProcedureSettings, null); + } + + /** + * Replaces a stored procedure. + *

+ * After subscription the operation will be performed. + * The {@link Mono} upon successful completion will contain a single resource response with the replaced stored procedure. + * In case of failure the {@link Mono} will error. + * + * @param storedProcedureSettings the stored procedure properties. + * @param options the request options. + * @return an {@link Mono} containing the single resource response with the replaced stored procedure or an error. + */ + public Mono replace(CosmosStoredProcedureProperties storedProcedureSettings, + CosmosStoredProcedureRequestOptions options) { + if(options == null) { + options = new CosmosStoredProcedureRequestOptions(); + } + return cosmosContainer.getDatabase() + .getDocClientWrapper() + .replaceStoredProcedure(new StoredProcedure(storedProcedureSettings.toJson()), options.toRequestOptions()) + .map(response -> new CosmosStoredProcedureResponse(response, cosmosContainer)) + .single(); + } + + String URIPathSegment() { + return Paths.STORED_PROCEDURES_PATH_SEGMENT; + } + + String parentLink() { + return cosmosContainer.getLink(); + } + + String getLink() { + StringBuilder builder = new StringBuilder(); + builder.append(parentLink()); + builder.append("/"); + builder.append(URIPathSegment()); + builder.append("/"); + builder.append(id()); + return builder.toString(); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosStoredProcedureProperties.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosStoredProcedureProperties.java new file mode 100644 index 0000000000000..609216373e4fd --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosStoredProcedureProperties.java @@ -0,0 +1,99 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.Constants; +import com.azure.data.cosmos.internal.ResourceResponse; +import com.azure.data.cosmos.internal.StoredProcedure; + +import java.util.List; +import java.util.stream.Collectors; + +public class CosmosStoredProcedureProperties extends Resource { + + /** + * Constructor. + * + */ + public CosmosStoredProcedureProperties() { + super(); + } + + /** + * Sets the id + * @param id the name of the resource. 
+ * @return return the Cosmos stored procedure properties with id set + */ + public CosmosStoredProcedureProperties id(String id){ + super.id(id); + return this; + } + + /** + * Constructor. + * + * @param jsonString the json string that represents the stored procedure. + */ + CosmosStoredProcedureProperties(String jsonString) { + super(jsonString); + } + + /** + * Constructor. + * + * @param id the id of the stored procedure + * @param body the body of the stored procedure + */ + public CosmosStoredProcedureProperties(String id, String body) { + super(); + super.id(id); + this.body(body); + } + + CosmosStoredProcedureProperties(ResourceResponse response) { + super(response.getResource().toJson()); + } + + /** + * Get the body of the stored procedure. + * + * @return the body of the stored procedure. + */ + public String body() { + return super.getString(Constants.Properties.BODY); + } + + /** + * Set the body of the stored procedure. + * + * @param body the body of the stored procedure. + */ + public void body(String body) { + super.set(Constants.Properties.BODY, body); + } + + + static List getFromV2Results(List results) { + return results.stream().map(sproc -> new CosmosStoredProcedureProperties(sproc.toJson())).collect(Collectors.toList()); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosStoredProcedureRequestOptions.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosStoredProcedureRequestOptions.java new file mode 100644 index 0000000000000..8cd9d08689788 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosStoredProcedureRequestOptions.java @@ -0,0 +1,123 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.RequestOptions; + +/** + * Encapsulates options that can be specified for a request issued to cosmos stored procedure. + */ +public class CosmosStoredProcedureRequestOptions { + private ConsistencyLevel consistencyLevel; + private PartitionKey partitionKey; + private String sessionToken; + private AccessCondition accessCondition; + + /** + * Gets the conditions associated with the request. + * + * @return the access condition. + */ + public AccessCondition accessCondition() { + return accessCondition; + } + + /** + * Sets the conditions associated with the request. + * + * @param accessCondition the access condition. 
+ * @return the current request options + */ + public CosmosStoredProcedureRequestOptions accessCondition(AccessCondition accessCondition) { + this.accessCondition = accessCondition; + return this; + } + /** + * Gets the consistency level required for the request. + * + * @return the consistency level. + */ + public ConsistencyLevel consistencyLevel() { + return consistencyLevel; + } + + /** + * Sets the consistency level required for the request. + * + * @param consistencyLevel the consistency level. + * @return the CosmosStoredProcedureRequestOptions. + */ + public CosmosStoredProcedureRequestOptions consistencyLevel(ConsistencyLevel consistencyLevel) { + this.consistencyLevel = consistencyLevel; + return this; + } + + /** + * Gets the partition key used to identify the current request's target partition. + * + * @return the partition key value. + */ + public PartitionKey partitionKey() { + return partitionKey; + } + + /** + * Sets the partition key used to identify the current request's target partition. + * + * @param partitionKey the partition key value. + * @return the CosmosStoredProcedureRequestOptions. + */ + public CosmosStoredProcedureRequestOptions partitionKey(PartitionKey partitionKey) { + this.partitionKey = partitionKey; + return this; + } + + /** + * Gets the token for use with session consistency. + * + * @return the session token. + */ + public String sessionToken() { + return sessionToken; + } + + /** + * Sets the token for use with session consistency. + * + * @param sessionToken the session token. + * @return the CosmosStoredProcedureRequestOptions. + */ + public CosmosStoredProcedureRequestOptions sessionToken(String sessionToken) { + this.sessionToken = sessionToken; + return this; + } + + RequestOptions toRequestOptions() { + RequestOptions requestOptions = new RequestOptions(); + requestOptions.setAccessCondition(accessCondition); + requestOptions.setConsistencyLevel(consistencyLevel()); + requestOptions.setPartitionKey(partitionKey); + requestOptions.setSessionToken(sessionToken); + return requestOptions; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosStoredProcedureResponse.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosStoredProcedureResponse.java new file mode 100644 index 0000000000000..6e208cac567cd --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosStoredProcedureResponse.java @@ -0,0 +1,132 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.ResourceResponse; +import com.azure.data.cosmos.internal.StoredProcedure; +import com.azure.data.cosmos.internal.StoredProcedureResponse; + +public class CosmosStoredProcedureResponse extends CosmosResponse { + + private CosmosStoredProcedure storedProcedure; + private StoredProcedureResponse storedProcedureResponse; + + CosmosStoredProcedureResponse(ResourceResponse response, CosmosContainer cosmosContainer) { + super(response); + if(response.getResource() != null){ + super.resourceSettings(new CosmosStoredProcedureProperties(response)); + storedProcedure = new CosmosStoredProcedure(resourceSettings().id(), cosmosContainer); + } + } + + CosmosStoredProcedureResponse(StoredProcedureResponse response, CosmosContainer cosmosContainer) { + super(response); + this.storedProcedureResponse = response; + } + + /** + * Gets the stored procedure properties + * @return the stored procedure properties or null + */ + public CosmosStoredProcedureProperties properties() { + return super.resourceSettings(); + } + + /** + * Gets the stored procedure object + * @return the stored procedure object or null + */ + public CosmosStoredProcedure storedProcedure() { + return this.storedProcedure; + } + + /** + * Gets the Activity ID for the request. + * + * @return the activity id. + */ + @Override + public String activityId() { + if(storedProcedureResponse != null){ + return this.storedProcedureResponse.getActivityId(); + } + return super.activityId(); + } + + /** + * Gets the token used for managing client's consistency requirements. + * + * @return the session token. + */ + @Override + public String sessionToken() { + if(storedProcedureResponse != null){ + return this.storedProcedureResponse.getSessionToken(); + } + return super.sessionToken(); + } + + /** + * Gets the HTTP status code associated with the response. + * + * @return the status code. + */ + @Override + public int statusCode() { + if(storedProcedureResponse != null){ + return this.storedProcedureResponse.getStatusCode(); + } + return super.statusCode(); + } + + /** + * Gets the number of index paths (terms) generated by the operation. + * + * @return the request charge. + */ + @Override + public double requestCharge() { + if(storedProcedureResponse != null){ + return storedProcedureResponse.getRequestCharge(); + } + return super.requestCharge(); + } + + /** + * Gets the response of the stored procedure as a string. + * + * @return the response as a string. + */ + public String responseAsString() { + return this.storedProcedureResponse.getResponseAsString(); + } + + /** + * Gets the output from stored procedure console.log() statements. + * + * @return the output string from the stored procedure console.log() statements. 
+ */ + public String scriptLog() { + return this.storedProcedureResponse.getScriptLog(); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosTrigger.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosTrigger.java new file mode 100644 index 0000000000000..e0ff10e1b286f --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosTrigger.java @@ -0,0 +1,128 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.Paths; +import com.azure.data.cosmos.internal.Trigger; +import reactor.core.publisher.Mono; + +public class CosmosTrigger { + + private CosmosContainer container; + private String id; + + CosmosTrigger(String id, CosmosContainer container) { + this.id = id; + this.container = container; + } + + /** + * Get the id of the {@link CosmosTrigger} + * @return the id of the {@link CosmosTrigger} + */ + public String id() { + return id; + } + + /** + * Set the id of the {@link CosmosTrigger} + * @param id the id of the {@link CosmosTrigger} + * @return the same {@link CosmosTrigger} that had the id set + */ + CosmosTrigger id(String id) { + this.id = id; + return this; + } + + /** + * Reads a cosmos trigger by the trigger link. + *
<p>
+ * After subscription the operation will be performed. + * The {@link Mono} upon successful completion will contain a single resource response for the read trigger. + * In case of failure the {@link Mono} will error. + * + * @return an {@link Mono} containing the single resource response for the read cosmos trigger or an error. + */ + public Mono read() { + return container.getDatabase() + .getDocClientWrapper() + .readTrigger(getLink(), null) + .map(response -> new CosmosTriggerResponse(response, container)) + .single(); + } + + + /** + * Replaces a cosmos trigger. + *
<p>
+ * After subscription the operation will be performed. + * The {@link Mono} upon successful completion will contain a single resource response with the replaced trigger. + * In case of failure the {@link Mono} will error. + * + * @param triggerSettings the cosmos trigger properties. + * @return an {@link Mono} containing the single resource response with the replaced cosmos trigger or an error. + */ + public Mono replace(CosmosTriggerProperties triggerSettings) { + return container.getDatabase() + .getDocClientWrapper() + .replaceTrigger(new Trigger(triggerSettings.toJson()), null) + .map(response -> new CosmosTriggerResponse(response, container)) + .single(); + } + + /** + * Deletes a cosmos trigger. + *
<p>
+ * After subscription the operation will be performed. + * The {@link Mono} upon successful completion will contain a single resource response for the deleted trigger. + * In case of failure the {@link Mono} will error. + * + * @return an {@link Mono} containing the single resource response for the deleted cosmos trigger or an error. + */ + public Mono delete() { + return container.getDatabase() + .getDocClientWrapper() + .deleteTrigger(getLink(), null) + .map(response -> new CosmosResponse(response.getResource())) + .single(); + } + + String URIPathSegment() { + return Paths.TRIGGERS_PATH_SEGMENT; + } + + String parentLink() { + return container.getLink(); + } + + String getLink() { + StringBuilder builder = new StringBuilder(); + builder.append(parentLink()); + builder.append("/"); + builder.append(URIPathSegment()); + builder.append("/"); + builder.append(id()); + return builder.toString(); + } + +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosTriggerProperties.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosTriggerProperties.java new file mode 100644 index 0000000000000..e221e9faa7fe8 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosTriggerProperties.java @@ -0,0 +1,145 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.Constants; +import com.azure.data.cosmos.internal.ResourceResponse; +import com.azure.data.cosmos.internal.Trigger; +import org.apache.commons.lang3.StringUtils; + +import java.util.List; +import java.util.stream.Collectors; + +public class CosmosTriggerProperties extends Resource { + + /** + * Constructor + */ + public CosmosTriggerProperties(){ + super(); + } + + /** + * Constructor. + * + * @param jsonString the json string that represents the trigger properties. + */ + CosmosTriggerProperties(String jsonString){ + super(jsonString); + } + + CosmosTriggerProperties(ResourceResponse response) { + super(response.getResource().toJson()); + } + + /** + * Sets the id + * + * @param id the name of the resource. + * @return the current cosmos trigger properties instance + */ + public CosmosTriggerProperties id(String id) { + super.id(id); + return this; + } + + /** + * Get the body of the trigger. + * + * @return the body of the trigger. 
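An illustrative sketch of how the trigger types above fit together; the trigger name and body are made up, and 'trigger' is assumed to be an existing CosmosTrigger handle obtained from its parent container.

CosmosTriggerProperties triggerSettings = new CosmosTriggerProperties()
        .id("validateToDoItem")                          // hypothetical trigger name
        .body("function() { /* validation logic */ }")   // JavaScript source of the trigger
        .triggerType(TriggerType.PRE)
        .triggerOperation(TriggerOperation.CREATE);

// Replace the existing server-side trigger and print the id echoed back in the response.
trigger.replace(triggerSettings)
        .subscribe(response -> System.out.println("replaced trigger: " + response.properties().id()));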
+ */ + public String body() { + return super.getString(Constants.Properties.BODY); + } + + /** + * Set the body of the trigger. + * + * @param body the body of the trigger. + * @return the CosmosTriggerProperties. + */ + public CosmosTriggerProperties body(String body) { + super.set(Constants.Properties.BODY, body); + return this; + } + + /** + * Get the type of the trigger. + * + * @return the trigger type. + */ + public TriggerType triggerType() { + TriggerType result = TriggerType.PRE; + try { + result = TriggerType.valueOf( + StringUtils.upperCase(super.getString(Constants.Properties.TRIGGER_TYPE))); + } catch (IllegalArgumentException e) { + // ignore the exception and return the default + this.getLogger().warn("INVALID triggerType value {}.", super.getString(Constants.Properties.TRIGGER_TYPE)); + } + return result; + } + + /** + * Set the type of the resource. + * + * @param triggerType the trigger type. + * @return the CosmosTriggerProperties. + */ + public CosmosTriggerProperties triggerType(TriggerType triggerType) { + super.set(Constants.Properties.TRIGGER_TYPE, triggerType.toString()); + return this; + } + + /** + * Get the operation type of the trigger. + * + * @return the trigger operation. + */ + public TriggerOperation triggerOperation() { + TriggerOperation result = TriggerOperation.CREATE; + try { + result = TriggerOperation.valueOf( + StringUtils.upperCase(super.getString(Constants.Properties.TRIGGER_OPERATION))); + } catch (IllegalArgumentException e) { + // ignore the exception and return the default + this.getLogger().warn("INVALID triggerOperation value {}.", super.getString(Constants.Properties.TRIGGER_OPERATION)); + } + return result; + } + + /** + * Set the operation type of the trigger. + * + * @param triggerOperation the trigger operation. + * @return the CosmosTriggerProperties. + */ + public CosmosTriggerProperties triggerOperation(TriggerOperation triggerOperation) { + super.set(Constants.Properties.TRIGGER_OPERATION, triggerOperation.toString()); + return this; + } + + static List getFromV2Results(List results) { + return results.stream().map(trigger -> new CosmosTriggerProperties(trigger.toJson())).collect(Collectors.toList()); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosTriggerResponse.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosTriggerResponse.java new file mode 100644 index 0000000000000..4cf3d4b12782f --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosTriggerResponse.java @@ -0,0 +1,59 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.ResourceResponse; +import com.azure.data.cosmos.internal.Trigger; + +public class CosmosTriggerResponse extends CosmosResponse { + + private CosmosTriggerProperties cosmosTriggerProperties; + private CosmosTrigger cosmosTrigger; + + CosmosTriggerResponse(ResourceResponse response, CosmosContainer container) { + super(response); + if(response.getResource() != null) { + super.resourceSettings(new CosmosTriggerProperties(response)); + cosmosTriggerProperties = new CosmosTriggerProperties(response); + cosmosTrigger = new CosmosTrigger(cosmosTriggerProperties.id(), container); + } + } + + /** + * Gets the cosmos trigger properties or null + * + * @return {@link CosmosTriggerProperties} + */ + public CosmosTriggerProperties properties() { + return cosmosTriggerProperties; + } + + /** + * Gets the cosmos trigger object or null + * + * @return {@link CosmosTrigger} + */ + public CosmosTrigger trigger() { + return cosmosTrigger; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosUser.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosUser.java new file mode 100644 index 0000000000000..08c61dae6150a --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosUser.java @@ -0,0 +1,197 @@ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.Paths; +import com.azure.data.cosmos.internal.Permission; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +public class CosmosUser { + CosmosDatabase database; + private String id; + + CosmosUser(String id, CosmosDatabase database) { + this.id = id; + this.database = database; + } + + /** + * Get the id of the {@link CosmosUser} + * @return the id of the {@link CosmosUser} + */ + public String id() { + return id; + } + + /** + * Set the id of the {@link CosmosUser} + * @param id the id of the {@link CosmosUser} + * @return the same {@link CosmosUser} that had the id set + */ + CosmosUser id(String id) { + this.id = id; + return this; + } + + /** + * Reads a cosmos user + * @return a {@link Mono} containing the single resource response with the read user or an error. + */ + public Mono read() { + return this.database.getDocClientWrapper() + .readUser(getLink(), null) + .map(response -> new CosmosUserResponse(response, database)).single(); + } + + /** + * REPLACE a cosmos user + * + * @param userSettings the user properties to use + * @return a {@link Mono} containing the single resource response with the replaced user or an error. + */ + public Mono replace(CosmosUserProperties userSettings) { + return this.database.getDocClientWrapper() + .replaceUser(userSettings.getV2User(), null) + .map(response -> new CosmosUserResponse(response, database)).single(); + } + + /** + * Delete a cosmos user + * + * @return a {@link Mono} containing the single resource response with the deleted user or an error. + */ + public Mono delete() { + return this.database.getDocClientWrapper() + .deleteUser(getLink(), null) + .map(response -> new CosmosUserResponse(response, database)).single(); + } + + /** + * Creates a permission. + *
<p>
+ * After subscription the operation will be performed. + * The {@link Mono} upon successful completion will contain a single resource response with the created permission. + * In case of failure the {@link Mono} will error. + * + * @param permissionSettings the permission properties to create. + * @param options the request options. + * @return an {@link Mono} containing the single resource response with the created permission or an error. + */ + public Mono createPermission(CosmosPermissionProperties permissionSettings, CosmosPermissionRequestOptions options) { + if(options == null){ + options = new CosmosPermissionRequestOptions(); + } + Permission permission = permissionSettings.getV2Permissions(); + return database.getDocClientWrapper() + .createPermission(getLink(), permission, options.toRequestOptions()) + .map(response -> new CosmosPermissionResponse(response, this)) + .single(); + } + + /** + * Upserts a permission. + *
<p>
+ * After subscription the operation will be performed. + * The {@link Mono} upon successful completion will contain a single resource response with the upserted permission. + * In case of failure the {@link Mono} will error. + * + * @param permissionSettings the permission properties to upsert. + * @param options the request options. + * @return an {@link Mono} containing the single resource response with the upserted permission or an error. + */ + public Mono upsertPermission(CosmosPermissionProperties permissionSettings, CosmosPermissionRequestOptions options) { + Permission permission = permissionSettings.getV2Permissions(); + if(options == null){ + options = new CosmosPermissionRequestOptions(); + } + return database.getDocClientWrapper() + .upsertPermission(getLink(), permission, options.toRequestOptions()) + .map(response -> new CosmosPermissionResponse(response, this)) + .single(); + } + + + /** + * Reads all permissions. + *
<p>
+ * After subscription the operation will be performed. + * The {@link Flux} will contain one or several feed response pages of the read permissions. + * In case of failure the {@link Flux} will error. + * + * @param options the feed options. + * @return an {@link Flux} containing one or several feed response pages of the read permissions or an error. + */ + public Flux> readAllPermissions(FeedOptions options) { + return getDatabase().getDocClientWrapper() + .readPermissions(getLink(), options) + .map(response-> BridgeInternal.createFeedResponse(CosmosPermissionProperties.getFromV2Results(response.results()), + response.responseHeaders())); + } + + /** + * Query for permissions. + *
<p>
+     * After subscription the operation will be performed.
+     * The {@link Flux} will contain one or several feed response pages of the obtained permissions.
+     * In case of failure the {@link Flux} will error.
+     *
+     * @param query the query.
+     * @return an {@link Flux} containing one or several feed response pages of the obtained permissions or an error.
+     */
+    public Flux<FeedResponse<CosmosPermissionProperties>> queryPermissions(String query) {
+        return queryPermissions(query, null);
+    }
+
+    /**
+     * Query for permissions.
+     * <p>
+ * After subscription the operation will be performed. + * The {@link Flux} will contain one or several feed response pages of the obtained permissions. + * In case of failure the {@link Flux} will error. + * + * @param query the query. + * @param options the feed options. + * @return an {@link Flux} containing one or several feed response pages of the obtained permissions or an error. + */ + public Flux> queryPermissions(String query, FeedOptions options) { + return getDatabase().getDocClientWrapper() + .queryPermissions(getLink(), query, options) + .map(response-> BridgeInternal.createFeedResponse(CosmosPermissionProperties.getFromV2Results(response.results()), + response.responseHeaders())); + } + + /** + * Get cosmos permission without making a call to backend + * @param id the id + * @return the cosmos permission + */ + public CosmosPermission getPermission(String id){ + return new CosmosPermission(id, this); + } + + String URIPathSegment() { + return Paths.USERS_PATH_SEGMENT; + } + + String parentLink() { + return database.getLink() ; + } + + String getLink() { + StringBuilder builder = new StringBuilder(); + builder.append(parentLink()); + builder.append("/"); + builder.append(URIPathSegment()); + builder.append("/"); + builder.append(id()); + return builder.toString(); + } + + /** + * Gets the parent Database + * + * @return the (@link CosmosDatabase) + */ + public CosmosDatabase getDatabase() { + return database; + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosUserDefinedFunction.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosUserDefinedFunction.java new file mode 100644 index 0000000000000..3229de163ca60 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosUserDefinedFunction.java @@ -0,0 +1,128 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
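A minimal sketch of listing a user's permissions with the API above; 'user' is assumed to be a CosmosUser handle obtained from its parent CosmosDatabase, the permission id is invented, and CosmosPermissionProperties is assumed to expose the usual id() accessor.

FeedOptions feedOptions = new FeedOptions().maxItemCount(10);

user.queryPermissions("SELECT * FROM c", feedOptions)
        .subscribe(page -> page.results()
                .forEach(permission -> System.out.println("permission: " + permission.id())));

// Local handle only; no service call is made until an operation is invoked on it.
CosmosPermission readPermission = user.getPermission("readOnlyPermission");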
+ */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.Paths; +import com.azure.data.cosmos.internal.UserDefinedFunction; +import reactor.core.publisher.Mono; + +public class CosmosUserDefinedFunction { + + private CosmosContainer container; + private String id; + + CosmosUserDefinedFunction(String id, CosmosContainer container) { + this.id = id; + this.container = container; + } + + /** + * Get the id of the {@link CosmosUserDefinedFunction} + * @return the id of the {@link CosmosUserDefinedFunction} + */ + public String id() { + return id; + } + + /** + * Set the id of the {@link CosmosUserDefinedFunction} + * @param id the id of the {@link CosmosUserDefinedFunction} + * @return the same {@link CosmosUserDefinedFunction} that had the id set + */ + CosmosUserDefinedFunction id(String id) { + this.id = id; + return this; + } + + /** + * Read a user defined function. + *
<p>
+ * After subscription the operation will be performed. + * The {@link Mono} upon successful completion will contain a single resource response for the read user defined + * function. + * In case of failure the {@link Mono} will error. + * + * @return an {@link Mono} containing the single resource response for the read user defined function or an error. + */ + public Mono read() { + return container.getDatabase().getDocClientWrapper().readUserDefinedFunction(getLink(), null) + .map(response -> new CosmosUserDefinedFunctionResponse(response, container)).single(); + } + + /** + * Replaces a cosmos user defined function. + *
<p>
+ * After subscription the operation will be performed. + * The {@link Mono} upon successful completion will contain a single resource response with the replaced user + * defined function. + * In case of failure the {@link Mono} will error. + * + * @param udfSettings the cosmos user defined function properties. + * @return an {@link Mono} containing the single resource response with the replaced cosmos user defined function + * or an error. + */ + public Mono replace(CosmosUserDefinedFunctionProperties udfSettings) { + return container.getDatabase() + .getDocClientWrapper() + .replaceUserDefinedFunction(new UserDefinedFunction(udfSettings.toJson()) + , null) + .map(response -> new CosmosUserDefinedFunctionResponse(response, container)) + .single(); + } + + /** + * Deletes a cosmos user defined function. + *
<p>
+ * After subscription the operation will be performed. + * The {@link Mono} upon successful completion will contain a single resource response for the deleted user defined function. + * In case of failure the {@link Mono} will error. + * + * @return an {@link Mono} containing the single resource response for the deleted cosmos user defined function or + * an error. + */ + public Mono delete() { + return container.getDatabase() + .getDocClientWrapper() + .deleteUserDefinedFunction(this.getLink(), null) + .map(response -> new CosmosResponse(response.getResource())) + .single(); + } + + String URIPathSegment() { + return Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT; + } + + String parentLink() { + return container.getLink(); + } + + String getLink() { + StringBuilder builder = new StringBuilder(); + builder.append(parentLink()); + builder.append("/"); + builder.append(URIPathSegment()); + builder.append("/"); + builder.append(id()); + return builder.toString(); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosUserDefinedFunctionProperties.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosUserDefinedFunctionProperties.java new file mode 100644 index 0000000000000..9296a0b59f007 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosUserDefinedFunctionProperties.java @@ -0,0 +1,87 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.Constants; +import com.azure.data.cosmos.internal.ResourceResponse; +import com.azure.data.cosmos.internal.UserDefinedFunction; + +import java.util.List; +import java.util.stream.Collectors; + +public class CosmosUserDefinedFunctionProperties extends Resource { + + /** + * Constructor + */ + public CosmosUserDefinedFunctionProperties(){ + super(); + } + + CosmosUserDefinedFunctionProperties(ResourceResponse response) { + super(response.getResource().toJson()); + } + + /** + * Constructor. + * + * @param jsonString the json string that represents the cosmos user defined function properties. + */ + CosmosUserDefinedFunctionProperties(String jsonString) { + super(jsonString); + } + + /** + * Sets the id + * @param id the name of the resource. 
+ * @return the current instance of cosmos user defined function properties + */ + public CosmosUserDefinedFunctionProperties id(String id) { + super.id(id); + return this; + } + + /** + * Get the body of the user defined function. + * + * @return the body. + */ + public String body() { + return super.getString(Constants.Properties.BODY); + } + + /** + * Set the body of the user defined function. + * + * @param body the body. + * @return the CosmosUserDefinedFunctionProperties. + */ + public CosmosUserDefinedFunctionProperties body(String body) { + super.set(Constants.Properties.BODY, body); + return this; + } + + static List getFromV2Results(List results) { + return results.stream().map(udf -> new CosmosUserDefinedFunctionProperties(udf.toJson())).collect(Collectors.toList()); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosUserDefinedFunctionResponse.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosUserDefinedFunctionResponse.java new file mode 100644 index 0000000000000..0f3fb1d545c9e --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosUserDefinedFunctionResponse.java @@ -0,0 +1,57 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
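A hedged sketch of pairing the two user defined function types above; 'userDefinedFunction' is assumed to be an existing CosmosUserDefinedFunction handle from its parent container, and the names are made up.

CosmosUserDefinedFunctionProperties udfSettings = new CosmosUserDefinedFunctionProperties()
        .id("toUpperCase")                                        // hypothetical UDF name
        .body("function(input) { return input.toUpperCase(); }"); // JavaScript body

userDefinedFunction.replace(udfSettings)
        .subscribe(response -> System.out.println("replaced UDF: " + response.properties().id()));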
+ */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.ResourceResponse; +import com.azure.data.cosmos.internal.UserDefinedFunction; + +public class CosmosUserDefinedFunctionResponse extends CosmosResponse { + + private CosmosUserDefinedFunctionProperties cosmosUserDefinedFunctionProperties; + private CosmosUserDefinedFunction cosmosUserDefinedFunction; + + CosmosUserDefinedFunctionResponse(ResourceResponse response, CosmosContainer container) { + super(response); + if(response.getResource() != null) { + super.resourceSettings(new CosmosUserDefinedFunctionProperties(response)); + cosmosUserDefinedFunctionProperties = new CosmosUserDefinedFunctionProperties(response); + cosmosUserDefinedFunction = new CosmosUserDefinedFunction(cosmosUserDefinedFunctionProperties.id(), container); + } + } + + /** + * Gets the cosmos user defined function properties + * @return the cosmos user defined function properties + */ + public CosmosUserDefinedFunctionProperties properties() { + return cosmosUserDefinedFunctionProperties; + } + + /** + * Gets the cosmos user defined function object + * @return the cosmos user defined function object + */ + public CosmosUserDefinedFunction userDefinedFunction() { + return cosmosUserDefinedFunction; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosUserProperties.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosUserProperties.java new file mode 100644 index 0000000000000..37be2e3a866c7 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosUserProperties.java @@ -0,0 +1,74 @@ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.Constants; +import com.azure.data.cosmos.internal.ResourceResponse; +import com.azure.data.cosmos.internal.User; + +import java.util.List; +import java.util.stream.Collectors; + +public class CosmosUserProperties extends Resource { + /** + * Initialize a user object. + */ + public CosmosUserProperties() { + super(); + } + + /** + * Gets the id + * @return the id of the user + */ + public String id() { + return super.id(); + } + + /** + * Sets the id + * @param id the name of the resource. + * @return the current instance of cosmos user properties + */ + public CosmosUserProperties id(String id) { + return (CosmosUserProperties) super.id(id); + } + + /** + * Initialize a user object from json string. + * + * @param jsonString the json string that represents the database user. + */ + CosmosUserProperties(String jsonString) { + super(jsonString); + } + + CosmosUserProperties(ResourceResponse response) { + super(response.getResource().toJson()); + } + + // Converting document collection to CosmosContainerProperties + CosmosUserProperties(User user){ + super(user.toJson()); + } + + /** + * Gets the self-link of the permissions associated with the user. + * + * @return the permissions link. 
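For completeness, a small sketch of the user round trip under the same assumption of a CosmosUser handle named 'user'; the id is hypothetical.

CosmosUserProperties userSettings = new CosmosUserProperties().id("reportingUser");

user.replace(userSettings)
        .subscribe(response -> System.out.println("replaced user: " + response.properties().id()));

user.read()
        .subscribe(response -> System.out.println("read user: " + response.properties().id()));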
+ */ + String getPermissionsLink() { + String selfLink = this.selfLink(); + if (selfLink.endsWith("/")) { + return selfLink + super.getString(Constants.Properties.PERMISSIONS_LINK); + } else { + return selfLink + "/" + super.getString(Constants.Properties.PERMISSIONS_LINK); + } + } + + public User getV2User() { + return new User(this.toJson()); + } + + static List getFromV2Results(List results) { + return results.stream().map(CosmosUserProperties::new).collect(Collectors.toList()); + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosUserResponse.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosUserResponse.java new file mode 100644 index 0000000000000..d566de6dd5b0e --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/CosmosUserResponse.java @@ -0,0 +1,36 @@ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.ResourceResponse; +import com.azure.data.cosmos.internal.User; + +public class CosmosUserResponse extends CosmosResponse { + private CosmosUser user; + + CosmosUserResponse(ResourceResponse response, CosmosDatabase database) { + super(response); + if(response.getResource() == null){ + super.resourceSettings(null); + }else{ + super.resourceSettings(new CosmosUserProperties(response)); + this.user = new CosmosUser(resourceSettings().id(), database); + } + } + + /** + * Get cosmos user + * + * @return {@link CosmosUser} + */ + public CosmosUser user() { + return user; + } + + /** + * Gets the cosmos user properties + * + * @return {@link CosmosUserProperties} + */ + public CosmosUserProperties properties(){ + return resourceSettings(); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/DataType.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/DataType.java new file mode 100644 index 0000000000000..d2aed00eb006a --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/DataType.java @@ -0,0 +1,67 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +import org.apache.commons.lang3.StringUtils; +import org.apache.commons.text.WordUtils; + +/** + * Data types in the Azure Cosmos DB database service. + */ +public enum DataType { + /** + * Represents a numeric data type. + */ + NUMBER, + + /** + * Represents a string data type. + */ + STRING, + + /** + * Represent a point data type. 
+ */ + POINT, + + /** + * Represents a line string data type. + */ + LINE_STRING, + + /** + * Represent a polygon data type. + */ + POLYGON, + + /** + * Represent a multi-polygon data type. + */ + MULTI_POLYGON; + + @Override + public String toString() { + return StringUtils.remove(WordUtils.capitalizeFully(this.name(), '_'), '_'); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/ExcludedPath.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/ExcludedPath.java new file mode 100644 index 0000000000000..34ddd2bd9265c --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/ExcludedPath.java @@ -0,0 +1,68 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.Constants; + +/** + * Represents an excluded path of the IndexingPolicy in the Azure Cosmos DB database service. + */ +public class ExcludedPath extends JsonSerializable { + + /** + * Constructor. + */ + public ExcludedPath() { + super(); + } + + /** + * Constructor. + * + * @param jsonString the json string that represents the excluded path. + */ + ExcludedPath(String jsonString) { + super(jsonString); + } + + /** + * Gets path. + * + * @return the path. + */ + public String path() { + return super.getString(Constants.Properties.PATH); + } + + /** + * Sets path. + * + * @param path the path. + * @return the Exculded path. 
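A one-line sketch of the excluded path above; wiring it into an IndexingPolicy is only hinted at because that type is not part of this excerpt.

ExcludedPath excludedPath = new ExcludedPath().path("/metadata/*");
// Typically added to an IndexingPolicy's excluded paths (not shown in this diff).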
+ */ + public ExcludedPath path(String path) { + super.set(Constants.Properties.PATH, path); + return this; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/FeedOptions.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/FeedOptions.java new file mode 100644 index 0000000000000..56a45b4330d8a --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/FeedOptions.java @@ -0,0 +1,361 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +import java.util.Map; + +/** + * Specifies the options associated with feed methods (enumeration operations) + * in the Azure Cosmos DB database service. + */ +public final class FeedOptions { + private String sessionToken; + private String partitionKeyRangeId; + private Boolean enableScanInQuery; + private Boolean emitVerboseTracesInQuery; + private Boolean enableCrossPartitionQuery; + private int maxDegreeOfParallelism; + private int maxBufferedItemCount; + private int responseContinuationTokenLimitInKb; + private Integer maxItemCount; + private String requestContinuation; + private PartitionKey partitionkey; + private boolean populateQueryMetrics; + private Map properties; + + public FeedOptions() { + } + + public FeedOptions(FeedOptions options) { + this.sessionToken = options.sessionToken; + this.partitionKeyRangeId = options.partitionKeyRangeId; + this.enableScanInQuery = options.enableScanInQuery; + this.emitVerboseTracesInQuery = options.emitVerboseTracesInQuery; + this.enableCrossPartitionQuery = options.enableCrossPartitionQuery; + this.maxDegreeOfParallelism = options.maxDegreeOfParallelism; + this.maxBufferedItemCount = options.maxBufferedItemCount; + this.responseContinuationTokenLimitInKb = options.responseContinuationTokenLimitInKb; + this.maxItemCount = options.maxItemCount; + this.requestContinuation = options.requestContinuation; + this.partitionkey = options.partitionkey; + this.populateQueryMetrics = options.populateQueryMetrics; + } + + /** + * Gets the partitionKeyRangeId. + * + * @return the partitionKeyRangeId. + */ + String partitionKeyRangeIdInternal() { + return this.partitionKeyRangeId; + } + + /** + * Sets the partitionKeyRangeId. + * + * @param partitionKeyRangeId the partitionKeyRangeId. + * @return the FeedOptions. 
+ */ + FeedOptions partitionKeyRangeIdInternal(String partitionKeyRangeId) { + this.partitionKeyRangeId = partitionKeyRangeId; + return this; + } + + /** + * Gets the session token for use with session consistency. + * + * @return the session token. + */ + public String sessionToken() { + return this.sessionToken; + } + + /** + * Sets the session token for use with session consistency. + * + * @param sessionToken the session token. + * @return the FeedOptions. + */ + public FeedOptions sessionToken(String sessionToken) { + this.sessionToken = sessionToken; + return this; + } + + /** + * Gets the option to allow scan on the queries which couldn't be served as + * indexing was opted out on the requested paths. + * + * @return the option of enable scan in query. + */ + public Boolean enableScanInQuery() { + return this.enableScanInQuery; + } + + /** + * Sets the option to allow scan on the queries which couldn't be served as + * indexing was opted out on the requested paths. + * + * @param enableScanInQuery the option of enable scan in query. + * @return the FeedOptions. + */ + public FeedOptions enableScanInQuery(Boolean enableScanInQuery) { + this.enableScanInQuery = enableScanInQuery; + return this; + } + + /** + * Gets the option to allow queries to emit out verbose traces for + * investigation. + * + * @return the emit verbose traces in query. + */ + public Boolean emitVerboseTracesInQuery() { + return this.emitVerboseTracesInQuery; + } + + /** + * Sets the option to allow queries to emit out verbose traces for + * investigation. + * + * @param emitVerboseTracesInQuery the emit verbose traces in query. + * @return the FeedOptions. + */ + public FeedOptions emitVerboseTracesInQuery(Boolean emitVerboseTracesInQuery) { + this.emitVerboseTracesInQuery = emitVerboseTracesInQuery; + return this; + } + + /** + * Gets the option to allow queries to run across all partitions of the + * collection. + * + * @return whether to allow queries to run across all partitions of the + * collection. + */ + public Boolean enableCrossPartitionQuery() { + return this.enableCrossPartitionQuery; + } + + /** + * Sets the option to allow queries to run across all partitions of the + * collection. + * + * @param enableCrossPartitionQuery whether to allow queries to run across all + * partitions of the collection. + * @return the FeedOptions. + */ + public FeedOptions enableCrossPartitionQuery(Boolean enableCrossPartitionQuery) { + this.enableCrossPartitionQuery = enableCrossPartitionQuery; + return this; + } + + /** + * Gets the number of concurrent operations run client side during parallel + * query execution. + * + * @return number of concurrent operations run client side during parallel query + * execution. + */ + public int maxDegreeOfParallelism() { + return maxDegreeOfParallelism; + } + + /** + * Sets the number of concurrent operations run client side during parallel + * query execution. + * + * @param maxDegreeOfParallelism number of concurrent operations. + * @return the FeedOptions. + */ + public FeedOptions maxDegreeOfParallelism(int maxDegreeOfParallelism) { + this.maxDegreeOfParallelism = maxDegreeOfParallelism; + return this; + } + + /** + * Gets the maximum number of items that can be buffered client side during + * parallel query execution. + * + * @return maximum number of items that can be buffered client side during + * parallel query execution. 
+ */ + public int maxBufferedItemCount() { + return maxBufferedItemCount; + } + + /** + * Sets the maximum number of items that can be buffered client side during + * parallel query execution. + * + * @param maxBufferedItemCount maximum number of items. + * @return the FeedOptions. + */ + public FeedOptions maxBufferedItemCount(int maxBufferedItemCount) { + this.maxBufferedItemCount = maxBufferedItemCount; + return this; + } + + /** + * Sets the ResponseContinuationTokenLimitInKb request option for document query + * requests in the Azure Cosmos DB service. + * + * ResponseContinuationTokenLimitInKb is used to limit the length of + * continuation token in the query response. Valid values are >= 1. + * + * The continuation token contains both required and optional fields. The + * required fields are necessary for resuming the execution from where it was + * stooped. The optional fields may contain serialized index lookup work that + * was done but not yet utilized. This avoids redoing the work again in + * subsequent continuations and hence improve the query performance. Setting the + * maximum continuation size to 1KB, the Azure Cosmos DB service will only + * serialize required fields. Starting from 2KB, the Azure Cosmos DB service + * would serialize as much as it could fit till it reaches the maximum specified + * size. + * + * @param limitInKb continuation token size limit. + * @return the FeedOptions. + */ + public FeedOptions responseContinuationTokenLimitInKb(int limitInKb) { + this.responseContinuationTokenLimitInKb = limitInKb; + return this; + } + + /** + * Gets the ResponseContinuationTokenLimitInKb request option for document query + * requests in the Azure Cosmos DB service. If not already set returns 0. + * + * ResponseContinuationTokenLimitInKb is used to limit the length of + * continuation token in the query response. Valid values are >= 1. + * + * @return return set ResponseContinuationTokenLimitInKb, or 0 if not set + */ + public int responseContinuationTokenLimitInKb() { + return responseContinuationTokenLimitInKb; + } + + + /** + * Gets the maximum number of items to be returned in the enumeration + * operation. + * + * @return the max number of items. + */ + public Integer maxItemCount() { + return this.maxItemCount; + } + + /** + * Sets the maximum number of items to be returned in the enumeration + * operation. + * + * @param maxItemCount the max number of items. + * @return the FeedOptionsBase. + */ + public FeedOptions maxItemCount(Integer maxItemCount) { + this.maxItemCount = maxItemCount; + return this; + } + + /** + * Gets the request continuation token. + * + * @return the request continuation. + */ + public String requestContinuation() { + return this.requestContinuation; + } + + /** + * Sets the request continuation token. + * + * @param requestContinuation + * the request continuation. + * @return the FeedOptionsBase. + */ + public FeedOptions requestContinuation(String requestContinuation) { + this.requestContinuation = requestContinuation; + return this; + } + + /** + * Gets the partition key used to identify the current request's target + * partition. + * + * @return the partition key. + */ + public PartitionKey partitionKey() { + return this.partitionkey; + } + + /** + * Sets the partition key used to identify the current request's target + * partition. + * + * @param partitionkey + * the partition key value. + * @return the FeedOptionsBase. 
+ */ + public FeedOptions partitionKey(PartitionKey partitionkey) { + this.partitionkey = partitionkey; + return this; + } + + /** + * Gets the option to enable populate query metrics + * @return whether to enable populate query metrics + */ + public boolean populateQueryMetrics() { + return populateQueryMetrics; + } + + /** + * Sets the option to enable/disable getting metrics relating to query execution on document query requests + * @param populateQueryMetrics whether to enable or disable query metrics + * @return the FeedOptionsBase. + */ + public FeedOptions populateQueryMetrics(boolean populateQueryMetrics) { + this.populateQueryMetrics = populateQueryMetrics; + return this; + } + + /** + * Gets the properties + * + * @return Map of request options properties + */ + public Map properties() { + return properties; + } + + /** + * Sets the properties used to identify the request token. + * + * @param properties the properties. + * @return the FeedOptionsBase. + */ + public FeedOptions properties(Map properties) { + this.properties = properties; + return this; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/FeedResponse.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/FeedResponse.java new file mode 100644 index 0000000000000..ce9d880e2db1c --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/FeedResponse.java @@ -0,0 +1,407 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
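A short sketch composing the feed options defined above for a cross-partition query; every value is illustrative rather than a recommendation.

FeedOptions options = new FeedOptions()
        .enableCrossPartitionQuery(true)        // let the query span all partitions
        .maxItemCount(100)                      // page size per FeedResponse
        .maxDegreeOfParallelism(2)              // client-side parallelism
        .maxBufferedItemCount(500)              // client-side buffering during parallel execution
        .responseContinuationTokenLimitInKb(2)  // keep continuation tokens small
        .populateQueryMetrics(true);            // ask for query metrics in the response headers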
+ */ + +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.Constants; +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.QueryMetrics; +import com.azure.data.cosmos.internal.QueryMetricsConstants; +import org.apache.commons.lang3.StringUtils; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; + +public class FeedResponse { + + private final List results; + private final Map header; + private final HashMap usageHeaders; + private final HashMap quotaHeaders; + private final boolean useEtagAsContinuation; + boolean nochanges; + private final ConcurrentMap queryMetricsMap; + private final String DefaultPartition = "0"; + private final FeedResponseDiagnostics feedResponseDiagnostics; + + FeedResponse(List results, Map headers) { + this(results, headers, false, false, new ConcurrentHashMap<>()); + } + + FeedResponse(List results, Map headers, ConcurrentMap queryMetricsMap) { + this(results, headers, false, false, queryMetricsMap); + } + + FeedResponse(List results, Map header, boolean nochanges) { + this(results, header, true, nochanges, new ConcurrentHashMap<>()); + } + + // TODO: need to more sure the query metrics can round trip just from the headers. + // We can then remove it as a parameter. + private FeedResponse( + List results, + Map header, + boolean useEtagAsContinuation, + boolean nochanges, + ConcurrentMap queryMetricsMap) { + this.results = results; + this.header = header; + this.usageHeaders = new HashMap<>(); + this.quotaHeaders = new HashMap<>(); + this.useEtagAsContinuation = useEtagAsContinuation; + this.nochanges = nochanges; + this.queryMetricsMap = new ConcurrentHashMap<>(queryMetricsMap); + this.feedResponseDiagnostics = new FeedResponseDiagnostics(queryMetricsMap); + } + + /** + * Results. + * + * @return the list of results. + */ + public List results() { + return results; + } + + /** + * Gets the maximum quota for database resources within the account from the Azure Cosmos DB service. + * + * @return The maximum quota for the account. + */ + public long databaseQuota() { + return this.maxQuotaHeader(Constants.Quota.DATABASE); + } + + /** + * Gets the current number of database resources within the account from the Azure Cosmos DB service. + * + * @return The current number of databases. + */ + public long databaseUsage() { + return this.currentQuotaHeader(Constants.Quota.DATABASE); + } + + /** + * Gets the maximum quota for collection resources within an account from the Azure Cosmos DB service. + * + * @return The maximum quota for the account. + */ + public long collectionQuota() { + return this.maxQuotaHeader(Constants.Quota.COLLECTION); + } + + /** + * Gets the current number of collection resources within the account from the Azure Cosmos DB service. + * + * @return The current number of collections. + */ + public long collectionUsage() { + return this.currentQuotaHeader(Constants.Quota.COLLECTION); + } + + /** + * Gets the maximum quota for user resources within an account from the Azure Cosmos DB service. + * + * @return The maximum quota for the account. + */ + public long userQuota() { + return this.maxQuotaHeader(Constants.Quota.USER); + } + + /** + * Gets the current number of user resources within the account from the Azure Cosmos DB service. + * + * @return The current number of users. 
+ */ + public long userUsage() { + return this.currentQuotaHeader(Constants.Quota.USER); + } + + /** + * Gets the maximum quota for permission resources within an account from the Azure Cosmos DB service. + * + * @return The maximum quota for the account. + */ + public long permissionQuota() { + return this.maxQuotaHeader(Constants.Quota.PERMISSION); + } + + /** + * Gets the current number of permission resources within the account from the Azure Cosmos DB service. + * + * @return The current number of permissions. + */ + public long permissionUsage() { + return this.currentQuotaHeader(Constants.Quota.PERMISSION); + } + + /** + * Gets the maximum size of a collection in kilobytes from the Azure Cosmos DB service. + * + * @return The maximum quota in kilobytes. + */ + public long collectionSizeQuota() { + return this.maxQuotaHeader(Constants.Quota.COLLECTION_SIZE); + } + + /** + * Gets the current size of a collection in kilobytes from the Azure Cosmos DB service. + * + * @return The current size of a collection in kilobytes. + */ + public long collectionSizeUsage() { + return this.currentQuotaHeader(Constants.Quota.COLLECTION_SIZE); + } + + /** + * Gets the maximum quota of stored procedures for a collection from the Azure Cosmos DB service. + * + * @return The maximum stored procedure quota. + */ + public long storedProceduresQuota() { + return this.maxQuotaHeader(Constants.Quota.STORED_PROCEDURE); + } + + /** + * Gets the current number of stored procedures for a collection from the Azure Cosmos DB service. + * + * @return The current number of stored procedures. + */ + public long storedProceduresUsage() { + return this.currentQuotaHeader(Constants.Quota.STORED_PROCEDURE); + } + + /** + * Gets the maximum quota of triggers for a collection from the Azure Cosmos DB service. + * + * @return The maximum triggers quota. + */ + public long triggersQuota() { + return this.maxQuotaHeader(Constants.Quota.TRIGGER); + } + + /** + * Get the current number of triggers for a collection from the Azure Cosmos DB service. + * + * @return The current number of triggers. + */ + public long triggersUsage() { + return this.currentQuotaHeader(Constants.Quota.TRIGGER); + } + + /** + * Gets the maximum quota of user defined functions for a collection from the Azure Cosmos DB service. + * + * @return The maximum user defined functions quota. + */ + public long userDefinedFunctionsQuota() { + return this.maxQuotaHeader(Constants.Quota.USER_DEFINED_FUNCTION); + } + + /** + * Gets the current number of user defined functions for a collection from the Azure Cosmos DB service. + * + * @return the current number of user defined functions. + */ + public long userDefinedFunctionsUsage() { + return this.currentQuotaHeader(Constants.Quota.USER_DEFINED_FUNCTION); + } + + /** + * Gets the maximum size limit for this entity from the Azure Cosmos DB service. + * + * @return the maximum size limit for this entity. + * Measured in kilobytes for document resources and in counts for other resources. + */ + public String maxResourceQuota() { + return getValueOrNull(header, + HttpConstants.HttpHeaders.MAX_RESOURCE_QUOTA); + } + + /** + * Gets the current size of this entity from the Azure Cosmos DB service. + * + * @return the current size for this entity. Measured in kilobytes for document resources + * and in counts for other resources. 
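A sketch of consuming a page-wise feed through the FeedResponse accessors; 'pages' stands for a Flux of FeedResponse instances produced by a query or read-all call that is outside this excerpt.

pages.subscribe(page -> {
    System.out.println("items on page:  " + page.results().size());
    System.out.println("request charge: " + page.requestCharge());
    System.out.println("continuation:   " + page.continuationToken());
    System.out.println("diagnostics:    " + page.feedResponseDiagnostics());
});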
+ */ + public String currentResourceQuotaUsage() { + return getValueOrNull(header, + HttpConstants.HttpHeaders.CURRENT_RESOURCE_QUOTA_USAGE); + } + + /** + * Gets the number of index paths (terms) generated by the operation. + * + * @return the request charge. + */ + public double requestCharge() { + String value = getValueOrNull(header, + HttpConstants.HttpHeaders.REQUEST_CHARGE); + if (StringUtils.isEmpty(value)) { + return 0; + } + return Double.valueOf(value); + } + + /** + * Gets the activity ID for the request. + * + * @return the activity id. + */ + public String activityId() { + return getValueOrNull(header, HttpConstants.HttpHeaders.ACTIVITY_ID); + } + + /** + * Gets the continuation token to be used for continuing the enumeration. + * + * @return the response continuation. + */ + public String continuationToken() { + String headerName = useEtagAsContinuation + ? HttpConstants.HttpHeaders.E_TAG + : HttpConstants.HttpHeaders.CONTINUATION; + return getValueOrNull(header, headerName); + } + + /** + * Gets the session token for use in session consistency. + * + * @return the session token. + */ + public String sessionToken() { + return getValueOrNull(header, HttpConstants.HttpHeaders.SESSION_TOKEN); + } + + /** + * Gets the response headers. + * + * @return the response headers. + */ + public Map responseHeaders() { + return header; + } + + private String queryMetricsString(){ + return getValueOrNull(responseHeaders(), + HttpConstants.HttpHeaders.QUERY_METRICS); + } + + /** + * Gets the feed response diagnostics + * @return Feed response diagnostics + */ + public FeedResponseDiagnostics feedResponseDiagnostics() { + return this.feedResponseDiagnostics; + } + + ConcurrentMap queryMetrics() { + if (queryMetricsMap != null && !queryMetricsMap.isEmpty()) { + return queryMetricsMap; + } + + //We parse query metrics for un-partitioned collection here + if (!StringUtils.isEmpty(queryMetricsString())) { + String qm = queryMetricsString(); + qm += String.format(";%s=%.2f", QueryMetricsConstants.RequestCharge, requestCharge()); + queryMetricsMap.put(DefaultPartition, QueryMetrics.createFromDelimitedString(qm)); + } + return queryMetricsMap; + } + + ConcurrentMap queryMetricsMap(){ + return queryMetricsMap; + } + + private long currentQuotaHeader(String headerName) { + if (this.usageHeaders.size() == 0 && !StringUtils.isEmpty(this.maxResourceQuota()) && + !StringUtils.isEmpty(this.currentResourceQuotaUsage())) { + this.populateQuotaHeader(this.maxResourceQuota(), this.currentResourceQuotaUsage()); + } + + if (this.usageHeaders.containsKey(headerName)) { + return this.usageHeaders.get(headerName); + } + + return 0; + } + + private long maxQuotaHeader(String headerName) { + if (this.quotaHeaders.size() == 0 && + !StringUtils.isEmpty(this.maxResourceQuota()) && + !StringUtils.isEmpty(this.currentResourceQuotaUsage())) { + this.populateQuotaHeader(this.maxResourceQuota(), this.currentResourceQuotaUsage()); + } + + if (this.quotaHeaders.containsKey(headerName)) { + return this.quotaHeaders.get(headerName); + } + + return 0; + } + + private void populateQuotaHeader(String headerMaxQuota, + String headerCurrentUsage) { + String[] headerMaxQuotaWords = headerMaxQuota.split(Constants.Quota.DELIMITER_CHARS, -1); + String[] headerCurrentUsageWords = headerCurrentUsage.split(Constants.Quota.DELIMITER_CHARS, -1); + + for (int i = 0; i < headerMaxQuotaWords.length; ++i) { + if (headerMaxQuotaWords[i].equalsIgnoreCase(Constants.Quota.DATABASE)) { + this.quotaHeaders.put(Constants.Quota.DATABASE, 
Long.valueOf(headerMaxQuotaWords[i + 1])); + this.usageHeaders.put(Constants.Quota.DATABASE, Long.valueOf(headerCurrentUsageWords[i + 1])); + } else if (headerMaxQuotaWords[i].equalsIgnoreCase(Constants.Quota.COLLECTION)) { + this.quotaHeaders.put(Constants.Quota.COLLECTION, Long.valueOf(headerMaxQuotaWords[i + 1])); + this.usageHeaders.put(Constants.Quota.COLLECTION, Long.valueOf(headerCurrentUsageWords[i + 1])); + } else if (headerMaxQuotaWords[i].equalsIgnoreCase(Constants.Quota.USER)) { + this.quotaHeaders.put(Constants.Quota.USER, Long.valueOf(headerMaxQuotaWords[i + 1])); + this.usageHeaders.put(Constants.Quota.USER, Long.valueOf(headerCurrentUsageWords[i + 1])); + } else if (headerMaxQuotaWords[i].equalsIgnoreCase(Constants.Quota.PERMISSION)) { + this.quotaHeaders.put(Constants.Quota.PERMISSION, Long.valueOf(headerMaxQuotaWords[i + 1])); + this.usageHeaders.put(Constants.Quota.PERMISSION, Long.valueOf(headerCurrentUsageWords[i + 1])); + } else if (headerMaxQuotaWords[i].equalsIgnoreCase(Constants.Quota.COLLECTION_SIZE)) { + this.quotaHeaders.put(Constants.Quota.COLLECTION_SIZE, Long.valueOf(headerMaxQuotaWords[i + 1])); + this.usageHeaders.put(Constants.Quota.COLLECTION_SIZE, Long.valueOf(headerCurrentUsageWords[i + 1])); + } else if (headerMaxQuotaWords[i].equalsIgnoreCase(Constants.Quota.STORED_PROCEDURE)) { + this.quotaHeaders.put(Constants.Quota.STORED_PROCEDURE, Long.valueOf(headerMaxQuotaWords[i + 1])); + this.usageHeaders.put(Constants.Quota.STORED_PROCEDURE, Long.valueOf(headerCurrentUsageWords[i + 1])); + } else if (headerMaxQuotaWords[i].equalsIgnoreCase(Constants.Quota.TRIGGER)) { + this.quotaHeaders.put(Constants.Quota.TRIGGER, Long.valueOf(headerMaxQuotaWords[i + 1])); + this.usageHeaders.put(Constants.Quota.TRIGGER, Long.valueOf(headerCurrentUsageWords[i + 1])); + } else if (headerMaxQuotaWords[i].equalsIgnoreCase(Constants.Quota.USER_DEFINED_FUNCTION)) { + this.quotaHeaders.put(Constants.Quota.USER_DEFINED_FUNCTION, Long.valueOf(headerMaxQuotaWords[i + 1])); + this.usageHeaders.put(Constants.Quota.USER_DEFINED_FUNCTION, + Long.valueOf(headerCurrentUsageWords[i + 1])); + } + } + } + + private static String getValueOrNull(Map map, String key) { + if (map != null) { + return map.get(key); + } + return null; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/FeedResponseDiagnostics.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/FeedResponseDiagnostics.java new file mode 100644 index 0000000000000..acabc5c5f32d9 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/FeedResponseDiagnostics.java @@ -0,0 +1,38 @@ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.QueryMetrics; +import org.apache.commons.lang3.StringUtils; + +import java.util.Map; + +public class FeedResponseDiagnostics { + + private Map queryMetricsMap; + + FeedResponseDiagnostics(Map queryMetricsMap) { + this.queryMetricsMap = queryMetricsMap; + } + + Map queryMetricsMap() { + return queryMetricsMap; + } + + FeedResponseDiagnostics queryMetricsMap(Map queryMetricsMap) { + this.queryMetricsMap = queryMetricsMap; + return this; + } + + /** + * Returns the textual representation of feed response metrics + * @return Textual representation of feed response metrics + */ + @Override + public String toString() { + if (queryMetricsMap == null || queryMetricsMap.isEmpty()) { + return StringUtils.EMPTY; + } + StringBuilder stringBuilder = new StringBuilder(); + queryMetricsMap.forEach((key, value) -> 
stringBuilder.append(key).append("=").append(value.toString()).append("\n")); + return stringBuilder.toString(); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/ForbiddenException.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/ForbiddenException.java new file mode 100644 index 0000000000000..9cc3142107948 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/ForbiddenException.java @@ -0,0 +1,71 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.RMResources; +import com.azure.data.cosmos.internal.directconnectivity.HttpUtils; +import com.azure.data.cosmos.internal.http.HttpHeaders; + +import java.net.URI; +import java.util.Map; + +public class ForbiddenException extends CosmosClientException { + ForbiddenException() { + this(RMResources.Forbidden); + } + + public ForbiddenException(CosmosError cosmosError, long lsn, String partitionKeyRangeId, Map responseHeaders) { + super(HttpConstants.StatusCodes.FORBIDDEN, cosmosError, responseHeaders); + BridgeInternal.setLSN(this, lsn); + BridgeInternal.setPartitionKeyRangeId(this, partitionKeyRangeId); + } + + ForbiddenException(String message) { + this(message, null, null, null); + } + + ForbiddenException(String message, HttpHeaders headers, String requestUrlString) { + this(message, null, headers, requestUrlString); + } + + public ForbiddenException(String message, HttpHeaders headers, URI requestUri) { + this(message, headers, requestUri != null ? 
requestUri.toString() : null); + } + + ForbiddenException(Exception innerException) { + this(RMResources.Forbidden, innerException, null, null); + } + + ForbiddenException(String message, + Exception innerException, + HttpHeaders headers, + String requestUrlString) { + super(String.format("%s: %s", RMResources.Forbidden, message), + innerException, + HttpUtils.asMap(headers), + HttpConstants.StatusCodes.FORBIDDEN, + requestUrlString); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/GoneException.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/GoneException.java new file mode 100644 index 0000000000000..8979ee64c4058 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/GoneException.java @@ -0,0 +1,102 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.RMResources; +import com.azure.data.cosmos.internal.Strings; +import com.azure.data.cosmos.internal.directconnectivity.HttpUtils; +import com.azure.data.cosmos.internal.http.HttpHeaders; + +import java.net.URI; +import java.util.HashMap; +import java.util.Map; + +public class GoneException extends CosmosClientException { + + public GoneException(String msg) { + this(msg, null); + } + public GoneException() { + this(RMResources.Gone, null); + } + + public GoneException(CosmosError cosmosError, long lsn, String partitionKeyRangeId, Map responseHeaders) { + super(HttpConstants.StatusCodes.GONE, cosmosError, responseHeaders); + BridgeInternal.setLSN(this, lsn); + BridgeInternal.setPartitionKeyRangeId(this, partitionKeyRangeId); + } + + public GoneException(String message, String requestUri) { + this(message, null, new HashMap<>(), requestUri); + } + + GoneException(String message, + Exception innerException, + URI requestUri, + String localIpAddress) { + this(message(localIpAddress, message), innerException, null, requestUri); + } + + GoneException(Exception innerException) { + this(RMResources.Gone, innerException, new HashMap<>(), null); + } + + public GoneException(String message, HttpHeaders headers, URI requestUrl) { + super(message, null, HttpUtils.asMap(headers), HttpConstants.StatusCodes.GONE, requestUrl != null ? 
requestUrl.toString() : null); + } + + GoneException(String message, HttpHeaders headers, String requestUriString) { + super(message, null, HttpUtils.asMap(headers), HttpConstants.StatusCodes.GONE, requestUriString); + } + + public GoneException(String message, + Exception innerException, + HttpHeaders headers, + URI requestUrl) { + super(message, innerException, HttpUtils.asMap(headers), HttpConstants.StatusCodes.GONE, requestUrl != null ? requestUrl.toString() : null); + } + + public GoneException(String message, + Exception innerException, + Map headers, + String requestUriString) { + super(message, innerException, headers, HttpConstants.StatusCodes.GONE, requestUriString); + } + + GoneException(CosmosError cosmosError, Map headers) { + super(HttpConstants.StatusCodes.GONE, cosmosError, headers); + } + + private static String message(String localIP, String baseMessage) { + if (!Strings.isNullOrEmpty(localIP)) { + return String.format( + RMResources.ExceptionMessageAddIpAddress, + baseMessage, + localIP); + } + + return baseMessage; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/HashIndex.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/HashIndex.java new file mode 100644 index 0000000000000..a501d27846967 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/HashIndex.java @@ -0,0 +1,136 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.Constants; +import org.apache.commons.lang3.StringUtils; + +/** + * Represents a hash index in the Azure Cosmos DB database service. + */ +public final class HashIndex extends Index { + + /** + * Specifies an instance of HashIndex class with specified DataType. + *

+ * Here is an example to instantiate HashIndex class passing in the DataType:
+ * <pre>
+     * {@code
+     *
+     * HashIndex hashIndex = new HashIndex(DataType.STRING);
+     *
+     * }
+     * </pre>
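+     * <p>
+     * A minimal follow-up sketch of attaching the index to an indexing policy (the path value is illustrative):
+     * <pre>
+     * {@code
+     * IncludedPath includedPath = new IncludedPath();
+     * includedPath.path("/name/?");
+     * includedPath.indexes(Collections.<Index>singletonList(hashIndex));
+     *
+     * IndexingPolicy indexingPolicy = new IndexingPolicy();
+     * indexingPolicy.includedPaths().add(includedPath);
+     * }
+     * </pre>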
+ * + * @param dataType the data type. + */ + public HashIndex(DataType dataType) { + super(IndexKind.HASH); + this.dataType(dataType); + } + + /** + * Initializes a new instance of the HashIndex class with specified DataType and precision. + *

+ * Here is an example to instantiate HashIndex class passing in the DataType and precision:
+ * <pre>
+     * {@code
+     *
+     * HashIndex hashIndex = new HashIndex(DataType.STRING, 3);
+     *
+     * }
+     * </pre>
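+     * <p>
+     * The same index can equivalently be obtained through the static factory on the base class:
+     * <pre>
+     * {@code
+     * HashIndex hashIndex = Index.Hash(DataType.STRING, 3);
+     * }
+     * </pre>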
+ * + * @param dataType the data type. + * @param precision the precision. + */ + public HashIndex(DataType dataType, int precision) { + super(IndexKind.HASH); + this.dataType(dataType); + this.precision(precision); + } + + /** + * Initializes a new instance of the HashIndex class with json string. + * + * @param jsonString the json string that represents the index. + */ + HashIndex(String jsonString) { + super(jsonString, IndexKind.HASH); + if (this.dataType() == null) { + throw new IllegalArgumentException("The jsonString doesn't contain a valid 'dataType'."); + } + } + + /** + * Gets data type. + * + * @return the data type. + */ + public DataType dataType() { + DataType result = null; + try { + result = DataType.valueOf(StringUtils.upperCase(super.getString(Constants.Properties.DATA_TYPE))); + } catch (IllegalArgumentException e) { + // Ignore exception and let the caller handle null value. + this.getLogger().warn("INVALID index dataType value {}.", super.getString(Constants.Properties.DATA_TYPE)); + } + return result; + } + + /** + * Sets data type. + * + * @param dataType the data type. + * @return the Hash Index. + */ + public HashIndex dataType(DataType dataType) { + super.set(Constants.Properties.DATA_TYPE, dataType.toString()); + return this; + } + + /** + * Gets precision. + * + * @return the precision. + */ + public int precision() { + return super.getInt(Constants.Properties.PRECISION); + } + + /** + * Sets precision. + * + * @param precision the precision. + * @return the Hash Index. + */ + public HashIndex precision(int precision) { + super.set(Constants.Properties.PRECISION, precision); + return this; + } + + boolean hasPrecision() { + return super.has(Constants.Properties.PRECISION); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/IncludedPath.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/IncludedPath.java new file mode 100644 index 0000000000000..b18424cf89990 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/IncludedPath.java @@ -0,0 +1,138 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.Constants; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.node.ArrayNode; +import org.apache.commons.lang3.StringUtils; + +import java.util.ArrayList; +import java.util.Collection; + +/** + * Represents an included path of the IndexingPolicy in the Azure Cosmos DB database service. + */ +public class IncludedPath extends JsonSerializable { + + private Collection indexes; + + /** + * Constructor. + */ + public IncludedPath() { + super(); + } + + /** + * Constructor. + * + * @param jsonString the json string that represents the included path. + */ + public IncludedPath(String jsonString) { + super(jsonString); + } + + /** + * Gets path. + * + * @return the path. + */ + public String path() { + return super.getString(Constants.Properties.PATH); + } + + /** + * Sets path. + * + * @param path the path. + * @return the Included Path. + */ + public IncludedPath path(String path) { + super.set(Constants.Properties.PATH, path); + return this; + } + + /** + * Gets the paths that are chosen to be indexed by the user. + * + * @return the included paths. + */ + public Collection indexes() { + if (this.indexes == null) { + this.indexes = this.indexCollection(); + + if (this.indexes == null) { + this.indexes = new ArrayList(); + } + } + + return this.indexes; + } + + public IncludedPath indexes(Collection indexes) { + this.indexes = indexes; + return this; + } + + private Collection indexCollection() { + if (this.propertyBag != null && this.propertyBag.has(Constants.Properties.INDEXES)) { + ArrayNode jsonArray = (ArrayNode) this.propertyBag.get(Constants.Properties.INDEXES); + Collection result = new ArrayList(); + + for (int i = 0; i < jsonArray.size(); i++) { + JsonNode jsonObject = jsonArray.get(i); + + IndexKind indexKind = IndexKind.valueOf(StringUtils.upperCase( + jsonObject.get(Constants.Properties.INDEX_KIND).asText())); + switch (indexKind) { + case HASH: + result.add(new HashIndex(jsonObject.toString())); + break; + case RANGE: + result.add(new RangeIndex(jsonObject.toString())); + break; + case SPATIAL: + result.add(new SpatialIndex(jsonObject.toString())); + break; + } + } + + return result; + } + + return null; + } + + @Override + void populatePropertyBag() { + if (this.indexes != null) { + for (Index index : this.indexes) { + index.populatePropertyBag(); + } + + super.set(Constants.Properties.INDEXES, this.indexes); + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/Index.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/Index.java new file mode 100644 index 0000000000000..7a91b006f7cb2 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/Index.java @@ -0,0 +1,166 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.Constants; +import org.apache.commons.lang3.StringUtils; + +/** + * Represents the index of a collection in the Azure Cosmos DB database service. + */ +public abstract class Index extends JsonSerializable { + + /** + * Constructor. + * + * @param indexKind the kind of the index + */ + Index(IndexKind indexKind) { + super(); + this.kind(indexKind); + } + + /** + * Constructor. + * + * @param jsonString the json string that represents the index. + * @param indexKind the kind of the index + */ + Index(String jsonString, IndexKind indexKind) { + super(jsonString); + this.kind(indexKind); + } + + /** + * Returns an instance of RangeIndex class with specified DataType. + *

+ * Here is an example to create a RangeIndex instance passing in the DataType:
+ * <pre>
+     * {@code
+     *
+     * RangeIndex rangeIndex = Index.Range(DataType.NUMBER);
+     *
+     * }
+     * </pre>
+ * + * @param dataType the data type. + * @return an instance of RangeIndex type. + */ + public static RangeIndex Range(DataType dataType) { + return new RangeIndex(dataType); + } + + /** + * Returns an instance of RangeIndex class with specified DataType and precision. + *

+ * Here is an example to create a RangeIndex instance passing in the DataType and precision:
+ * <pre>
+     * {@code
+     *
+     * RangeIndex rangeIndex = Index.Range(DataType.NUMBER, -1);
+     *
+     * }
+     * </pre>
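+     * <p>
+     * A precision of {@code -1} in the example above requests maximum precision, which is the usual choice
+     * when the indexed path is used in range comparisons or ORDER BY queries.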
+ * + * @param dataType specifies the target data type for the index path specification. + * @param precision specifies the precision to be used for the data type associated with this index. + * @return an instance of RangeIndex type. + */ + public static RangeIndex Range(DataType dataType, int precision) { + return new RangeIndex(dataType, precision); + } + + /** + * Returns an instance of HashIndex class with specified DataType. + *

+ * Here is an example to create a HashIndex instance passing in the DataType:
+ * <pre>
+     * {@code
+     *
+     * HashIndex hashIndex = Index.Hash(DataType.STRING);
+     * }
+     * </pre>
+ * + * @param dataType specifies the target data type for the index path specification. + * @return an instance of HashIndex type. + */ + public static HashIndex Hash(DataType dataType) { + return new HashIndex(dataType); + } + + /** + * Returns an instance of HashIndex class with specified DataType and precision. + *

+ * Here is an example to create a HashIndex instance passing in the DataType and precision:

+ * HashIndex hashIndex = Index.HASH(DataType.STRING, 3); + * + * @param dataType specifies the target data type for the index path specification. + * @param precision specifies the precision to be used for the data type associated with this index. + * @return an instance of HashIndex type. + */ + public static HashIndex Hash(DataType dataType, int precision) { + return new HashIndex(dataType, precision); + } + + /** + * Returns an instance of SpatialIndex class with specified DataType. + *

+ * Here is an example to create a SpatialIndex instance passing in the DataType:

+ * SpatialIndex spatialIndex = Index.SPATIAL(DataType.POINT); + * + * @param dataType specifies the target data type for the index path specification. + * @return an instance of SpatialIndex type. + */ + public static SpatialIndex Spatial(DataType dataType) { + return new SpatialIndex(dataType); + } + + /** + * Gets index kind. + * + * @return the index kind. + */ + public IndexKind kind() { + IndexKind result = null; + try { + result = IndexKind.valueOf(StringUtils.upperCase(super.getString(Constants.Properties.INDEX_KIND))); + } catch (IllegalArgumentException e) { + this.getLogger().warn("INVALID index kind value %s.", super.getString(Constants.Properties.INDEX_KIND)); + } + + return result; + } + + /** + * Sets index kind. + * + * @param indexKind the index kind. + */ + private Index kind(IndexKind indexKind) { + super.set(Constants.Properties.INDEX_KIND, indexKind.toString()); + return this; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/IndexKind.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/IndexKind.java new file mode 100644 index 0000000000000..cea3ec493419b --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/IndexKind.java @@ -0,0 +1,51 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +import org.apache.commons.text.WordUtils; + +/** + * These are the indexing types available for indexing a path in the Azure Cosmos DB database service. + * For additional details, refer to + * http://azure.microsoft.com/documentation/articles/documentdb-indexing-policies/#ConfigPolicy. + */ +public enum IndexKind { + // The index entries are hashed to serve point look up queries. + // Can be used to serve queries like: SELECT * FROM docs d WHERE d.prop = 5 + HASH, + + // The index entries are ordered. RANGE indexes are optimized for inequality predicate queries with efficient range + // scans. 
+ // Can be used to serve queries like: SELECT * FROM docs d WHERE d.prop > 5 + RANGE, + + // The index entries are indexed to serve spatial queries like below: + // SELECT * FROM Root r WHERE ST_DISTANCE({"type":"POINT","coordinates":[71.0589,42.3601]}, r.location) $LE 10000 + SPATIAL; + + @Override + public String toString() { + return WordUtils.capitalizeFully(this.name()); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/IndexingDirective.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/IndexingDirective.java new file mode 100644 index 0000000000000..3e692aa840225 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/IndexingDirective.java @@ -0,0 +1,52 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +import org.apache.commons.text.WordUtils; + +/** + * Specifies whether or not the resource is to be indexed in the Azure Cosmos DB database service. + */ +public enum IndexingDirective { + + /** + * Use any pre-defined/pre-configured defaults. + */ + DEFAULT, + + /** + * Index the resource. + */ + INCLUDE, + + /** + * Do not index the resource. + */ + EXCLUDE; + + @Override + public String toString() { + return WordUtils.capitalizeFully(this.name()); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/IndexingMode.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/IndexingMode.java new file mode 100644 index 0000000000000..e745eab1c68e3 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/IndexingMode.java @@ -0,0 +1,60 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +import org.apache.commons.text.WordUtils; + +/** + * Specifies the supported indexing modes in the Azure Cosmos DB database service. + */ +public enum IndexingMode { + /** + * Index is updated synchronously with a create or update operation. + *

+ * With consistent indexing, query behavior is the same as the default consistency level for the collection. The + * index is always kept up to date with the data. + */ + CONSISTENT, + + /** + * Index is updated asynchronously with respect to a create or update operation. + *

+ * With lazy indexing, queries are eventually consistent. The index is updated when the collection is idle. + */ + LAZY, + + /** + * No index is provided. + *

+ * Setting IndexingMode to "NONE" drops the index. Use this if you don't want to maintain the index for a document + * collection, to save the storage cost or improve the write throughput. Your queries will degenerate to scans of + * the entire collection. + */ + NONE; + + @Override + public String toString() { + return WordUtils.capitalizeFully(this.name()); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/IndexingPolicy.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/IndexingPolicy.java new file mode 100644 index 0000000000000..588d7297b1f9b --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/IndexingPolicy.java @@ -0,0 +1,285 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.Constants; +import com.fasterxml.jackson.databind.node.ArrayNode; +import org.apache.commons.lang3.StringUtils; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +/** + * Represents the indexing policy configuration for a collection in the Azure Cosmos DB database service. + */ +public final class IndexingPolicy extends JsonSerializable { + + private static final String DEFAULT_PATH = "/*"; + + private List includedPaths; + private List excludedPaths; + private List> compositeIndexes; + private List spatialIndexes; + + /** + * Constructor. + */ + public IndexingPolicy() { + this.automatic(true); + this.indexingMode(IndexingMode.CONSISTENT); + } + + /** + * Initializes a new instance of the IndexingPolicy class with the specified set of indexes as + * default index specifications for the root path. + *

+ * The following example shows how to override the default indexingPolicy for the root path:
+ * <pre>
+     * {@code
+     * HashIndex hashIndexOverride = Index.Hash(DataType.STRING, 5);
+     * RangeIndex rangeIndexOverride = Index.Range(DataType.NUMBER, 2);
+     * SpatialIndex spatialIndexOverride = Index.Spatial(DataType.POINT);
+     *
+     * IndexingPolicy indexingPolicy = new IndexingPolicy(new Index[] { hashIndexOverride, rangeIndexOverride, spatialIndexOverride });
+     * }
+     * </pre>
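+     * <p>
+     * A minimal sketch of further configuration on the resulting policy, using the fluent setters on this class:
+     * <pre>
+     * {@code
+     * indexingPolicy.indexingMode(IndexingMode.CONSISTENT)
+     *               .automatic(true);
+     * }
+     * </pre>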
+ *

+ * If you would like to override the indexingPolicy for numbers only, you can specify just that:
+ * <pre>
+     * {@code
+     * RangeIndex rangeIndexOverride = Index.Range(DataType.NUMBER, 2);
+     *
+     * IndexingPolicy indexingPolicy = new IndexingPolicy(new Index[] { rangeIndexOverride });
+     * }
+     * </pre>
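+     * <p>
+     * Paths can also be excluded explicitly; a sketch assuming {@code ExcludedPath} exposes a
+     * {@code path(String)} setter mirroring {@code IncludedPath} (the path value is illustrative):
+     * <pre>
+     * {@code
+     * ExcludedPath excludedPath = new ExcludedPath();
+     * excludedPath.path("/metadata/*");
+     * indexingPolicy.excludedPaths().add(excludedPath);
+     * }
+     * </pre>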
+ * + * @param defaultIndexOverrides comma separated set of indexes that serve as default index specifications for the root path. + */ + public IndexingPolicy(Index[] defaultIndexOverrides) { + this(); + + if (defaultIndexOverrides == null) { + throw new IllegalArgumentException("defaultIndexOverrides is null."); + } + + IncludedPath includedPath = new IncludedPath(); + includedPath.path(IndexingPolicy.DEFAULT_PATH); + includedPath.indexes(new ArrayList(Arrays.asList(defaultIndexOverrides))); + this.includedPaths().add(includedPath); + } + + /** + * Constructor. + * + * @param jsonString the json string that represents the indexing policy. + */ + IndexingPolicy(String jsonString) { + super(jsonString); + } + + /** + * Gets whether automatic indexing is enabled for a collection. + *

+ * In automatic indexing, documents can be explicitly excluded from indexing using RequestOptions. In manual + * indexing, documents can be explicitly included. + * + * @return the automatic + */ + public Boolean automatic() { + return super.getBoolean(Constants.Properties.AUTOMATIC); + } + + /** + * Sets whether automatic indexing is enabled for a collection. + *

+ * In automatic indexing, documents can be explicitly excluded from indexing using RequestOptions. In manual + * indexing, documents can be explicitly included. + * + * @param automatic the automatic + * @return the Indexing Policy. + */ + public IndexingPolicy automatic(boolean automatic) { + super.set(Constants.Properties.AUTOMATIC, automatic); + return this; + } + + /** + * Gets the indexing mode (consistent or lazy). + * + * @return the indexing mode. + */ + public IndexingMode indexingMode() { + IndexingMode result = IndexingMode.LAZY; + try { + result = IndexingMode.valueOf(StringUtils.upperCase(super.getString(Constants.Properties.INDEXING_MODE))); + } catch (IllegalArgumentException e) { + this.getLogger().warn("INVALID indexingMode value {}.", super.getString(Constants.Properties.INDEXING_MODE)); + } + return result; + } + + /** + * Sets the indexing mode (consistent or lazy). + * + * @param indexingMode the indexing mode. + * @return the Indexing Policy. + */ + public IndexingPolicy indexingMode(IndexingMode indexingMode) { + super.set(Constants.Properties.INDEXING_MODE, indexingMode.toString()); + return this; + } + + /** + * Gets the paths that are chosen to be indexed by the user. + * + * @return the included paths. + */ + public List includedPaths() { + if (this.includedPaths == null) { + this.includedPaths = super.getList(Constants.Properties.INCLUDED_PATHS, IncludedPath.class); + + if (this.includedPaths == null) { + this.includedPaths = new ArrayList(); + } + } + + return this.includedPaths; + } + + public void setIncludedPaths(List includedPaths) { + this.includedPaths = includedPaths; + } + + /** + * Gets the paths that are not indexed. + * + * @return the excluded paths. + */ + public List excludedPaths() { + if (this.excludedPaths == null) { + this.excludedPaths = super.getList(Constants.Properties.EXCLUDED_PATHS, ExcludedPath.class); + + if (this.excludedPaths == null) { + this.excludedPaths = new ArrayList(); + } + } + + return this.excludedPaths; + } + + public IndexingPolicy excludedPaths(List excludedPaths) { + this.excludedPaths = excludedPaths; + return this; + } + + /** + * Gets the composite indexes for additional indexes. + * + * @return the composite indexes. + */ + public List> compositeIndexes() { + if (this.compositeIndexes == null) { + this.compositeIndexes = new ArrayList<>(); + ArrayNode compositeIndexes = (ArrayNode) super.get(Constants.Properties.COMPOSITE_INDEXES); + for (int i = 0; i < compositeIndexes.size(); i ++) { + ArrayNode compositeIndex = (ArrayNode) compositeIndexes.get(i); + ArrayList compositePaths = new ArrayList(); + for (int j = 0; j < compositeIndex.size(); j ++) { + CompositePath candidateCompositePath = new CompositePath(compositeIndex.get(j).toString()); + compositePaths.add(candidateCompositePath); + } + this.compositeIndexes.add(compositePaths); + } + } + + return this.compositeIndexes; + } + + /** + * Sets the composite indexes for additional indexes. + * + * @param compositeIndexes the composite indexes. + * @return the Indexing Policy. + */ + public IndexingPolicy compositeIndexes(List> compositeIndexes) { + this.compositeIndexes = compositeIndexes; + super.set(Constants.Properties.COMPOSITE_INDEXES, this.compositeIndexes); + return this; + } + + /** + * Sets the spatial indexes for additional indexes. + * + * @return the spatial indexes. 
+ */ + public List spatialIndexes() { + if (this.spatialIndexes == null) { + this.spatialIndexes = super.getList(Constants.Properties.SPATIAL_INDEXES, SpatialSpec.class); + + if (this.spatialIndexes == null) { + this.spatialIndexes = new ArrayList(); + } + } + + return this.spatialIndexes; + } + + /** + * Sets the spatial indexes for additional indexes. + * + * @param spatialIndexes the spatial indexes. + * @return the Indexing Policy. + */ + public IndexingPolicy spatialIndexes(List spatialIndexes) { + this.spatialIndexes = spatialIndexes; + super.set(Constants.Properties.SPATIAL_INDEXES, this.spatialIndexes); + return this; + } + + @Override + void populatePropertyBag() { + // If indexing mode is not 'none' and not paths are set, set them to the defaults + if (this.indexingMode() != IndexingMode.NONE && this.includedPaths().size() == 0 && + this.excludedPaths().size() == 0) { + IncludedPath includedPath = new IncludedPath(); + includedPath.path(IndexingPolicy.DEFAULT_PATH); + this.includedPaths().add(includedPath); + } + + if (this.includedPaths != null) { + for (IncludedPath includedPath : this.includedPaths) { + includedPath.populatePropertyBag(); + } + super.set(Constants.Properties.INCLUDED_PATHS, this.includedPaths); + } + + if (this.excludedPaths != null) { + for (ExcludedPath excludedPath : this.excludedPaths) { + excludedPath.populatePropertyBag(); + } + super.set(Constants.Properties.EXCLUDED_PATHS, this.excludedPaths); + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/InternalServerErrorException.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/InternalServerErrorException.java new file mode 100644 index 0000000000000..407ba1dab51d5 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/InternalServerErrorException.java @@ -0,0 +1,90 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.RMResources; +import com.azure.data.cosmos.internal.directconnectivity.HttpUtils; +import com.azure.data.cosmos.internal.http.HttpHeaders; + +import java.net.URI; +import java.net.URL; +import java.util.Map; + +/** + * This exception is thrown when DocumentServiceRequest contains x-ms-documentdb-partitionkeyrangeid + * header and such range id doesn't exist. + *

+ * No retries should be made in this case, as either split or merge might have happened and query/readfeed + * must take appropriate actions. + */ +public class InternalServerErrorException extends CosmosClientException { + + InternalServerErrorException() { + this(RMResources.InternalServerError); + } + + public InternalServerErrorException(CosmosError cosmosError, long lsn, String partitionKeyRangeId, Map responseHeaders) { + super(HttpConstants.StatusCodes.INTERNAL_SERVER_ERROR, cosmosError, responseHeaders); + BridgeInternal.setLSN(this, lsn); + BridgeInternal.setPartitionKeyRangeId(this, partitionKeyRangeId); + } + + public InternalServerErrorException(String message) { + this(message, null, (Map) null, null); + } + + + InternalServerErrorException(String message, Exception innerException) { + this(message, innerException, (HttpHeaders) null, (String) null); + } + + InternalServerErrorException(Exception innerException) { + this(RMResources.InternalServerError, innerException, (HttpHeaders) null, (String) null); + } + + public InternalServerErrorException(String message, HttpHeaders headers, URI requestUri) { + super(message, null, HttpUtils.asMap(headers), HttpConstants.StatusCodes.INTERNAL_SERVER_ERROR, requestUri != null ? requestUri.toString() : null); + } + + InternalServerErrorException(String message, HttpHeaders headers, String requestUriString) { + super(message, null, HttpUtils.asMap(headers), HttpConstants.StatusCodes.INTERNAL_SERVER_ERROR, requestUriString); + } + + InternalServerErrorException(String message, HttpHeaders headers, URL requestUrl) { + super(message, null, HttpUtils.asMap(headers), HttpConstants.StatusCodes.INTERNAL_SERVER_ERROR, requestUrl != null ? requestUrl.toString() : null); + } + + InternalServerErrorException(String message, Exception innerException, HttpHeaders headers, URI requestUri) { + super(message, innerException, HttpUtils.asMap(headers), HttpConstants.StatusCodes.INTERNAL_SERVER_ERROR, requestUri != null ? requestUri.toString() : null); + } + + InternalServerErrorException(String message, Exception innerException, HttpHeaders headers, String requestUriString) { + super(message, innerException, HttpUtils.asMap(headers), HttpConstants.StatusCodes.INTERNAL_SERVER_ERROR, requestUriString); + } + + public InternalServerErrorException(String message, Exception innerException, Map headers, String requestUriString) { + super(message, innerException, headers, HttpConstants.StatusCodes.INTERNAL_SERVER_ERROR, requestUriString); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/InvalidPartitionException.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/InvalidPartitionException.java new file mode 100644 index 0000000000000..8231f73701d60 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/InvalidPartitionException.java @@ -0,0 +1,87 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.RMResources; +import com.azure.data.cosmos.internal.directconnectivity.HttpUtils; +import com.azure.data.cosmos.internal.directconnectivity.WFConstants; +import com.azure.data.cosmos.internal.http.HttpHeaders; + +import java.util.Map; + +/** + * While this class is public, but it is not part of our published public APIs. + * This is meant to be internally used only by our sdk. + */ +public class InvalidPartitionException extends CosmosClientException { + + private static final long serialVersionUID = 1L; + + public InvalidPartitionException() { + this(RMResources.Gone); + } + + public InvalidPartitionException(CosmosError cosmosError, long lsn, String partitionKeyRangeId, Map responseHeaders) { + super(HttpConstants.StatusCodes.GONE, cosmosError, responseHeaders); + BridgeInternal.setLSN(this, lsn); + BridgeInternal.setPartitionKeyRangeId(this, partitionKeyRangeId); + } + + public InvalidPartitionException(String msg) { + super(HttpConstants.StatusCodes.GONE, msg); + setSubStatus(); + } + + public InvalidPartitionException(String msg, String resourceAddress) { + super(msg, null, null, HttpConstants.StatusCodes.GONE, resourceAddress); + setSubStatus(); + } + + public InvalidPartitionException(String message, HttpHeaders headers, String requestUri) { + this(message, null, headers, requestUri); + } + + InvalidPartitionException(Exception innerException) { + this(RMResources.Gone, innerException, null, null); + } + + InvalidPartitionException(String message, + Exception innerException, + HttpHeaders headers, + String requestUri) { + super(String.format("%s: %s", RMResources.Gone, message), + innerException, + HttpUtils.asMap(headers), + HttpConstants.StatusCodes.GONE, + requestUri); + + setSubStatus(); + } + + private void setSubStatus() { + this.responseHeaders().put( + WFConstants.BackendHeaders.SUB_STATUS, + Integer.toString(HttpConstants.SubStatusCodes.NAME_CACHE_IS_STALE)); + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/JsonSerializable.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/JsonSerializable.java new file mode 100644 index 0000000000000..8d8ef71da9b92 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/JsonSerializable.java @@ -0,0 +1,603 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions 
of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.Strings; +import com.azure.data.cosmos.internal.Utils; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.node.ArrayNode; +import com.fasterxml.jackson.databind.node.ObjectNode; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Modifier; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +/** + * Represents a base resource that can be serialized to JSON in the Azure Cosmos DB database service. + */ +public class JsonSerializable { + private final static Logger logger = LoggerFactory.getLogger(JsonSerializable.class); + private final static ObjectMapper OBJECT_MAPPER = Utils.getSimpleObjectMapper(); + private ObjectMapper om; + transient ObjectNode propertyBag = null; + + protected JsonSerializable() { + this.propertyBag = OBJECT_MAPPER.createObjectNode(); + } + + /** + * Constructor. + * + * @param jsonString the json string that represents the JsonSerializable. + * @param objectMapper the custom object mapper + */ + JsonSerializable(String jsonString, ObjectMapper objectMapper) { + // TODO: Made package private due to #153. #171 adding custom serialization options back. + this.propertyBag = fromJson(jsonString); + this.om = objectMapper; + } + + /** + * Constructor. + * + * @param jsonString the json string that represents the JsonSerializable. + */ + protected JsonSerializable(String jsonString) { + this.propertyBag = fromJson(jsonString); + } + + /** + * Constructor. + * + * @param objectNode the {@link ObjectNode} that represent the {@link JsonSerializable} + */ + JsonSerializable(ObjectNode objectNode) { + this.propertyBag = objectNode; + } + + private ObjectMapper getMapper() { + // TODO: Made package private due to #153. #171 adding custom serialization options back. + if (this.om != null) { return this.om; } + return OBJECT_MAPPER; + } + + void setMapper(ObjectMapper om) { + this.om = om; + } + + private static void checkForValidPOJO(Class c) { + if (c.isAnonymousClass() || c.isLocalClass()) { + throw new IllegalArgumentException( + String.format("%s can't be an anonymous or local class.", c.getName())); + } + if (c.isMemberClass() && !Modifier.isStatic(c.getModifiers())) { + throw new IllegalArgumentException( + String.format("%s must be static if it's a member class.", c.getName())); + } + } + + public Logger getLogger() { + return logger; + } + + void populatePropertyBag() { + } + + /** + * Returns the propertybag(JSONObject) in a hashMap + * + * @return the HashMap. 
+ */ + public Map getMap() { + return getMapper().convertValue(this.propertyBag, HashMap.class); + } + + /** + * Checks whether a property exists. + * + * @param propertyName the property to look up. + * @return true if the property exists. + */ + public boolean has(String propertyName) { + return this.propertyBag.has(propertyName); + } + + /** + * Removes a value by propertyName. + * + * @param propertyName the property to remove. + */ + void remove(String propertyName) { + this.propertyBag.remove(propertyName); + } + + /** + * Sets the value of a property. + * + * @param the type of the object. + * @param propertyName the property to set. + * @param value the value of the property. + */ + @SuppressWarnings({"unchecked", "rawtypes"}) + void set(String propertyName, T value) { + if (value == null) { + // Sets null. + this.propertyBag.putNull(propertyName); + } else if (value instanceof Collection) { + // Collection. + ArrayNode jsonArray = propertyBag.arrayNode(); + this.internalSetCollection(propertyName, (Collection) value, jsonArray); + this.propertyBag.set(propertyName, jsonArray); + } else if (value instanceof JsonNode) { + this.propertyBag.set(propertyName, (JsonNode) value); + } else if (value instanceof JsonSerializable) { + // JsonSerializable + JsonSerializable castedValue = (JsonSerializable) value; + if (castedValue != null) { + castedValue.populatePropertyBag(); + } + this.propertyBag.set(propertyName, castedValue != null ? castedValue.propertyBag : null); + } else { + // POJO, ObjectNode, number (includes int, float, double etc), boolean, + // and string. + this.propertyBag.set(propertyName, getMapper().valueToTree(value)); + } + } + + @SuppressWarnings({"unchecked", "rawtypes"}) + private void internalSetCollection(String propertyName, Collection collection, ArrayNode targetArray) { + for (T childValue : collection) { + if (childValue == null) { + // Sets null. + targetArray.addNull(); + } else if (childValue instanceof Collection) { + // When T is also a Collection, use recursion. + ArrayNode childArray = targetArray.addArray(); + this.internalSetCollection(propertyName, (Collection) childValue, childArray); + } else if (childValue instanceof JsonNode) { + targetArray.add((JsonNode) childValue); + } else if (childValue instanceof JsonSerializable) { + // JsonSerializable + JsonSerializable castedValue = (JsonSerializable) childValue; + castedValue.populatePropertyBag(); + targetArray.add(castedValue.propertyBag != null ? castedValue.propertyBag : this.getMapper().createObjectNode()); + } else { + // POJO, JSONObject, NUMBER (includes Int, Float, Double etc), + // Boolean, and STRING. + targetArray.add(this.getMapper().valueToTree(childValue)); + } + } + } + + /** + * Gets a property value as Object. + * + * @param propertyName the property to get. + * @return the value of the property. + */ + public Object get(String propertyName) { + if (this.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) { + return getValue(this.propertyBag.get(propertyName)); + } else { + return null; + } + } + + /** + * Gets a string value. + * + * @param propertyName the property to get. + * @return the string value. + */ + public String getString(String propertyName) { + if (this.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) { + return this.propertyBag.get(propertyName).asText(); + } else { + return null; + } + } + + /** + * Gets a boolean value. + * + * @param propertyName the property to get. + * @return the boolean value. 
+ */ + public Boolean getBoolean(String propertyName) { + if (this.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) { + return this.propertyBag.get(propertyName).asBoolean(); + } else { + return null; + } + } + + /** + * Gets an integer value. + * + * @param propertyName the property to get. + * @return the boolean value + */ + public Integer getInt(String propertyName) { + if (this.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) { + return Integer.valueOf(this.propertyBag.get(propertyName).asInt()); + } else { + return null; + } + } + + /** + * Gets a long value. + * + * @param propertyName the property to get. + * @return the long value + */ + public Long getLong(String propertyName) { + if (this.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) { + return Long.valueOf(this.propertyBag.get(propertyName).asLong()); + } else { + return null; + } + } + + /** + * Gets a double value. + * + * @param propertyName the property to get. + * @return the double value. + */ + public Double getDouble(String propertyName) { + if (this.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) { + return new Double(this.propertyBag.get(propertyName).asDouble()); + } else { + return null; + } + } + + /** + * Gets an object value. + * + * @param the type of the object. + * @param propertyName the property to get. + * @param c the class of the object. If c is a POJO class, it must be a member (and not an anonymous or local) + * and a static one. + * @param convertFromCamelCase boolean indicating if String should be converted from camel case to upper case separated by underscore, + * before converting to required class. + * @return the object value. + */ + public T getObject(String propertyName, Class c, boolean ... convertFromCamelCase) { + if (this.propertyBag.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) { + JsonNode jsonObj = propertyBag.get(propertyName); + if (Number.class.isAssignableFrom(c) || String.class.isAssignableFrom(c) + || Boolean.class.isAssignableFrom(c) || Object.class == c) { + // NUMBER, STRING, Boolean + return c.cast(getValue(jsonObj)); + } else if (Enum.class.isAssignableFrom(c)) { + try { + String value = String.class.cast(getValue(jsonObj)); + value = convertFromCamelCase.length > 0 && convertFromCamelCase[0] ? Strings.fromCamelCaseToUpperCase(value) : value; + return c.cast(c.getMethod("valueOf", String.class).invoke(null, value)); + } catch (IllegalAccessException | IllegalArgumentException | InvocationTargetException + | NoSuchMethodException | SecurityException e) { + throw new IllegalStateException("Failed to create enum.", e); + } + } else if (JsonSerializable.class.isAssignableFrom(c)) { + try { + Constructor constructor = c.getDeclaredConstructor(String.class); + if(Modifier.isPrivate(constructor.getModifiers())) { + constructor.setAccessible(true); + } + return constructor.newInstance(toJson(jsonObj)); + } catch (InstantiationException | IllegalAccessException | IllegalArgumentException + | InvocationTargetException | NoSuchMethodException | SecurityException e) { + throw new IllegalStateException("Failed to instantiate class object.", e); + } + } else { + // POJO + JsonSerializable.checkForValidPOJO(c); + try { + return this.getMapper().treeToValue(jsonObj, c); + } catch (IOException e) { + throw new IllegalStateException("Failed to get POJO.", e); + } + } + } + + return null; + } + + /** + * Gets an object List. + * + * @param the type of the objects in the List. 
+ * @param propertyName the property to get + * @param c the class of the object. If c is a POJO class, it must be a member (and not an anonymous or local) + * and a static one. + * @param convertFromCamelCase boolean indicating if String should be converted from camel case to upper case separated by underscore, + * before converting to required class. + * @return the object collection. + */ + public List getList(String propertyName, Class c, boolean ... convertFromCamelCase) { + if (this.propertyBag.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) { + ArrayNode jsonArray = (ArrayNode) this.propertyBag.get(propertyName); + ArrayList result = new ArrayList(); + + boolean isBaseClass = false; + boolean isEnumClass = false; + boolean isJsonSerializable = false; + + // Check once. + if (Number.class.isAssignableFrom(c) || String.class.isAssignableFrom(c) + || Boolean.class.isAssignableFrom(c) || Object.class == c) { + isBaseClass = true; + } else if (Enum.class.isAssignableFrom(c)) { + isEnumClass = true; + } else if (JsonSerializable.class.isAssignableFrom(c)) { + isJsonSerializable = true; + } else { + JsonSerializable.checkForValidPOJO(c); + } + + for (JsonNode n : jsonArray) { + if (isBaseClass) { + // NUMBER, STRING, Boolean + result.add(c.cast(getValue(n))); + } else if (isEnumClass) { + try { + String value = String.class.cast(getValue(n)); + value = convertFromCamelCase.length > 0 && convertFromCamelCase[0] ? Strings.fromCamelCaseToUpperCase(value) : value; + result.add(c.cast(c.getMethod("valueOf", String.class).invoke(null, value))); + } catch (IllegalAccessException | IllegalArgumentException | InvocationTargetException + | NoSuchMethodException | SecurityException e) { + throw new IllegalStateException("Failed to create enum.", e); + } + } else if (isJsonSerializable) { + // JsonSerializable + try { + Constructor constructor = c.getDeclaredConstructor(String.class); + if(Modifier.isPrivate(constructor.getModifiers())) { + constructor.setAccessible(true); + } + result.add(constructor.newInstance(toJson(n))); + } catch (InstantiationException | IllegalAccessException | IllegalArgumentException + | InvocationTargetException | NoSuchMethodException | SecurityException e) { + throw new IllegalStateException("Failed to instantiate class object.", e); + } + } else { + // POJO + try { + result.add(this.getMapper().treeToValue(n, c)); + } catch (IOException e) { + throw new IllegalStateException("Failed to get POJO.", e); + } + } + } + return result; + } + return null; + } + + /** + * Gets an object collection. + * + * @param the type of the objects in the collection. + * @param propertyName the property to get + * @param c the class of the object. If c is a POJO class, it must be a member (and not an anonymous or local) + * and a static one. + * @param convertFromCamelCase boolean indicating if String should be converted from camel case to upper case separated by underscore, + * before converting to required class. + * @return the object collection. + */ + public Collection getCollection(String propertyName, Class c, boolean ... convertFromCamelCase) { + return getList(propertyName, c, convertFromCamelCase); + } + + /** + * Gets a JSONObject. + * + * @param propertyName the property to get. + * @return the JSONObject. 
+ */ + ObjectNode getObject(String propertyName) { + if (this.propertyBag.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) { + ObjectNode jsonObj = (ObjectNode) this.propertyBag.get(propertyName); + return jsonObj; + } + return null; + } + + /** + * Gets a JSONObject collection. + * + * @param propertyName the property to get. + * @return the JSONObject collection. + */ + Collection getCollection(String propertyName) { + Collection result = null; + if (this.propertyBag.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) { + result = new ArrayList(); + + for (JsonNode n : this.propertyBag.findValues(propertyName)) { + result.add((ObjectNode) n); + } + } + + return result; + } + + /** + * Gets the value of a property identified by an array of property names that forms the path. + * + * @param propertyNames that form the path to the property to get. + * @return the value of the property. + */ + public Object getObjectByPath(List propertyNames) { + ObjectNode propBag = this.propertyBag; + JsonNode value = null; + String propertyName = null; + Integer matchedProperties = 0; + Iterator iterator = propertyNames.iterator(); + if (iterator.hasNext()) { + do { + propertyName = iterator.next(); + if (propBag.has(propertyName)) { + matchedProperties++; + value = propBag.get(propertyName); + if (!value.isObject()) { + break; + } + propBag = (ObjectNode) value; + } else { + break; + } + } while (iterator.hasNext()); + + if (value != null && matchedProperties == propertyNames.size()) { + return getValue(value); + } + } + + return null; + } + + static Object getValue(JsonNode value) { + if (value.isValueNode()) { + switch (value.getNodeType()) { + case BOOLEAN: + return value.asBoolean(); + case NUMBER: + if (value.isInt()) { + return value.asInt(); + } else if (value.isLong()) { + return value.asLong(); + } else if (value.isDouble()) { + return value.asDouble(); + } + case STRING : + return value.asText(); + } + } + return value; + } + + private ObjectNode fromJson(String json){ + try { + return (ObjectNode) getMapper().readTree(json); + } catch (IOException e) { + throw new IllegalArgumentException(String.format("Unable to parse JSON %s", json), e); + } + } + + private String toJson(Object object){ + try { + return getMapper().writeValueAsString(object); + } catch (JsonProcessingException e) { + throw new IllegalStateException("Unable to convert JSON to STRING", e); + } + } + + private String toPrettyJson(Object object){ + try { + return getMapper().writerWithDefaultPrettyPrinter().writeValueAsString(object); + } catch (JsonProcessingException e) { + throw new IllegalStateException("Unable to convert JSON to STRING", e); + } + } + + /** + * Converts to an Object (only POJOs and JSONObject are supported). + * + * @param the type of the object. + * @param c the class of the object, either a POJO class or JSONObject. If c is a POJO class, it must be a member + * (and not an anonymous or local) and a static one. + * @return the POJO. 
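// Illustrative usage sketch (not part of the patch): getObjectByPath(...) walks nested
// objects one property name at a time, and getList(...) materializes a JSON array as a
// typed List. PartitionKeyDefinition from this patch is used as the JsonSerializable
// subclass; the wire property names "paths" and "kind" are assumptions.
import com.azure.data.cosmos.PartitionKeyDefinition;
import java.util.Arrays;
import java.util.List;

class PathAndListSketch {
    static void demo() {
        PartitionKeyDefinition pkd = new PartitionKeyDefinition();
        pkd.paths(Arrays.asList("/tenantId", "/userId"));
        pkd.toJson();                                              // populates the property bag
        List<String> paths = pkd.getList("paths", String.class);   // ["/tenantId", "/userId"]
        Object kind = pkd.getObjectByPath(Arrays.asList("kind"));  // "Hash" for a top-level property
    }
}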
+ */ + public T toObject(Class c) { + if (JsonSerializable.class.isAssignableFrom(c) || String.class.isAssignableFrom(c) + || Number.class.isAssignableFrom(c) || Boolean.class.isAssignableFrom(c)) { + throw new IllegalArgumentException("c can only be a POJO class or JSONObject"); + } + if (ObjectNode.class.isAssignableFrom(c)) { + // JSONObject + if (ObjectNode.class != c) { + throw new IllegalArgumentException("We support JSONObject but not its sub-classes."); + } + return c.cast(this.propertyBag); + } else { + // POJO + JsonSerializable.checkForValidPOJO(c); + try { + return this.getMapper().readValue(this.toJson(), c); + } catch (IOException e) { + throw new IllegalStateException("Failed to get POJO.", e); + } + } + } + + /** + * Converts to a JSON string. + * + * @return the JSON string. + */ + public String toJson() { + return this.toJson(SerializationFormattingPolicy.NONE); + } + + /** + * Converts to a JSON string. + * + * @param formattingPolicy the formatting policy to be used. + * @return the JSON string. + */ + public String toJson(SerializationFormattingPolicy formattingPolicy) { + this.populatePropertyBag(); + if (SerializationFormattingPolicy.INDENTED.equals(formattingPolicy) ) { + return toPrettyJson(propertyBag); + } else { + return toJson(propertyBag); + } + } + + /** + * Gets Simple STRING representation of property bag. + * + * For proper conversion to json and inclusion of the default values + * use {@link #toJson()}. + * + * @return string representation of property bag. + */ + public String toString() { + return toJson(propertyBag); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/LockedException.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/LockedException.java new file mode 100644 index 0000000000000..36c22c3c965d2 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/LockedException.java @@ -0,0 +1,75 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.RMResources; +import com.azure.data.cosmos.internal.directconnectivity.HttpUtils; +import com.azure.data.cosmos.internal.http.HttpHeaders; + +import java.util.Map; + +/** + * While this class is public, but it is not part of our published public APIs. + * This is meant to be internally used only by our sdk. 
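// Illustrative usage sketch (not part of the patch): toJson() serializes the property
// bag, optionally pretty-printed via SerializationFormattingPolicy.INDENTED, and
// toObject(Class) hands back either a mapped POJO or the raw ObjectNode.
// SerializationFormattingPolicy is assumed to live in com.azure.data.cosmos.
import com.azure.data.cosmos.PartitionKeyDefinition;
import com.azure.data.cosmos.SerializationFormattingPolicy;
import com.fasterxml.jackson.databind.node.ObjectNode;
import java.util.Collections;

class SerializationSketch {
    static void demo() {
        PartitionKeyDefinition pkd = new PartitionKeyDefinition();
        pkd.paths(Collections.singletonList("/id"));
        String compact = pkd.toJson();                                       // populates and serializes
        String pretty = pkd.toJson(SerializationFormattingPolicy.INDENTED);  // indented form
        ObjectNode raw = pkd.toObject(ObjectNode.class);                     // the ObjectNode branch of toObject
    }
}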
+ */ +public class LockedException extends CosmosClientException { + private static final long serialVersionUID = 1L; + + LockedException() { + this(RMResources.Locked); + } + + public LockedException(CosmosError cosmosError, long lsn, String partitionKeyRangeId, Map responseHeaders) { + super(HttpConstants.StatusCodes.LOCKED, cosmosError, responseHeaders); + BridgeInternal.setLSN(this, lsn); + BridgeInternal.setPartitionKeyRangeId(this, partitionKeyRangeId); + } + + LockedException(String msg) { + super(HttpConstants.StatusCodes.LOCKED, msg); + } + + LockedException(String msg, String resourceAddress) { + super(msg, null, null, HttpConstants.StatusCodes.LOCKED, resourceAddress); + } + + public LockedException(String message, HttpHeaders headers, String requestUriString) { + this(message, null, headers, requestUriString); + } + + LockedException(Exception innerException) { + this(RMResources.Locked, innerException, null, null); + } + + LockedException(String message, + Exception innerException, + HttpHeaders headers, + String requestUriString) { + super(String.format("%s: %s", RMResources.Locked, message), + innerException, + HttpUtils.asMap(headers), + HttpConstants.StatusCodes.LOCKED, + requestUriString); + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/MethodNotAllowedException.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/MethodNotAllowedException.java new file mode 100644 index 0000000000000..f81f09bf57527 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/MethodNotAllowedException.java @@ -0,0 +1,71 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
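// Illustrative usage sketch (not part of the patch): the exception types added in this
// patch all extend CosmosClientException, so callers can catch the base type and branch
// on the concrete class. readDocument(...) is a hypothetical operation used only to
// anchor the try/catch; it is not an API defined in this diff.
import com.azure.data.cosmos.CosmosClientException;
import com.azure.data.cosmos.LockedException;
import com.azure.data.cosmos.NotFoundException;

class StatusHandlingSketch {
    void handle() {
        try {
            readDocument("dbs/db/colls/coll/docs/doc1");    // hypothetical
        } catch (LockedException e) {
            // StatusCodes.LOCKED: the resource is temporarily locked; retry later.
        } catch (NotFoundException e) {
            // StatusCodes.NOTFOUND: treat as "does not exist" rather than as a hard failure.
        } catch (CosmosClientException e) {
            // Any other service status code surfaces through the common base type.
        }
    }

    private void readDocument(String documentLink) throws CosmosClientException { /* hypothetical */ }
}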
+ */ + +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.RMResources; +import com.azure.data.cosmos.internal.directconnectivity.HttpUtils; +import com.azure.data.cosmos.internal.http.HttpHeaders; + +import java.net.URI; +import java.util.Map; + +public class MethodNotAllowedException extends CosmosClientException { + MethodNotAllowedException() { + this(RMResources.MethodNotAllowed); + } + + public MethodNotAllowedException(CosmosError cosmosError, long lsn, String partitionKeyRangeId, Map responseHeaders) { + super(HttpConstants.StatusCodes.METHOD_NOT_ALLOWED, cosmosError, responseHeaders); + BridgeInternal.setLSN(this, lsn); + BridgeInternal.setPartitionKeyRangeId(this, partitionKeyRangeId); + } + + MethodNotAllowedException(String message) { + this(message, null, null, null); + } + + MethodNotAllowedException(String message, HttpHeaders headers, String requestUriString) { + this(message, null, headers, requestUriString); + } + + MethodNotAllowedException(String message, HttpHeaders headers, URI requestUri) { + this(message, headers, requestUri != null ? requestUri.toString() : null); + } + + MethodNotAllowedException(Exception innerException) { + this(RMResources.MethodNotAllowed, innerException, null, null); + } + + public MethodNotAllowedException(String message, + Exception innerException, + HttpHeaders headers, + String requestUriString) { + super(String.format("%s: %s", RMResources.MethodNotAllowed, message), + innerException, + HttpUtils.asMap(headers), + HttpConstants.StatusCodes.METHOD_NOT_ALLOWED, + requestUriString); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/NotFoundException.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/NotFoundException.java new file mode 100644 index 0000000000000..bf3c5752004f9 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/NotFoundException.java @@ -0,0 +1,87 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.RMResources; +import com.azure.data.cosmos.internal.directconnectivity.HttpUtils; +import com.azure.data.cosmos.internal.http.HttpHeaders; + +import java.net.URI; +import java.util.Map; + +/** + * While this class is public, but it is not part of our published public APIs. 
+ * This is meant to be internally used only by our sdk. + */ +public class NotFoundException extends CosmosClientException { + private static final long serialVersionUID = 1L; + + public NotFoundException() { + this(RMResources.NotFound); + } + + public NotFoundException(CosmosError cosmosError, long lsn, String partitionKeyRangeId, Map responseHeaders) { + super(HttpConstants.StatusCodes.NOTFOUND, cosmosError, responseHeaders); + BridgeInternal.setLSN(this, lsn); + BridgeInternal.setPartitionKeyRangeId(this, partitionKeyRangeId); + } + + public NotFoundException(String message) { + this(message, null, (HttpHeaders) null, null); + } + + public NotFoundException(String message, Map headers, String requestUri) { + this(message, null, headers, requestUri); + } + + NotFoundException(String message, HttpHeaders headers, String requestUri) { + this(message, null, headers, requestUri); + } + + public NotFoundException(String message, HttpHeaders headers, URI requestUri) { + this(message, headers, requestUri != null ? requestUri.toString() : null); + } + + NotFoundException(Exception innerException) { + this(RMResources.NotFound, innerException, (Map) null, null); + } + + NotFoundException(String message, + Exception innerException, + HttpHeaders headers, + String requestUri) { + this(message, innerException, HttpUtils.asMap(headers), requestUri); + } + + NotFoundException(String message, + Exception innerException, + Map headers, + String requestUri) { + super(String.format("%s: %s", RMResources.NotFound, message), + innerException, + headers, + HttpConstants.StatusCodes.NOTFOUND, + requestUri); + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/PartitionIsMigratingException.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/PartitionIsMigratingException.java new file mode 100644 index 0000000000000..3589cf67d5235 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/PartitionIsMigratingException.java @@ -0,0 +1,87 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.RMResources; +import com.azure.data.cosmos.internal.directconnectivity.HttpUtils; +import com.azure.data.cosmos.internal.directconnectivity.WFConstants; +import com.azure.data.cosmos.internal.http.HttpHeaders; + +import java.util.Map; + +/** + * While this class is public, but it is not part of our published public APIs. + * This is meant to be internally used only by our sdk. + */ +public class PartitionIsMigratingException extends CosmosClientException { + + private static final long serialVersionUID = 1L; + + public PartitionIsMigratingException() { + this(RMResources.Gone); + } + + public PartitionIsMigratingException(CosmosError cosmosError, long lsn, String partitionKeyRangeId, Map responseHeaders) { + super(HttpConstants.StatusCodes.GONE, cosmosError, responseHeaders); + BridgeInternal.setLSN(this, lsn); + BridgeInternal.setPartitionKeyRangeId(this, partitionKeyRangeId); + } + + PartitionIsMigratingException(String msg) { + super(HttpConstants.StatusCodes.GONE, msg); + setSubStatus(); + } + + PartitionIsMigratingException(String msg, String resourceAddress) { + super(msg, null, null, HttpConstants.StatusCodes.GONE, resourceAddress); + setSubStatus(); + } + + public PartitionIsMigratingException(String message, HttpHeaders headers, String requestUri) { + this(message, null, headers, requestUri); + } + + PartitionIsMigratingException(Exception innerException) { + this(RMResources.Gone, innerException, null, null); + } + + PartitionIsMigratingException(String message, + Exception innerException, + HttpHeaders headers, + String requestUri) { + super(String.format("%s: %s", RMResources.Gone, message), + innerException, + HttpUtils.asMap(headers), + HttpConstants.StatusCodes.GONE, + requestUri); + + setSubStatus(); + } + + private void setSubStatus() { + this.responseHeaders().put( + WFConstants.BackendHeaders.SUB_STATUS, + Integer.toString(HttpConstants.SubStatusCodes.COMPLETING_PARTITION_MIGRATION)); + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/PartitionKey.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/PartitionKey.java new file mode 100644 index 0000000000000..ff4130e6152da --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/PartitionKey.java @@ -0,0 +1,98 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.Utils; +import com.azure.data.cosmos.internal.routing.PartitionKeyInternal; + +/** + * Represents a partition key value in the Azure Cosmos DB database service. A + * partition key identifies the partition where the document is stored in. + */ +public class PartitionKey { + + private PartitionKeyInternal internalPartitionKey; + + PartitionKey(PartitionKeyInternal partitionKeyInternal) { + this.internalPartitionKey = partitionKeyInternal; + } + + /** + * Constructor. CREATE a new instance of the PartitionKey object. + * + * @param key the value of the partition key. + */ + @SuppressWarnings("serial") + public PartitionKey(final Object key) { + this.internalPartitionKey = PartitionKeyInternal.fromObjectArray(new Object[] { key }, true); + } + + /** + * Create a new instance of the PartitionKey object from a serialized JSON + * partition key. + * + * @param jsonString the JSON string representation of this PartitionKey object. + * @return the PartitionKey instance. + */ + public static PartitionKey fromJsonString(String jsonString) { + return new PartitionKey(PartitionKeyInternal.fromJsonString(jsonString)); + } + + public static PartitionKey None = new PartitionKey(PartitionKeyInternal.None); + + /** + * Serialize the PartitionKey object to a JSON string. + * + * @return the string representation of this PartitionKey object. + */ + public String toString() { + return this.internalPartitionKey.toJson(); + } + + // TODO: make private + public PartitionKeyInternal getInternalPartitionKey() { + return internalPartitionKey; + } + + /** + * Overrides the Equal operator for object comparisons between two instances of + * {@link PartitionKey} + * + * @param other The object to compare with. + * @return True if two object instance are considered equal. + */ + @Override + public boolean equals(Object other) { + if (other == null) { + return false; + } + + if (this == other) { + return true; + } + + PartitionKey otherKey = Utils.as(other, PartitionKey.class); + return otherKey != null && this.internalPartitionKey.equals(otherKey.internalPartitionKey); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/PartitionKeyDefinition.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/PartitionKeyDefinition.java new file mode 100644 index 0000000000000..b35ae9eb6562f --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/PartitionKeyDefinition.java @@ -0,0 +1,181 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
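// Illustrative usage sketch (not part of the patch): a PartitionKey wraps a single
// partition key value, toString() emits its JSON form, and fromJsonString(...)
// round-trips it. The exact JSON shape shown in the comment is an assumption.
import com.azure.data.cosmos.PartitionKey;

class PartitionKeySketch {
    static void demo() {
        PartitionKey byTenant = new PartitionKey("contoso");
        String json = byTenant.toString();                      // e.g. ["contoso"]
        PartitionKey roundTripped = PartitionKey.fromJsonString(json);
        boolean same = byTenant.equals(roundTripped);           // value-based equality
        PartitionKey none = PartitionKey.None;                  // documents with no partition key value
    }
}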
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.Constants; +import com.azure.data.cosmos.internal.Strings; +import com.azure.data.cosmos.internal.routing.PartitionKeyInternal; +import org.apache.commons.lang3.StringUtils; + +import java.util.ArrayList; +import java.util.List; + +/** + * Represents a partition key definition in the Azure Cosmos DB database service. A partition key definition specifies which + * document property is used as the partition key in a collection that has multiple partitions. + */ +public final class PartitionKeyDefinition extends JsonSerializable { + private List paths; + private PartitionKind kind; + private PartitionKeyDefinitionVersion version; + private Boolean systemKey; + + /** + * Constructor. Creates a new instance of the PartitionKeyDefinition object. + */ + public PartitionKeyDefinition() { + this.kind(PartitionKind.HASH); + } + + /** + * Constructor. Creates a new instance of the PartitionKeyDefinition object from a + * JSON string. + * + * @param jsonString the JSON string that represents the partition key definition. + */ + PartitionKeyDefinition(String jsonString) { + super(jsonString); + } + + /** + * Sets the partition algorithm used to calculate the partition id given a partition key. + * + * @return the partition algorithm. + */ + public PartitionKind kind() { + if (this.kind == null) { + this.kind = super.getObject(Constants.Properties.PARTITION_KIND, PartitionKind.class, true); + } + + return this.kind; + } + + /** + * Sets the partition algorithm used to calculate the partition id given a partition key. + * + * @param kind the partition algorithm. + * @return this PartitionKeyDefinition. + */ + public PartitionKeyDefinition kind(PartitionKind kind) { + this.kind = kind; + return this; + } + + public PartitionKeyDefinitionVersion version() { + if (this.version == null) { + Object versionObject = super.getObject(Constants.Properties.PARTITION_KEY_DEFINITION_VERSION, Object.class); + if (versionObject == null) { + this.version = null; + } else { + String versionStr = String.valueOf(versionObject); + if (StringUtils.isNumeric(versionStr)) { + this.version = PartitionKeyDefinitionVersion.valueOf(String.format("V%d", Integer.parseInt(versionStr))); + } else { + this.version = !Strings.isNullOrEmpty(versionStr) + ? PartitionKeyDefinitionVersion.valueOf(StringUtils.upperCase(versionStr)) + : null; + } + } + } + + return this.version; + } + + public PartitionKeyDefinition version(PartitionKeyDefinitionVersion version) { + this.version = version; + return this; + } + + /** + * Gets the document property paths for the partition key. + * + * @return the paths to the document properties that form the partition key. 
+ */ + public List paths() { + if (this.paths == null) { + if (super.has(Constants.Properties.PARTITION_KEY_PATHS)) { + paths = super.getList(Constants.Properties.PARTITION_KEY_PATHS, String.class); + } else { + paths = new ArrayList<>(); + } + } + + return paths; + } + + /** + * Sets the document property paths for the partition key. + * + * @param paths the paths to document properties that form the partition key. + * @return this PartitionKeyDefinition. + */ + public PartitionKeyDefinition paths(List paths) { + if (paths == null || paths.size() == 0) { + throw new IllegalArgumentException("paths must not be null or empty."); + } + + this.paths = paths; + return this; + } + + /** + * Indicates if the partition key is generated by the system. + * + * @return the boolean indicating is it is a system key. + */ + Boolean isSystemKey() { + if (this.systemKey == null) { + if (super.has(Constants.Properties.SYSTEM_KEY)) { + this.systemKey = super.getBoolean(Constants.Properties.SYSTEM_KEY); + } else { + this.systemKey = false; + } + } + + return this.systemKey; + } + + PartitionKeyInternal getNonePartitionKeyValue() { + if (this.paths().size() == 0 || this.isSystemKey()) { + return PartitionKeyInternal.Empty; + } else { + return PartitionKeyInternal.UndefinedPartitionKey; + } + } + + @Override + void populatePropertyBag() { + if (this.kind != null) { + super.set(Constants.Properties.PARTITION_KIND, kind.toString()); + } + if (this.paths != null) { + super.set(Constants.Properties.PARTITION_KEY_PATHS, paths); + } + + if (this.version != null) { + super.set(Constants.Properties.PARTITION_KEY_DEFINITION_VERSION, version.toString()); + } + super.populatePropertyBag(); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/PartitionKeyDefinitionVersion.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/PartitionKeyDefinitionVersion.java new file mode 100644 index 0000000000000..296ac803b2f92 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/PartitionKeyDefinitionVersion.java @@ -0,0 +1,49 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +/** + * Partitioning version. + */ +public enum PartitionKeyDefinitionVersion { + + /** + * Original version of hash partitioning. + */ + V1(1), + + /** + * Enhanced version of hash partitioning - offers better distribution of long partition keys and uses less storage. 
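// Illustrative usage sketch (not part of the patch): building a definition with the
// fluent setters above: a single "/id" path, HASH partitioning (already the constructor
// default) and the enhanced-hash V2 definition version.
import com.azure.data.cosmos.PartitionKeyDefinition;
import com.azure.data.cosmos.PartitionKeyDefinitionVersion;
import com.azure.data.cosmos.PartitionKind;
import java.util.Collections;

class PartitionKeyDefinitionSketch {
    static PartitionKeyDefinition demo() {
        return new PartitionKeyDefinition()
                .paths(Collections.singletonList("/id"))
                .kind(PartitionKind.HASH)
                .version(PartitionKeyDefinitionVersion.V2);
    }
}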
+ * + * This version should be used for any practical purpose, but it is available in newer SDKs only. + */ + V2(2); + + int val; + + PartitionKeyDefinitionVersion(int val) { + this.val = val; + } + +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/PartitionKeyRangeGoneException.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/PartitionKeyRangeGoneException.java new file mode 100644 index 0000000000000..cc1e9cc2c340b --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/PartitionKeyRangeGoneException.java @@ -0,0 +1,80 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.RMResources; +import com.azure.data.cosmos.internal.directconnectivity.HttpUtils; +import com.azure.data.cosmos.internal.directconnectivity.WFConstants; +import com.azure.data.cosmos.internal.http.HttpHeaders; + +import java.util.Map; + +/** + * This exception is thrown when DocumentServiceRequest contains x-ms-documentdb-partitionkeyrangeid + * header and such range id doesn't exist. + *

+ * No retries should be made in this case, as either split or merge might have happened and query/readfeed + * must take appropriate actions. + */ +public class PartitionKeyRangeGoneException extends CosmosClientException { + + public PartitionKeyRangeGoneException() { + this(RMResources.Gone); + } + + public PartitionKeyRangeGoneException(CosmosError cosmosError, long lsn, String partitionKeyRangeId, Map responseHeaders) { + super(HttpConstants.StatusCodes.GONE, cosmosError, responseHeaders); + BridgeInternal.setLSN(this, lsn); + BridgeInternal.setPartitionKeyRangeId(this, partitionKeyRangeId); + this.setSubstatus(); + } + + public PartitionKeyRangeGoneException(String message) { + this(message, null, null, null); + } + + PartitionKeyRangeGoneException(String message, Exception innerException) { + this(message, innerException, null, null); + } + + PartitionKeyRangeGoneException(Exception innerException) { + this(RMResources.Gone, innerException, null, null); + } + + + public PartitionKeyRangeGoneException(String message, HttpHeaders headers, String requestUriString) { + super(message, null, HttpUtils.asMap(headers), HttpConstants.StatusCodes.GONE, requestUriString); + this.setSubstatus(); + } + + PartitionKeyRangeGoneException(String message, Exception innerException, HttpHeaders headers, String requestUriString) { + super(message, innerException, HttpUtils.asMap(headers), HttpConstants.StatusCodes.GONE, requestUriString); + this.setSubstatus(); + } + + private void setSubstatus() { + this.responseHeaders().put(WFConstants.BackendHeaders.SUB_STATUS, Integer.toString(HttpConstants.SubStatusCodes.PARTITION_KEY_RANGE_GONE)); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/PartitionKeyRangeIsSplittingException.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/PartitionKeyRangeIsSplittingException.java new file mode 100644 index 0000000000000..40c596fd31035 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/PartitionKeyRangeIsSplittingException.java @@ -0,0 +1,87 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
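// Illustrative usage sketch (not part of the patch): several exceptions in this patch
// share the GONE status code but set different sub-status headers (partition key range
// gone, completing partition migration, completing split), so callers that need
// different reactions can branch on the concrete type. refreshRoutingMap() is a
// hypothetical callback, not an API defined in this diff.
import com.azure.data.cosmos.CosmosClientException;
import com.azure.data.cosmos.PartitionIsMigratingException;
import com.azure.data.cosmos.PartitionKeyRangeGoneException;

class GoneHandlingSketch {
    void onError(CosmosClientException e) {
        if (e instanceof PartitionKeyRangeGoneException) {
            // The addressed partition key range no longer exists (split or merge);
            // discard stale routing information before issuing the request again.
            refreshRoutingMap();                        // hypothetical
        } else if (e instanceof PartitionIsMigratingException) {
            // The partition is being migrated; typically retried after a short delay.
        }
    }

    private void refreshRoutingMap() { /* hypothetical */ }
}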
+ */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.RMResources; +import com.azure.data.cosmos.internal.directconnectivity.HttpUtils; +import com.azure.data.cosmos.internal.directconnectivity.WFConstants; +import com.azure.data.cosmos.internal.http.HttpHeaders; + +import java.util.Map; + +/** + * While this class is public, but it is not part of our published public APIs. + * This is meant to be internally used only by our sdk. + */ +public class PartitionKeyRangeIsSplittingException extends CosmosClientException { + + private static final long serialVersionUID = 1L; + + public PartitionKeyRangeIsSplittingException() { + this(RMResources.Gone); + } + + public PartitionKeyRangeIsSplittingException(CosmosError cosmosError, long lsn, String partitionKeyRangeId, Map responseHeaders) { + super(HttpConstants.StatusCodes.GONE, cosmosError, responseHeaders); + BridgeInternal.setLSN(this, lsn); + BridgeInternal.setPartitionKeyRangeId(this, partitionKeyRangeId); + } + + PartitionKeyRangeIsSplittingException(String msg) { + super(HttpConstants.StatusCodes.GONE, msg); + setSubStatus(); + } + + PartitionKeyRangeIsSplittingException(String msg, String resourceAddress) { + super(msg, null, null, HttpConstants.StatusCodes.GONE, resourceAddress); + setSubStatus(); + } + + public PartitionKeyRangeIsSplittingException(String message, HttpHeaders headers, String requestUri) { + this(message, null, headers, requestUri); + } + + PartitionKeyRangeIsSplittingException(Exception innerException) { + this(RMResources.Gone, innerException, null, null); + } + + PartitionKeyRangeIsSplittingException(String message, + Exception innerException, + HttpHeaders headers, + String requestUri) { + super(String.format("%s: %s", RMResources.Gone, message), + innerException, + HttpUtils.asMap(headers), + HttpConstants.StatusCodes.GONE, + requestUri); + + setSubStatus(); + } + + private void setSubStatus() { + this.responseHeaders().put( + WFConstants.BackendHeaders.SUB_STATUS, + Integer.toString(HttpConstants.SubStatusCodes.COMPLETING_SPLIT)); + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/PartitionKind.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/PartitionKind.java new file mode 100644 index 0000000000000..62aac4aa3fcd1 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/PartitionKind.java @@ -0,0 +1,41 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +import org.apache.commons.text.WordUtils; + +/** + * Specifies the partition scheme for an multiple-partitioned collection in the Azure Cosmos DB database service. + */ +public enum PartitionKind { + /** + * The Partition of a document is calculated based on the hash value of the PartitionKey. + */ + HASH; + + @Override + public String toString() { + return WordUtils.capitalizeFully(this.name()); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/PermissionMode.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/PermissionMode.java new file mode 100644 index 0000000000000..33b40d376f280 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/PermissionMode.java @@ -0,0 +1,61 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +import org.apache.commons.text.WordUtils; + +/** + * Enumeration specifying applicability of permission in the Azure Cosmos DB database service. + */ +public enum PermissionMode { + /** + * Permission applicable for read operations only. + */ + READ(0x1), + + /** + * Permission applicable for all operations. + */ + ALL(0x2); + + private int value; + + PermissionMode(int value) { + this.value = value; + } + + /** + * Gets the numerical value of the permission mode. + * + * @return the numerical value. 
+ */ + public int getValue() { + return value; + } + + @Override + public String toString() { + return WordUtils.capitalizeFully(this.name()); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/PreconditionFailedException.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/PreconditionFailedException.java new file mode 100644 index 0000000000000..d1bffd27c4f12 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/PreconditionFailedException.java @@ -0,0 +1,76 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.RMResources; +import com.azure.data.cosmos.internal.directconnectivity.HttpUtils; +import com.azure.data.cosmos.internal.http.HttpHeaders; + +import java.util.Map; + +/** + * While this class is public, but it is not part of our published public APIs. + * This is meant to be internally used only by our sdk. 
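// Illustrative usage sketch (not part of the patch): PreconditionFailedException
// surfaces HTTP 412, which the service returns when an etag/if-match condition on a
// write no longer holds. replaceWithEtag(...) is a hypothetical operation used only to
// anchor the catch block; it is not an API defined in this diff.
import com.azure.data.cosmos.CosmosClientException;
import com.azure.data.cosmos.PreconditionFailedException;

class OptimisticConcurrencySketch {
    void replaceIfUnchanged(String documentLink, Object updated, String etag) throws CosmosClientException {
        try {
            replaceWithEtag(documentLink, updated, etag);   // hypothetical
        } catch (PreconditionFailedException e) {
            // The document changed since it was read: re-read it, re-apply the change
            // and retry with the fresh etag.
        }
    }

    private void replaceWithEtag(String link, Object doc, String etag) throws CosmosClientException { /* hypothetical */ }
}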
+ */ +public class PreconditionFailedException extends CosmosClientException { + + private static final long serialVersionUID = 1L; + + PreconditionFailedException() { + this(RMResources.PreconditionFailed); + } + + public PreconditionFailedException(CosmosError cosmosError, long lsn, String partitionKeyRangeId, Map responseHeaders) { + super(HttpConstants.StatusCodes.PRECONDITION_FAILED, cosmosError, responseHeaders); + BridgeInternal.setLSN(this, lsn); + BridgeInternal.setPartitionKeyRangeId(this, partitionKeyRangeId); + } + + PreconditionFailedException(String msg) { + super(HttpConstants.StatusCodes.PRECONDITION_FAILED, msg); + } + + PreconditionFailedException(String msg, String resourceAddress) { + super(msg, null, null, HttpConstants.StatusCodes.PRECONDITION_FAILED, resourceAddress); + } + + public PreconditionFailedException(String message, HttpHeaders headers, String requestUriString) { + this(message, null, headers, requestUriString); + } + + PreconditionFailedException(Exception innerException) { + this(RMResources.PreconditionFailed, innerException, null, null); + } + + PreconditionFailedException(String message, + Exception innerException, + HttpHeaders headers, + String requestUriString) { + super(String.format("%s: %s", RMResources.PreconditionFailed, message), + innerException, + HttpUtils.asMap(headers), + HttpConstants.StatusCodes.PRECONDITION_FAILED, + requestUriString); + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/RangeIndex.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/RangeIndex.java new file mode 100644 index 0000000000000..a27b4ca0fa583 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/RangeIndex.java @@ -0,0 +1,132 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.Constants; +import org.apache.commons.lang3.StringUtils; + +/** + * Represents a range index in the Azure Cosmos DB database service. + */ +public final class RangeIndex extends Index { + + /** + * Initializes a new instance of the RangeIndex class with specified DataType. + *

+ * Here is an example to instantiate the RangeIndex class, passing in the DataType: + *

+     * <pre>
+     * {@code
+     *
+     * RangeIndex rangeIndex = new RangeIndex(DataType.NUMBER);
+     *
+     * }
+     * </pre>
+ * + * @param dataType the data type. + */ + public RangeIndex(DataType dataType) { + super(IndexKind.RANGE); + this.dataType(dataType); + } + + /** + * Initializes a new instance of the RangeIndex class with specified DataType and precision. + *
+     * <pre>
+     * {@code
+     *
+     * RangeIndex rangeIndex = new RangeIndex(DataType.NUMBER, -1);
+     *
+     * }
+     * </pre>
+ * @param dataType the data type of the RangeIndex + * @param precision the precision of the RangeIndex + */ + public RangeIndex(DataType dataType, int precision) { + super(IndexKind.RANGE); + this.dataType(dataType); + this.precision(precision); + } + + /** + * Initializes a new instance of the RangeIndex class with json string. + * + * @param jsonString the json string that represents the index. + */ + RangeIndex(String jsonString) { + super(jsonString, IndexKind.RANGE); + if (this.dataType() == null) { + throw new IllegalArgumentException("The jsonString doesn't contain a valid 'dataType'."); + } + } + + /** + * Gets data type. + * + * @return the data type. + */ + public DataType dataType() { + DataType result = null; + try { + result = DataType.valueOf(StringUtils.upperCase(super.getString(Constants.Properties.DATA_TYPE))); + } catch (IllegalArgumentException e) { + this.getLogger().warn("INVALID index dataType value {}.", super.getString(Constants.Properties.DATA_TYPE)); + } + return result; + } + + /** + * Sets data type. + * + * @param dataType the data type. + * @return the RangeIndex. + */ + public RangeIndex dataType(DataType dataType) { + super.set(Constants.Properties.DATA_TYPE, dataType.toString()); + return this; + } + + /** + * Gets precision. + * + * @return the precision. + */ + public int precision() { + return super.getInt(Constants.Properties.PRECISION); + } + + /** + * Sets precision. + * + * @param precision the precision. + * @return the RangeIndex. + */ + public RangeIndex precision(int precision) { + super.set(Constants.Properties.PRECISION, precision); + return this; + } + + boolean hasPrecision() { + return super.has(Constants.Properties.PRECISION); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/RequestEntityTooLargeException.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/RequestEntityTooLargeException.java new file mode 100644 index 0000000000000..ae4b889f81e77 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/RequestEntityTooLargeException.java @@ -0,0 +1,75 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
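// Illustrative usage sketch (not part of the patch): RangeIndex pairs IndexKind.RANGE
// with a DataType and an optional precision, where -1 conventionally requests maximum
// precision. DataType.STRING is assumed to exist alongside the DataType.NUMBER value
// shown in the Javadoc above.
import com.azure.data.cosmos.DataType;
import com.azure.data.cosmos.RangeIndex;

class RangeIndexSketch {
    static void demo() {
        RangeIndex numberRange = new RangeIndex(DataType.NUMBER);        // no explicit precision
        RangeIndex stringRange = new RangeIndex(DataType.STRING, -1);    // maximum precision (assumed semantics)
        boolean maxPrecision = stringRange.precision() == -1;            // reads the PRECISION property back
    }
}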
+ */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.RMResources; +import com.azure.data.cosmos.internal.directconnectivity.HttpUtils; +import com.azure.data.cosmos.internal.http.HttpHeaders; + +import java.util.Map; + +/** + * While this class is public, but it is not part of our published public APIs. + * This is meant to be internally used only by our sdk. + */ +public class RequestEntityTooLargeException extends CosmosClientException { + private static final long serialVersionUID = 1L; + + RequestEntityTooLargeException() { + this(RMResources.RequestEntityTooLarge); + } + + public RequestEntityTooLargeException(CosmosError cosmosError, long lsn, String partitionKeyRangeId, Map responseHeaders) { + super(HttpConstants.StatusCodes.REQUEST_ENTITY_TOO_LARGE, cosmosError, responseHeaders); + BridgeInternal.setLSN(this, lsn); + BridgeInternal.setPartitionKeyRangeId(this, partitionKeyRangeId); + } + + RequestEntityTooLargeException(String msg) { + super(HttpConstants.StatusCodes.REQUEST_ENTITY_TOO_LARGE, msg); + } + + RequestEntityTooLargeException(String msg, String resourceAddress) { + super(msg, null, null, HttpConstants.StatusCodes.REQUEST_ENTITY_TOO_LARGE, resourceAddress); + } + + public RequestEntityTooLargeException(String message, HttpHeaders headers, String requestUriString) { + this(message, null, headers, requestUriString); + } + + RequestEntityTooLargeException(Exception innerException) { + this(RMResources.RequestEntityTooLarge, innerException, null, null); + } + + RequestEntityTooLargeException(String message, + Exception innerException, + HttpHeaders headers, + String requestUriString) { + super(String.format(RMResources.RequestEntityTooLarge, message), + innerException, + HttpUtils.asMap(headers), + HttpConstants.StatusCodes.REQUEST_ENTITY_TOO_LARGE, + requestUriString); + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/RequestRateTooLargeException.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/RequestRateTooLargeException.java new file mode 100644 index 0000000000000..af5e25c77da89 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/RequestRateTooLargeException.java @@ -0,0 +1,74 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.RMResources; +import com.azure.data.cosmos.internal.directconnectivity.HttpUtils; +import com.azure.data.cosmos.internal.http.HttpHeaders; + +import java.net.URI; +import java.util.Map; + +public class RequestRateTooLargeException extends CosmosClientException { + + public RequestRateTooLargeException() { + this(RMResources.TooManyRequests, null); + } + + public RequestRateTooLargeException(CosmosError cosmosError, long lsn, String partitionKeyRangeId, Map responseHeaders) { + super(HttpConstants.StatusCodes.TOO_MANY_REQUESTS, cosmosError, responseHeaders); + BridgeInternal.setLSN(this, lsn); + BridgeInternal.setPartitionKeyRangeId(this, partitionKeyRangeId); + } + + RequestRateTooLargeException(String message, URI requestUri) { + this(message, null, null, requestUri); + } + + RequestRateTooLargeException(String message, + Exception innerException, + URI requestUri) { + this(message, innerException, null, requestUri); + } + + RequestRateTooLargeException(Exception innerException) { + this(RMResources.TooManyRequests, innerException, null, null); + } + + public RequestRateTooLargeException(String message, HttpHeaders headers, URI requestUri) { + super(message, null, HttpUtils.asMap(headers), HttpConstants.StatusCodes.TOO_MANY_REQUESTS, requestUri != null ? requestUri.toString() : null); + } + + RequestRateTooLargeException(String message, HttpHeaders headers, String requestUriString) { + super(message, null, HttpUtils.asMap(headers), HttpConstants.StatusCodes.TOO_MANY_REQUESTS, requestUriString); + } + + RequestRateTooLargeException(String message, + Exception innerException, + HttpHeaders headers, + URI requestUri) { + super(message, innerException, HttpUtils.asMap(headers), HttpConstants.StatusCodes.TOO_MANY_REQUESTS, requestUri != null ? requestUri.toString() : null); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/RequestTimeoutException.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/RequestTimeoutException.java new file mode 100644 index 0000000000000..5acea29c958c0 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/RequestTimeoutException.java @@ -0,0 +1,87 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE.
+ */ + +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.RMResources; +import com.azure.data.cosmos.internal.Strings; +import com.azure.data.cosmos.internal.directconnectivity.HttpUtils; +import com.azure.data.cosmos.internal.http.HttpHeaders; + +import java.net.URI; +import java.util.Map; + +public class RequestTimeoutException extends CosmosClientException { + + public RequestTimeoutException() { + this(RMResources.RequestTimeout, null); + } + + public RequestTimeoutException(CosmosError cosmosError, long lsn, String partitionKeyRangeId, Map responseHeaders) { + super(HttpConstants.StatusCodes.REQUEST_TIMEOUT, cosmosError, responseHeaders); + BridgeInternal.setLSN(this, lsn); + BridgeInternal.setPartitionKeyRangeId(this, partitionKeyRangeId); + } + + public RequestTimeoutException(String message, URI requestUri) { + this(message, null, null, requestUri); + } + + RequestTimeoutException(String message, + Exception innerException, + URI requestUri, + String localIpAddress) { + this(message(localIpAddress, message), innerException, null, requestUri); + } + + RequestTimeoutException(Exception innerException) { + this(RMResources.Gone, innerException, (HttpHeaders) null, null); + } + + public RequestTimeoutException(String message, HttpHeaders headers, URI requestUrl) { + super(message, null, HttpUtils.asMap(headers), HttpConstants.StatusCodes.REQUEST_TIMEOUT, requestUrl != null ? requestUrl.toString() : null); + } + + RequestTimeoutException(String message, HttpHeaders headers, String requestUriString) { + super(message, null, HttpUtils.asMap(headers), HttpConstants.StatusCodes.REQUEST_TIMEOUT, requestUriString); + } + + RequestTimeoutException(String message, + Exception innerException, + HttpHeaders headers, + URI requestUrl) { + super(message, innerException, HttpUtils.asMap(headers), HttpConstants.StatusCodes.REQUEST_TIMEOUT, requestUrl != null ? requestUrl.toString() : null); + } + + private static String message(String localIP, String baseMessage) { + if (!Strings.isNullOrEmpty(localIP)) { + return String.format( + RMResources.ExceptionMessageAddIpAddress, + baseMessage, + localIP); + } + + return baseMessage; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/Resource.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/Resource.java new file mode 100644 index 0000000000000..99ad792972083 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/Resource.java @@ -0,0 +1,225 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.Constants; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.node.ObjectNode; +import org.apache.commons.lang3.StringUtils; + +import java.time.Instant; +import java.time.OffsetDateTime; +import java.time.ZoneOffset; + +/** + * Represents the base resource in the Azure Cosmos DB database service. + */ +public class Resource extends JsonSerializable { + private String altLink; + + static void validateResource(Resource resource) { + if (!StringUtils.isEmpty(resource.id())) { + if (resource.id().indexOf('/') != -1 || resource.id().indexOf('\\') != -1 || + resource.id().indexOf('?') != -1 || resource.id().indexOf('#') != -1) { + throw new IllegalArgumentException("Id contains illegal chars."); + } + + if (resource.id().endsWith(" ")) { + throw new IllegalArgumentException("Id ends with a space."); + } + } + } + + /** + * Copy constructor. + * + * @param resource resource to by copied. + */ + protected Resource(Resource resource) { + this.id(resource.id()); + this.resourceId(resource.resourceId()); + this.selfLink(resource.selfLink()); + this.altLink(resource.altLink()); + this.timestamp(resource.timestamp()); + this.etag(resource.etag()); + } + + /** + * Constructor. + */ + protected Resource() { + super(); + } + + /** + * Constructor. + * + * @param objectNode the {@link ObjectNode} that represent the + * {@link JsonSerializable} + */ + Resource(ObjectNode objectNode) { + super(objectNode); + } + + /** + * Constructor. + * + * @param jsonString the json string that represents the resource. + * @param objectMapper the custom object mapper + */ + Resource(String jsonString, ObjectMapper objectMapper) { + // TODO: Made package private due to #153. #171 adding custom serialization options back. + super(jsonString, objectMapper); + } + + /** + * Constructor. + * + * @param jsonString the json string that represents the resource. + */ + protected Resource(String jsonString) { + super(jsonString); + } + + /** + * Gets the name of the resource. + * + * @return the name of the resource. + */ + public String id() { + return super.getString(Constants.Properties.ID); + } + + /** + * Sets the name of the resource. + * + * @param id the name of the resource. + * @return the resource. + */ + public Resource id(String id) { + super.set(Constants.Properties.ID, id); + return this; + } + + /** + * Gets the ID associated with the resource. + * + * @return the ID associated with the resource. + */ + public String resourceId() { + return super.getString(Constants.Properties.R_ID); + } + + // TODO: make private + /** + * Set the ID associated with the resource. + * + * @param resourceId the ID associated with the resource. + * @return the resource. + */ + public Resource resourceId(String resourceId) { + super.set(Constants.Properties.R_ID, resourceId); + return this; + } + + /** + * Get the self-link associated with the resource. + * + * @return the self link. + */ + public String selfLink() { + return super.getString(Constants.Properties.SELF_LINK); + } + + /** + * Set the self-link associated with the resource. + * + * @param selfLink the self link. 
+ */ + Resource selfLink(String selfLink) { + super.set(Constants.Properties.SELF_LINK, selfLink); + return this; + } + + /** + * Get the last modified timestamp associated with the resource. + * + * @return the timestamp. + */ + public OffsetDateTime timestamp() { + Long seconds = super.getLong(Constants.Properties.LAST_MODIFIED); + if (seconds == null) + return null; + return OffsetDateTime.ofInstant(Instant.ofEpochSecond(seconds.longValue()), ZoneOffset.UTC); + } + + /** + * Set the last modified timestamp associated with the resource. + * + * @param timestamp the timestamp. + */ + Resource timestamp(OffsetDateTime timestamp) { + long seconds = timestamp.toEpochSecond(); + super.set(Constants.Properties.LAST_MODIFIED, seconds); + return this; + } + + /** + * Get the entity tag associated with the resource. + * + * @return the e tag. + */ + public String etag() { + return super.getString(Constants.Properties.E_TAG); + } + + /** + * Set the self-link associated with the resource. + * + * @param eTag the e tag. + */ + Resource etag(String eTag) { + super.set(Constants.Properties.E_TAG, eTag); + return this; + } + + /** + * Sets the alt-link associated with the resource from the Azure Cosmos DB + * service. + * + * @param altLink + */ + Resource altLink(String altLink) { + this.altLink = altLink; + return this; + } + + /** + * Gets the alt-link associated with the resource from the Azure Cosmos DB + * service. + */ + String altLink() { + return this.altLink; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/RetryOptions.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/RetryOptions.java new file mode 100644 index 0000000000000..e560f03dc2195 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/RetryOptions.java @@ -0,0 +1,120 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +/** + * Encapsulates retry options in the Azure Cosmos DB database service. + */ +public class RetryOptions { + private int maxRetryAttemptsOnThrottledRequests; + private int maxRetryWaitTimeInSeconds; + + /** + * Creates a new instance of the RetryOptions class and initializes all + * properties to default values. 
+ */ + public RetryOptions() { + this.maxRetryAttemptsOnThrottledRequests = 9; + this.maxRetryWaitTimeInSeconds = 30; + } + + /** + * Gets the maximum number of retries in the case where the request fails + * because the service has applied rate limiting on the client. + * + * @return the maximum number of retries. + */ + public int maxRetryAttemptsOnThrottledRequests() { + return this.maxRetryAttemptsOnThrottledRequests; + } + + /** + * Sets the maximum number of retries in the case where the request fails + * because the service has applied rate limiting on the client. + *
+ * <p>
+ * When a client is sending requests faster than the allowed rate, the + * service will return HttpStatusCode 429 (Too Many Requests) to throttle the + * client. The current implementation in the SDK will then wait for the + * amount of time the service tells it to wait and retry after the time has + * elapsed. + *
+ * <p>
+ * The default value is 9. This means in the case where the request is + * throttled, the same request will be issued to the server at most 10 times + * before an error is returned to the application. + * + * @param maxRetryAttemptsOnThrottledRequests the max number of retry attempts on failed requests due to a + * throttle error. + * @return the RetryOptions. + */ + public RetryOptions maxRetryAttemptsOnThrottledRequests(int maxRetryAttemptsOnThrottledRequests) { + if (maxRetryAttemptsOnThrottledRequests < 0) { + throw new IllegalArgumentException("maxRetryAttemptsOnThrottledRequests value must be a non-negative integer."); + } + + this.maxRetryAttemptsOnThrottledRequests = maxRetryAttemptsOnThrottledRequests; + return this; + } + + /** + * Gets the maximum retry time in seconds. + * + * @return the maximum retry time in seconds. + */ + public int maxRetryWaitTimeInSeconds() { + return this.maxRetryWaitTimeInSeconds; + } + + /** + * Sets the maximum retry time in seconds. + *
+ * <p>
+ * When a request fails due to a throttle error, the service sends back a + * response that contains a value indicating the client should not retry + * before the time period has elapsed (Retry-After). The MaxRetryWaitTime + * flag allows the application to set a maximum wait time for all retry + * attempts. If the cumulative wait time exceeds the MaxRetryWaitTime, the + * SDK will stop retrying and return the error to the application. + *
+ * <p>
+ * The default value is 30 seconds. + * + * @param maxRetryWaitTimeInSeconds the maximum number of seconds a request will be retried. + * @return the RetryOptions. + */ + public RetryOptions maxRetryWaitTimeInSeconds(int maxRetryWaitTimeInSeconds) { + if (maxRetryWaitTimeInSeconds < 0 || maxRetryWaitTimeInSeconds > Integer.MAX_VALUE / 1000) { + throw new IllegalArgumentException( + "value must be a positive integer between the range of 0 to " + Integer.MAX_VALUE / 1000); + } + + this.maxRetryWaitTimeInSeconds = maxRetryWaitTimeInSeconds; + return this; + } + + @Override + public String toString() { + return "RetryOptions{" + + "maxRetryAttemptsOnThrottledRequests=" + maxRetryAttemptsOnThrottledRequests + + ", maxRetryWaitTimeInSeconds=" + maxRetryWaitTimeInSeconds + + '}'; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/RetryWithException.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/RetryWithException.java new file mode 100644 index 0000000000000..de0f66c62ca3b --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/RetryWithException.java @@ -0,0 +1,65 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.directconnectivity.HttpUtils; +import com.azure.data.cosmos.internal.http.HttpHeaders; + +import java.net.URI; +import java.util.Map; + +public class RetryWithException extends CosmosClientException { + + public RetryWithException(CosmosError cosmosError, long lsn, String partitionKeyRangeId, Map responseHeaders) { + super(HttpConstants.StatusCodes.RETRY_WITH, cosmosError, responseHeaders); + BridgeInternal.setLSN(this, lsn); + BridgeInternal.setPartitionKeyRangeId(this, partitionKeyRangeId); + } + + RetryWithException(String message, URI requestUri) { + this(message, null, null, requestUri); + } + + RetryWithException(String message, + Exception innerException, + URI requestUri) { + this(message, innerException, null, requestUri); + } + + public RetryWithException(String message, HttpHeaders headers, URI requestUri) { + super(message, null, HttpUtils.asMap(headers), HttpConstants.StatusCodes.RETRY_WITH, requestUri != null ? 
requestUri.toString() : null); + } + + RetryWithException(String message, HttpHeaders headers, String requestUriString) { + super(message, null, HttpUtils.asMap(headers), HttpConstants.StatusCodes.RETRY_WITH, requestUriString); + } + + RetryWithException(String message, + Exception innerException, + HttpHeaders headers, + URI requestUri) { + super(message, innerException, HttpUtils.asMap(headers), HttpConstants.StatusCodes.RETRY_WITH, requestUri != null ? requestUri.toString() : null); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/SerializationFormattingPolicy.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/SerializationFormattingPolicy.java new file mode 100644 index 0000000000000..fde6c6334ee6d --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/SerializationFormattingPolicy.java @@ -0,0 +1,40 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +/** + * The formatting policy associated with JSON serialization in the Azure Cosmos DB database service. + */ +public enum SerializationFormattingPolicy { + + /** + * No additional formatting required. + */ + NONE, + + /** + * Indent the fields appropriately. + */ + INDENTED +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/ServiceUnavailableException.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/ServiceUnavailableException.java new file mode 100644 index 0000000000000..39e712676ff3a --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/ServiceUnavailableException.java @@ -0,0 +1,71 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
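A usage sketch of the RetryOptions class introduced above (illustrative only, not lines from this change): the fluent setters can be chained, and only APIs that appear in this file are used.

import com.azure.data.cosmos.RetryOptions;

class RetryOptionsSketch {
    public static void main(String[] args) {
        // Retry a throttled (HTTP 429) request at most 5 times, and stop retrying once the
        // cumulative wait across retries would exceed 60 seconds.
        RetryOptions retryOptions = new RetryOptions()
                .maxRetryAttemptsOnThrottledRequests(5)
                .maxRetryWaitTimeInSeconds(60);
        System.out.println(retryOptions); // formatted by the toString() defined above
    }
}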
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.RMResources; +import com.azure.data.cosmos.internal.directconnectivity.HttpUtils; +import com.azure.data.cosmos.internal.http.HttpHeaders; + +import java.net.URI; +import java.util.Map; + +public class ServiceUnavailableException extends CosmosClientException { + ServiceUnavailableException() { + this(RMResources.ServiceUnavailable); + } + + public ServiceUnavailableException(CosmosError cosmosError, long lsn, String partitionKeyRangeId, Map responseHeaders) { + super(HttpConstants.StatusCodes.SERVICE_UNAVAILABLE, cosmosError, responseHeaders); + BridgeInternal.setLSN(this, lsn); + BridgeInternal.setPartitionKeyRangeId(this, partitionKeyRangeId); + } + + ServiceUnavailableException(String message) { + this(message, null, null, null); + } + + ServiceUnavailableException(String message, HttpHeaders headers, String requestUriString) { + this(message, null, headers, requestUriString); + } + + public ServiceUnavailableException(String message, HttpHeaders headers, URI requestUri) { + this(message, headers, requestUri != null ? requestUri.toString() : null); + } + + ServiceUnavailableException(Exception innerException) { + this(RMResources.ServiceUnavailable, innerException, null, null); + } + + public ServiceUnavailableException(String message, + Exception innerException, + HttpHeaders headers, + String requestUriString) { + super(String.format("%s: %s", RMResources.ServiceUnavailable, message), + innerException, + HttpUtils.asMap(headers), + HttpConstants.StatusCodes.SERVICE_UNAVAILABLE, + requestUriString); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/SpatialIndex.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/SpatialIndex.java new file mode 100644 index 0000000000000..77bb0fa3b6b6a --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/SpatialIndex.java @@ -0,0 +1,90 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.Constants; +import org.apache.commons.lang3.StringUtils; + +/** + * Represents a spatial index in the Azure Cosmos DB database service. + */ +final class SpatialIndex extends Index { + + /** + * Initializes a new instance of the SpatialIndex class. + *
+ * <p>
+ * Here is an example of how to instantiate the SpatialIndex class, passing in the DataType: + *
+     * <pre>
+     * {@code
+     *
+     * SpatialIndex spatialIndex = new SpatialIndex(DataType.POINT);
+     *
+     * }
+     * </pre>
+ * + * @param dataType specifies the target data type for the index path specification. + */ + SpatialIndex(DataType dataType) { + super(IndexKind.SPATIAL); + this.dataType(dataType); + } + + /** + * Initializes a new instance of the SpatialIndex class. + * + * @param jsonString the json string that represents the index. + */ + SpatialIndex(String jsonString) { + super(jsonString, IndexKind.SPATIAL); + if (this.dataType() == null) { + throw new IllegalArgumentException("The jsonString doesn't contain a valid 'dataType'."); + } + } + + /** + * Gets data type. + * + * @return the data type. + */ + public DataType dataType() { + DataType result = null; + try { + result = DataType.valueOf(StringUtils.upperCase(super.getString(Constants.Properties.DATA_TYPE))); + } catch (IllegalArgumentException e) { + this.getLogger().warn("INVALID index dataType value {}.", super.getString(Constants.Properties.DATA_TYPE)); + } + return result; + } + + /** + * Sets data type. + * + * @param dataType the data type. + * @return the SpatialIndex. + */ + public SpatialIndex dataType(DataType dataType) { + super.set(Constants.Properties.DATA_TYPE, dataType.toString()); + return this; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/SpatialSpec.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/SpatialSpec.java new file mode 100644 index 0000000000000..6c20175b6511b --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/SpatialSpec.java @@ -0,0 +1,105 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.Constants; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; + +public class SpatialSpec extends JsonSerializable { + + private List spatialTypes; + + /** + * Constructor. + */ + public SpatialSpec() { + super(); + } + + /** + * Constructor. + * + * @param jsonString the json string that represents the included path. + */ + SpatialSpec(String jsonString) { + super(jsonString); + } + + + /** + * Gets path. + * + * @return the path. + */ + public String path() { + return super.getString(Constants.Properties.PATH); + } + + /** + * Sets path. + * + * @param path the path. + * @return the SpatialSpec. + */ + public SpatialSpec path(String path) { + super.set(Constants.Properties.PATH, path); + return this; + } + + /** + * Gets the collection of spatial types. 
+ * + * @return the collection of spatial types. + */ + public List spatialTypes() { + if (this.spatialTypes == null) { + this.spatialTypes = super.getList(Constants.Properties.TYPES, SpatialType.class, true); + + if (this.spatialTypes == null) { + this.spatialTypes = new ArrayList(); + } + } + + return this.spatialTypes; + } + + /** + * Sets the collection of spatial types. + * + * @param spatialTypes the collection of spatial types. + * @return the SpatialSpec. + */ + public SpatialSpec spatialTypes(List spatialTypes) { + this.spatialTypes = spatialTypes; + Collection spatialTypeNames = new ArrayList(); + for (SpatialType spatialType : this.spatialTypes) { + spatialTypeNames.add(spatialType.toString()); + } + super.set(Constants.Properties.TYPES, spatialTypeNames); + return this; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/SpatialType.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/SpatialType.java new file mode 100644 index 0000000000000..cd53b197c4378 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/SpatialType.java @@ -0,0 +1,59 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +import org.apache.commons.lang3.StringUtils; +import org.apache.commons.text.WordUtils; + +/** + * Defines the target data type of an index path specification in the Azure Cosmos DB service. + * + */ +public enum SpatialType { + /** + * Represent a point data type. + */ + POINT, + + /** + * Represent a line string data type. + */ + LINE_STRING, + + /** + * Represent a polygon data type. + */ + POLYGON, + + /** + * Represent a multi-polygon data type. 
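A usage sketch of the SpatialSpec and SpatialType types above (illustrative only, not lines from this change); the "/location/*" path is an invented example path.

import com.azure.data.cosmos.SpatialSpec;
import com.azure.data.cosmos.SpatialType;

import java.util.Arrays;

class SpatialSpecSketch {
    public static void main(String[] args) {
        // Declare that Point and Polygon GeoJSON values under the hypothetical /location/* path
        // should be spatially indexed.
        SpatialSpec spec = new SpatialSpec()
                .path("/location/*")
                .spatialTypes(Arrays.asList(SpatialType.POINT, SpatialType.POLYGON));
        System.out.println(spec.path() + " -> " + spec.spatialTypes());
    }
}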
+ */ + MULTI_POLYGON; + + @Override + public String toString() { + return StringUtils.remove(WordUtils.capitalizeFully(this.name(), '_'), '_'); + } +} + diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/SqlParameter.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/SqlParameter.java new file mode 100644 index 0000000000000..e5da14d40a873 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/SqlParameter.java @@ -0,0 +1,92 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +/** + * Represents a SQL parameter in the SqlQuerySpec used for queries in the Azure Cosmos DB database service. + */ +public final class SqlParameter extends JsonSerializable { + + + /** + * Initializes a new instance of the SqlParameter class. + */ + public SqlParameter() { + super(); + } + + /** + * Initializes a new instance of the SqlParameter class with the name and value of the parameter. + * + * @param name the name of the parameter. + * @param value the value of the parameter. + */ + public SqlParameter(String name, Object value) { + super(); + this.name(name); + this.value(value); + } + + /** + * Gets the name of the parameter. + * + * @return the name of the parameter. + */ + public String name() { + return super.getString("name"); + } + + /** + * Sets the name of the parameter. + * + * @param name the name of the parameter. + * @return the SqlParameter. + */ + public SqlParameter name(String name) { + super.set("name", name); + return this; + } + + /** + * Gets the value of the parameter. + * + * @param c the class of the parameter value. + * @param the type of the parameter + * @return the value of the parameter. + */ + public Object value(Class c) { + return super.getObject("value", c); + } + + /** + * Sets the value of the parameter. + * + * @param value the value of the parameter. + * @return the SqlParameter. 
+ */ + public SqlParameter value(Object value) { + super.set("value", value); + return this; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/SqlParameterList.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/SqlParameterList.java new file mode 100644 index 0000000000000..ff1d9620f00cc --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/SqlParameterList.java @@ -0,0 +1,187 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Iterator; +import java.util.List; +import java.util.ListIterator; + +/** + * Represents a collection of SQL parameters to for a SQL query in the Azure Cosmos DB database service. + */ +public final class SqlParameterList implements List { + + private List parameters; + + /** + * Initializes a new instance of the SqlParameterList class. + */ + public SqlParameterList() { + this.parameters = new ArrayList(); + } + + /** + * Initializes a new instance of the SqlParameterList class from an array of parameters. + * + * @param parameters the array of parameters. + */ + public SqlParameterList(SqlParameter... parameters) { + if (parameters == null) { + throw new IllegalArgumentException("parameters"); + } + + this.parameters = Arrays.asList(parameters); + } + + /** + * Initializes a new instance of the SqlParameterList class from a collection of parameters. + * + * @param parameters the collection of parameters. 
+ */ + public SqlParameterList(Collection parameters) { + if (parameters == null) { + throw new IllegalArgumentException("parameters"); + } + + this.parameters = new ArrayList(parameters); + } + + @Override + public boolean add(SqlParameter parameter) { + return this.parameters.add(parameter); + } + + @Override + public boolean addAll(Collection parameters) { + return this.parameters.addAll(parameters); + } + + @Override + public boolean addAll(int index, Collection c) { + return this.parameters.addAll(index, c); + } + + @Override + public void clear() { + this.parameters.clear(); + } + + @Override + public SqlParameter get(int index) { + return this.parameters.get(index); + } + + @Override + public SqlParameter set(int index, SqlParameter element) { + return this.parameters.set(index, element); + } + + @Override + public void add(int index, SqlParameter element) { + this.parameters.add(index, element); + } + + @Override + public SqlParameter remove(int index) { + return this.parameters.remove(index); + } + + @Override + public int indexOf(Object o) { + return this.parameters.indexOf(o); + } + + @Override + public int lastIndexOf(Object o) { + return this.parameters.lastIndexOf(o); + } + + @Override + public ListIterator listIterator() { + return this.parameters.listIterator(); + } + + @Override + public ListIterator listIterator(int index) { + return this.parameters.listIterator(index); + } + + @Override + public List subList(int fromIndex, int toIndex) { + return this.parameters.subList(fromIndex, toIndex); + } + + @Override + public boolean contains(Object parameter) { + return this.parameters.contains(parameter); + } + + @Override + public boolean containsAll(Collection parameters) { + return this.parameters.containsAll(parameters); + } + + @Override + public boolean isEmpty() { + return this.parameters.isEmpty(); + } + + @Override + public Iterator iterator() { + return this.parameters.iterator(); + } + + @Override + public boolean remove(Object parameter) { + return this.parameters.remove(parameter); + } + + @Override + public boolean removeAll(Collection parameters) { + return this.parameters.removeAll(parameters); + } + + @Override + public boolean retainAll(Collection parameters) { + return this.parameters.retainAll(parameters); + } + + @Override + public int size() { + return this.parameters.size(); + } + + @Override + public Object[] toArray() { + return this.parameters.toArray(); + } + + @Override + public T[] toArray(T[] parameters) { + return this.parameters.toArray(parameters); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/SqlQuerySpec.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/SqlQuerySpec.java new file mode 100644 index 0000000000000..e5a6805e9e1c7 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/SqlQuerySpec.java @@ -0,0 +1,129 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +import java.util.ArrayList; +import java.util.Collection; + +/** + * Represents a SQL query in the Azure Cosmos DB database service. + */ +public final class SqlQuerySpec extends JsonSerializable { + + private SqlParameterList parameters; + + /** + * Initializes a new instance of the SqlQuerySpec class. + */ + public SqlQuerySpec() { + super(); + } + + /** + * Initializes a new instance of the SqlQuerySpec class with the text of the + * query. + * + * @param queryText + * the query text. + */ + public SqlQuerySpec(String queryText) { + super(); + this.queryText(queryText); + } + + /** + * Initializes a new instance of the SqlQuerySpec class with the text of the + * query and parameters. + * + * @param queryText the query text. + * @param parameters the query parameters. + */ + public SqlQuerySpec(String queryText, SqlParameterList parameters) { + super(); + this.queryText(queryText); + this.parameters = parameters; + } + + /** + * Gets the text of the query. + * + * @return the query text. + */ + public String queryText() { + return super.getString("query"); + } + + /** + * Sets the text of the query. + * + * @param queryText + * the query text. + * @return the SqlQuerySpec. + */ + public SqlQuerySpec queryText(String queryText) { + super.set("query", queryText); + return this; + } + + /** + * Gets the collection of query parameters. + * + * @return the query parameters. + */ + public SqlParameterList parameters() { + if (this.parameters == null) { + Collection sqlParameters = super.getCollection("parameters", SqlParameter.class); + if (sqlParameters == null) { + sqlParameters = new ArrayList(); + } + + this.parameters = new SqlParameterList(sqlParameters); + } + + return this.parameters; + } + + /** + * Sets the collection of query parameters. + * + * @param parameters + * the query parameters. + * @return the SqlQuerySpec. 
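A usage sketch combining the SqlParameter, SqlParameterList, and SqlQuerySpec types in this change (illustrative only, not lines from this change); the query text and parameter value are invented for the example.

import com.azure.data.cosmos.SqlParameter;
import com.azure.data.cosmos.SqlParameterList;
import com.azure.data.cosmos.SqlQuerySpec;

class SqlQuerySpecSketch {
    public static void main(String[] args) {
        // Bind @city as a parameter rather than concatenating user input into the query text.
        SqlQuerySpec querySpec = new SqlQuerySpec(
                "SELECT * FROM c WHERE c.city = @city",
                new SqlParameterList(new SqlParameter("@city", "Seattle")));
        System.out.println(querySpec.queryText() + " [" + querySpec.parameters().size() + " parameter(s)]");
    }
}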
+ */ + public SqlQuerySpec parameters(SqlParameterList parameters) { + this.parameters = parameters; + return this; + } + + @Override + void populatePropertyBag() { + boolean defaultParameters = (this.parameters != null && this.parameters.size() != 0); + + if (defaultParameters) { + super.set("parameters", this.parameters); + } else { + super.remove("parameters"); + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/TokenResolver.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/TokenResolver.java new file mode 100644 index 0000000000000..67fc295d87861 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/TokenResolver.java @@ -0,0 +1,49 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +import java.util.Map; + +/** + * This interface is for client side implementation, which can be used for initializing + * AsyncDocumentClient without passing master key, resource token and permission feed.
+ *
+ * Each time the SDK create request for CosmosDB, authorization token is generated based on that + * request at client side which enables creation of one AsyncDocumentClient per application shared across various users + * with different resource permissions. + */ +@FunctionalInterface +public interface TokenResolver { + + /** + * This method will consume the request information and based on that it will generate the authorization token. + * @param properties the user properties. + * @param requestVerb Request verb i.e. GET, POST, PUT etc. + * @param resourceIdOrFullName ResourceID or resource full name. + * @param resourceType Resource type i.e. Database, DocumentCollection, Document etc. + * @return The authorization token. + */ + public String getAuthorizationToken(String requestVerb, String resourceIdOrFullName, CosmosResourceType resourceType, Map properties); + +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/TriggerOperation.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/TriggerOperation.java new file mode 100644 index 0000000000000..e7d624655db0f --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/TriggerOperation.java @@ -0,0 +1,76 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +import org.apache.commons.text.WordUtils; + +/** + * Specifies the operations on which a trigger should be executed in the Azure Cosmos DB database service. + */ +public enum TriggerOperation { + /** + * ALL operations. + */ + ALL(0x0), + + /** + * CREATE operations only. + */ + CREATE(0x1), + + /** + * UPDATE operations only. + */ + UPDATE(0x2), + + /** + * DELETE operations only. + */ + DELETE(0x3), + + /** + * REPLACE operations only. + */ + REPLACE(0x4); + + private int value; + + TriggerOperation(int value) { + this.value = value; + } + + /** + * Gets the numerical value of the trigger operation. + * + * @return the numerical value. 
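Because TokenResolver above is a @FunctionalInterface, it can be supplied as a lambda. A sketch follows (illustrative only, not lines from this change); the returned token string is a placeholder, and the resource type argument is left null purely to keep the sketch self-contained.

import com.azure.data.cosmos.TokenResolver;

class TokenResolverSketch {
    public static void main(String[] args) {
        // A real resolver would look up or compute a scoped authorization token for the
        // given verb and resource; here we just fabricate a placeholder string.
        TokenResolver resolver = (requestVerb, resourceIdOrFullName, resourceType, properties) ->
                "placeholder-token:" + requestVerb + ":" + resourceIdOrFullName;

        System.out.println(resolver.getAuthorizationToken("GET", "dbs/testdb/colls/testcoll", null, null));
    }
}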
+ */ + public int getValue() { + return value; + } + + @Override + public String toString() { + return WordUtils.capitalizeFully(this.name()); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/TriggerType.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/TriggerType.java new file mode 100644 index 0000000000000..9717d7a01ab7b --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/TriggerType.java @@ -0,0 +1,61 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +import org.apache.commons.text.WordUtils; + +/** + * The trigger type in the Azure Cosmos DB database service. + */ +public enum TriggerType { + /** + * Trigger should be executed before the associated operation(s). + */ + PRE(0x0), + + /** + * Trigger should be executed after the associated operation(s). + */ + POST(0x1); + + private int value; + + TriggerType(int value) { + this.value = value; + } + + /** + * Gets the numerical value of the trigger type. + * + * @return the numerical value. + */ + public int getValue() { + return value; + } + + @Override + public String toString() { + return WordUtils.capitalizeFully(this.name()); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/UnauthorizedException.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/UnauthorizedException.java new file mode 100644 index 0000000000000..69d77ceb7d0fb --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/UnauthorizedException.java @@ -0,0 +1,72 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.RMResources; +import com.azure.data.cosmos.internal.directconnectivity.HttpUtils; +import com.azure.data.cosmos.internal.http.HttpHeaders; + +import java.net.URI; +import java.util.Map; + +public class UnauthorizedException extends CosmosClientException { + + UnauthorizedException() { + this(RMResources.Unauthorized); + } + + public UnauthorizedException(CosmosError cosmosError, long lsn, String partitionKeyRangeId, Map responseHeaders) { + super(HttpConstants.StatusCodes.UNAUTHORIZED, cosmosError, responseHeaders); + BridgeInternal.setLSN(this, lsn); + BridgeInternal.setPartitionKeyRangeId(this, partitionKeyRangeId); + } + + UnauthorizedException(String message) { + this(message, null, null, null); + } + + UnauthorizedException(String message, HttpHeaders headers, String requestUriString) { + this(message, null, headers, requestUriString); + } + + public UnauthorizedException(String message, HttpHeaders headers, URI requestUri) { + this(message, headers, requestUri != null ? requestUri.toString() : null); + } + + UnauthorizedException(Exception innerException) { + this(RMResources.Unauthorized, innerException, null, null); + } + + UnauthorizedException(String message, + Exception innerException, + HttpHeaders headers, + String requestUri) { + super(String.format("%s: %s", RMResources.Unauthorized, message), + innerException, + HttpUtils.asMap(headers), + HttpConstants.StatusCodes.UNAUTHORIZED, + requestUri); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/UniqueKey.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/UniqueKey.java new file mode 100644 index 0000000000000..1b33988b52dd5 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/UniqueKey.java @@ -0,0 +1,91 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2016 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.Constants; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; + +/** + * Represents a unique key on that enforces uniqueness constraint on documents in the collection in the Azure Cosmos DB service. + * + * 1) For partitioned collections, the value of partition key is implicitly a part of each unique key. + * 2) Uniqueness constraint is also enforced for missing values. + * For instance, if unique key policy defines a unique key with single property path, there could be only one document that has missing value for this property. + * @see UniqueKeyPolicy + */ +public class UniqueKey extends JsonSerializable { + private List paths; + + public UniqueKey() { + super(); + } + + UniqueKey(String jsonString) { + super(jsonString); + } + + /** + * Gets the paths, a set of which must be unique for each document in the Azure Cosmos DB service. + * + * The paths to enforce uniqueness on. Each path is a rooted path of the unique property in the document, + * such as "/name/first". + * + * @return the unique paths. + */ + public Collection paths() { + if (this.paths == null) { + this.paths = super.getList(Constants.Properties.PATHS, String.class); + + if (this.paths == null) { + this.paths = new ArrayList(); + } + } + + return this.paths; + } + + + /** + * Sets the paths, a set of which must be unique for each document in the Azure Cosmos DB service. + * + * The paths to enforce uniqueness on. Each path is a rooted path of the unique property in the document, + * such as "/name/first". + * + * @param paths the unique paths. + * @return the Unique Key. + */ + public UniqueKey paths(List paths) { + this.paths = paths; + return this; + } + + @Override + void populatePropertyBag() { + if (paths != null) { + super.set(Constants.Properties.PATHS, paths); + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/UniqueKeyPolicy.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/UniqueKeyPolicy.java new file mode 100644 index 0000000000000..4435493c6b565 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/UniqueKeyPolicy.java @@ -0,0 +1,84 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.Constants; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; + +/** + * Represents the unique key policy configuration for specifying uniqueness constraints on documents in the + * collection in the Azure Cosmos DB service. + */ +public class UniqueKeyPolicy extends JsonSerializable { + private List uniqueKeys; + + public UniqueKeyPolicy() { + super(); + } + + /** + * Constructor. + * + * @param jsonString the json string that represents the Unique Key policy. + */ + UniqueKeyPolicy(String jsonString) { + super(jsonString); + } + + /** + * Gets or sets collection of {@link UniqueKey} that guarantee uniqueness of documents in collection + * in the Azure Cosmos DB service. + * + * @return the unique keys. + */ + public Collection uniqueKeys() { + if (this.uniqueKeys == null) { + this.uniqueKeys = super.getList(Constants.Properties.UNIQUE_KEYS, UniqueKey.class); + if (this.uniqueKeys == null) { + this.uniqueKeys = new ArrayList<>(); + } + } + return this.uniqueKeys; + } + + public UniqueKeyPolicy uniqueKeys(List uniqueKeys) { + if (uniqueKeys == null) { + throw new IllegalArgumentException("uniqueKeys cannot be null."); + } + this.uniqueKeys = uniqueKeys; + return this; + } + + @Override + void populatePropertyBag() { + if (this.uniqueKeys != null) { + for(UniqueKey uniqueKey: uniqueKeys) { + uniqueKey.populatePropertyBag(); + } + super.set(Constants.Properties.UNIQUE_KEYS, uniqueKeys); + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/AsyncDocumentClient.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/AsyncDocumentClient.java new file mode 100644 index 0000000000000..0c0f0d5808a53 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/AsyncDocumentClient.java @@ -0,0 +1,1339 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
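For orientation, a minimal sketch of how UniqueKey and UniqueKeyPolicy compose, using only the fluent paths()/uniqueKeys() setters defined above; where the policy is ultimately attached (the container/collection definition) is outside this hunk and is only indicated in a comment.

    import java.util.Collections;

    class UniqueKeyPolicySketch {
        // Builds a policy with a single unique key over the rooted path "/name/first".
        // For partitioned collections the partition key is implicitly part of the key,
        // and uniqueness is also enforced for documents missing the property.
        static UniqueKeyPolicy namePolicy() {
            UniqueKey uniqueKey = new UniqueKey()
                    .paths(Collections.singletonList("/name/first"));

            // The returned policy would then be set on the container/collection
            // definition before creation (that type is not part of this hunk).
            return new UniqueKeyPolicy()
                    .uniqueKeys(Collections.singletonList(uniqueKey));
        }
    }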
+ */ +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.ChangeFeedOptions; +import com.azure.data.cosmos.ConnectionPolicy; +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.SqlQuerySpec; +import com.azure.data.cosmos.TokenResolver; +import reactor.core.publisher.Flux; + +import java.net.URI; +import java.net.URISyntaxException; +import java.util.List; +/** + * Provides a client-side logical representation of the Azure Cosmos DB + * database service. This async client is used to configure and execute requests + * against the service. + * + *

+ * The {@link AsyncDocumentClient} async APIs return Project Reactor's {@link
+ * Flux} types, so the full set of Reactor {@link Flux} operators is available on them.
+ * The async {@link Flux} based APIs perform the requested operation only after
+ * subscription.
+ *

+ * The service client encapsulates the endpoint and credentials used to access + * the Cosmos DB service. + *

+ * To instantiate a client, use the {@link Builder}:
+ *

+ * {@code
+ * ConnectionPolicy connectionPolicy = new ConnectionPolicy();
+ * connectionPolicy.connectionMode(ConnectionMode.DIRECT);
+ * AsyncDocumentClient client = new AsyncDocumentClient.Builder()
+ *         .withServiceEndpoint(serviceEndpoint)
+ *         .withMasterKeyOrResourceToken(masterKey)
+ *         .withConnectionPolicy(connectionPolicy)
+ *         .withConsistencyLevel(ConsistencyLevel.SESSION)
+ *         .build();
+ * }
+ * 
+ */ +public interface AsyncDocumentClient { + + /** + * Helper class to build {@link AsyncDocumentClient} instances + * as logical representation of the Azure Cosmos DB database service. + * + *
+     * {@code
+     * ConnectionPolicy connectionPolicy = new ConnectionPolicy();
+     * connectionPolicy.connectionMode(ConnectionMode.DIRECT);
+     * AsyncDocumentClient client = new AsyncDocumentClient.Builder()
+     *         .withServiceEndpoint(serviceEndpoint)
+     *         .withMasterKeyOrResourceToken(masterKey)
+     *         .withConnectionPolicy(connectionPolicy)
+     *         .withConsistencyLevel(ConsistencyLevel.SESSION)
+     *         .build();
+     * }
+     * 
+ */
+    class Builder {
+
+        Configs configs = new Configs();
+        ConnectionPolicy connectionPolicy;
+        ConsistencyLevel desiredConsistencyLevel;
+        List<Permission> permissionFeed;
+        String masterKeyOrResourceToken;
+        URI serviceEndpoint;
+        TokenResolver tokenResolver;
+
+        public Builder withServiceEndpoint(String serviceEndpoint) {
+            try {
+                this.serviceEndpoint = new URI(serviceEndpoint);
+            } catch (URISyntaxException e) {
+                throw new IllegalArgumentException(e.getMessage());
+            }
+            return this;
+        }
+
+        /**
+         * The new method {@link #withMasterKeyOrResourceToken(String)} takes either a master key
+         * or a resource token and uses it to authenticate access to resources.
+         *
+         * @param masterKeyOrResourceToken MasterKey or resourceToken for authentication.
+         * @return current Builder.
+         * @deprecated use {@link #withMasterKeyOrResourceToken(String)} instead.
+         */
+        @Deprecated
+        public Builder withMasterKey(String masterKeyOrResourceToken) {
+            this.masterKeyOrResourceToken = masterKeyOrResourceToken;
+            return this;
+        }
+
+        /**
+         * This method accepts either a master key or a resource token for authentication.
+         *
+         * @param masterKeyOrResourceToken MasterKey or resourceToken for authentication.
+         * @return current Builder.
+         */
+        public Builder withMasterKeyOrResourceToken(String masterKeyOrResourceToken) {
+            this.masterKeyOrResourceToken = masterKeyOrResourceToken;
+            return this;
+        }
+
+        /**
+         * This method accepts the permission list, which contains the resource tokens
+         * needed to access resources.
+         *
+         * @param permissionFeed Permission list for authentication.
+         * @return current Builder.
+         */
+        public Builder withPermissionFeed(List<Permission> permissionFeed) {
+            this.permissionFeed = permissionFeed;
+            return this;
+        }
+
+        public Builder withConsistencyLevel(ConsistencyLevel desiredConsistencyLevel) {
+            this.desiredConsistencyLevel = desiredConsistencyLevel;
+            return this;
+        }
+
+        public Builder withConfigs(Configs configs) {
+            this.configs = configs;
+            return this;
+        }
+
+        public Builder withConnectionPolicy(ConnectionPolicy connectionPolicy) {
+            this.connectionPolicy = connectionPolicy;
+            return this;
+        }
+
+        /**
+         * This method accepts a tokenResolver, an rx function that takes the arguments
+ * T1 requestVerb(STRING),
+ * T2 resourceIdOrFullName(STRING),
+ * T3 resourceType(com.azure.data.cosmos.internal.ResourceType),
+ * T4 request headers(Map)
+ *
+ * and return
+ * R authenticationToken(STRING)
+ * + * @param tokenResolver tokenResolver function for authentication. + * @return current Builder. + */ + /*public Builder withTokenResolver(Func4, STRING> tokenResolver) { + this.tokenResolver = tokenResolver; + return this; + }*/ + + /** + * This method will accept functional interface TokenResolver which helps in generation authorization + * token per request. AsyncDocumentClient can be successfully initialized with this API without passing any MasterKey, ResourceToken or PermissionFeed. + * @param tokenResolver The tokenResolver + * @return current Builder. + */ + public Builder withTokenResolver(TokenResolver tokenResolver) { + this.tokenResolver = tokenResolver; + return this; + } + + private void ifThrowIllegalArgException(boolean value, String error) { + if (value) { + throw new IllegalArgumentException(error); + } + } + + public AsyncDocumentClient build() { + + ifThrowIllegalArgException(this.serviceEndpoint == null, "cannot build client without service endpoint"); + ifThrowIllegalArgException( + this.masterKeyOrResourceToken == null && (permissionFeed == null || permissionFeed.isEmpty()) && this.tokenResolver == null, + "cannot build client without any one of masterKey, resource token, permissionFeed and tokenResolver"); + + RxDocumentClientImpl client = new RxDocumentClientImpl(serviceEndpoint, + masterKeyOrResourceToken, + permissionFeed, + connectionPolicy, + desiredConsistencyLevel, + configs, + tokenResolver); + client.init(); + return client; + } + + public Configs getConfigs() { + return configs; + } + + public void setConfigs(Configs configs) { + this.configs = configs; + } + + public ConnectionPolicy getConnectionPolicy() { + return connectionPolicy; + } + + public void setConnectionPolicy(ConnectionPolicy connectionPolicy) { + this.connectionPolicy = connectionPolicy; + } + + public ConsistencyLevel getDesiredConsistencyLevel() { + return desiredConsistencyLevel; + } + + public void setDesiredConsistencyLevel(ConsistencyLevel desiredConsistencyLevel) { + this.desiredConsistencyLevel = desiredConsistencyLevel; + } + + public List getPermissionFeed() { + return permissionFeed; + } + + public void setPermissionFeed(List permissionFeed) { + this.permissionFeed = permissionFeed; + } + + public String getMasterKeyOrResourceToken() { + return masterKeyOrResourceToken; + } + + public void setMasterKeyOrResourceToken(String masterKeyOrResourceToken) { + this.masterKeyOrResourceToken = masterKeyOrResourceToken; + } + + public URI getServiceEndpoint() { + return serviceEndpoint; + } + + public void setServiceEndpoint(URI serviceEndpoint) { + this.serviceEndpoint = serviceEndpoint; + } + + public TokenResolver getTokenResolver() { + return tokenResolver; + } + + public void setTokenResolver(TokenResolver tokenResolver) { + this.tokenResolver = tokenResolver; + } + } + + /** + * Gets the default service endpoint as passed in by the user during construction. + * + * @return the service endpoint URI + */ + URI getServiceEndpoint(); + + /** + * Gets the current write endpoint chosen based on availability and preference. + * + * @return the write endpoint URI + */ + URI getWriteEndpoint(); + + /** + * Gets the current read endpoint chosen based on availability and preference. + * + * @return the read endpoint URI + */ + URI getReadEndpoint(); + + /** + * Gets the connection policy + * + * @return the connection policy + */ + ConnectionPolicy getConnectionPolicy(); + + /** + * Creates a database. + *
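Because the Builder samples above only show the master-key path, here is a sketch of the TokenResolver variant described in the withTokenResolver docs; the resolver implementation itself is elided, since its functional method is not shown in this hunk, and the helper name is illustrative.

    class TokenResolverClientSketch {
        static AsyncDocumentClient buildWithTokenResolver(String serviceEndpoint,
                                                          TokenResolver tokenResolver) {
            // build() accepts a client with no master key, resource token or permission
            // feed as long as a tokenResolver is supplied (see the validation above).
            return new AsyncDocumentClient.Builder()
                    .withServiceEndpoint(serviceEndpoint)
                    .withTokenResolver(tokenResolver)
                    .withConnectionPolicy(new ConnectionPolicy())
                    .withConsistencyLevel(ConsistencyLevel.SESSION)
                    .build();
        }
    }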

+ * After subscription the operation will be performed. + * The {@link Flux} upon successful completion will contain a single resource response with the created database. + * In case of failure the {@link Flux} will error. + * + * @param database the database. + * @param options the request options. + * @return an {@link Flux} containing the single resource response with the created database or an error. + */ + Flux> createDatabase(Database database, RequestOptions options); + + /** + * Deletes a database. + *

+ * After subscription the operation will be performed. + * The {@link Flux} upon successful completion will contain a single resource response with the deleted database. + * In case of failure the {@link Flux} will error. + * + * @param databaseLink the database link. + * @param options the request options. + * @return an {@link Flux} containing the single resource response with the deleted database or an error. + */ + Flux> deleteDatabase(String databaseLink, RequestOptions options); + + /** + * Reads a database. + *

+ * After subscription the operation will be performed. + * The {@link Flux} upon successful completion will contain a single resource response with the read database. + * In case of failure the {@link Flux} will error. + * + * @param databaseLink the database link. + * @param options the request options. + * @return an {@link Flux} containing the single resource response with the read database or an error. + */ + Flux> readDatabase(String databaseLink, RequestOptions options); + + /** + * Reads all databases. + *

+ * After subscription the operation will be performed. + * The {@link Flux} will contain one or several feed response of the read databases. + * In case of failure the {@link Flux} will error. + * + * @param options the feed options. + * @return an {@link Flux} containing one or several feed response pages of read databases or an error. + */ + Flux> readDatabases(FeedOptions options); + + /** + * Query for databases. + *

+ * After subscription the operation will be performed. + * The {@link Flux} will contain one or several feed response of the read databases. + * In case of failure the {@link Flux} will error. + * + * @param query the query. + * @param options the feed options. + * @return an {@link Flux} containing one or several feed response pages of read databases or an error. + */ + Flux> queryDatabases(String query, FeedOptions options); + + /** + * Query for databases. + *

+ * After subscription the operation will be performed. + * The {@link Flux} will contain one or several feed response of the obtained databases. + * In case of failure the {@link Flux} will error. + * + * @param querySpec the SQL query specification. + * @param options the feed options. + * @return an {@link Flux} containing one or several feed response pages of the obtained databases or an error. + */ + Flux> queryDatabases(SqlQuerySpec querySpec, FeedOptions options); + + /** + * Creates a document collection. + *

+ * After subscription the operation will be performed. + * The {@link Flux} upon successful completion will contain a single resource response with the created collection. + * In case of failure the {@link Flux} will error. + * + * @param databaseLink the database link. + * @param collection the collection. + * @param options the request options. + * @return an {@link Flux} containing the single resource response with the created collection or an error. + */ + Flux> createCollection(String databaseLink, DocumentCollection collection, + RequestOptions options); + + /** + * Replaces a document collection. + *

+ * After subscription the operation will be performed. + * The {@link Flux} upon successful completion will contain a single resource response with the replaced document collection. + * In case of failure the {@link Flux} will error. + * + * @param collection the document collection to use. + * @param options the request options. + * @return an {@link Flux} containing the single resource response with the replaced document collection or an error. + */ + Flux> replaceCollection(DocumentCollection collection, RequestOptions options); + + /** + * Deletes a document collection by the collection link. + *

+ * After subscription the operation will be performed. + * The {@link Flux} upon successful completion will contain a single resource response for the deleted database. + * In case of failure the {@link Flux} will error. + * + * @param collectionLink the collection link. + * @param options the request options. + * @return an {@link Flux} containing the single resource response for the deleted database or an error. + */ + Flux> deleteCollection(String collectionLink, RequestOptions options); + + /** + * Reads a document collection by the collection link. + *

+ * After subscription the operation will be performed. + * The {@link Flux} upon successful completion will contain a single resource response with the read collection. + * In case of failure the {@link Flux} will error. + * + * @param collectionLink the collection link. + * @param options the request options. + * @return an {@link Flux} containing the single resource response with the read collection or an error. + */ + Flux> readCollection(String collectionLink, RequestOptions options); + + /** + * Reads all document collections in a database. + *

+     * After subscription the operation will be performed.
+     * The {@link Flux} will contain one or several feed response of the read collections.
+     * In case of failure the {@link Flux} will error.
+     *
+     * @param databaseLink the database link.
+     * @param options the feed options.
+     * @return an {@link Flux} containing one or several feed response pages of the read collections or an error.
+     */
+    Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, FeedOptions options);
+
+    /**
+     * Query for document collections in a database.
+     *

+ * After subscription the operation will be performed. + * The {@link Flux} will contain one or several feed response of the obtained collections. + * In case of failure the {@link Flux} will error. + * + * @param databaseLink the database link. + * @param query the query. + * @param options the feed options. + * @return an {@link Flux} containing one or several feed response pages of the obtained collections or an error. + */ + Flux> queryCollections(String databaseLink, String query, FeedOptions options); + + /** + * Query for document collections in a database. + *

+ * After subscription the operation will be performed. + * The {@link Flux} will contain one or several feed response of the obtained collections. + * In case of failure the {@link Flux} will error. + * + * @param databaseLink the database link. + * @param querySpec the SQL query specification. + * @param options the feed options. + * @return an {@link Flux} containing one or several feed response pages of the obtained collections or an error. + */ + Flux> queryCollections(String databaseLink, SqlQuerySpec querySpec, FeedOptions options); + + /** + * Creates a document. + *

+ * After subscription the operation will be performed. + * The {@link Flux} upon successful completion will contain a single resource response with the created document. + * In case of failure the {@link Flux} will error. + * + * @param collectionLink the link to the parent document collection. + * @param document the document represented as a POJO or Document object. + * @param options the request options. + * @param disableAutomaticIdGeneration the flag for disabling automatic id generation. + * @return an {@link Flux} containing the single resource response with the created document or an error. + */ + Flux> createDocument(String collectionLink, Object document, RequestOptions options, + boolean disableAutomaticIdGeneration); + + /** + * Upserts a document. + *
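A usage sketch for the createDocument method declared just above, assuming a client built as in the earlier Builder sample; the collection link format, the Document JSON constructor, and the getResource() accessor follow the existing data-plane SDK surface and should be read as assumptions.

    class CreateDocumentSketch {
        static void createOne(AsyncDocumentClient client) {
            Document document = new Document("{ \"id\": \"item-1\", \"city\": \"Seattle\" }");

            client.createDocument("dbs/myDatabase/colls/myCollection",   // assumed link format
                                  document,
                                  new RequestOptions(),
                                  false)                                  // keep the supplied id
                  .single()   // exactly one ResourceResponse is expected on success
                  .subscribe(
                          response -> System.out.println("Created: " + response.getResource()),
                          error -> System.err.println("Create failed: " + error));
        }
    }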

+ * After subscription the operation will be performed. + * The {@link Flux} upon successful completion will contain a single resource response with the upserted document. + * In case of failure the {@link Flux} will error. + * + * @param collectionLink the link to the parent document collection. + * @param document the document represented as a POJO or Document object to upsert. + * @param options the request options. + * @param disableAutomaticIdGeneration the flag for disabling automatic id generation. + * @return an {@link Flux} containing the single resource response with the upserted document or an error. + */ + Flux> upsertDocument(String collectionLink, Object document, RequestOptions options, + boolean disableAutomaticIdGeneration); + + /** + * Replaces a document using a POJO object. + *

+ * After subscription the operation will be performed. + * The {@link Flux} upon successful completion will contain a single resource response with the replaced document. + * In case of failure the {@link Flux} will error. + * + * @param documentLink the document link. + * @param document the document represented as a POJO or Document object. + * @param options the request options. + * @return an {@link Flux} containing the single resource response with the replaced document or an error. + */ + Flux> replaceDocument(String documentLink, Object document, RequestOptions options); + + /** + * Replaces a document with the passed in document. + *

+ * After subscription the operation will be performed. + * The {@link Flux} upon successful completion will contain a single resource response with the replaced document. + * In case of failure the {@link Flux} will error. + * + * @param document the document to replace (containing the document id). + * @param options the request options. + * @return an {@link Flux} containing the single resource response with the replaced document or an error. + */ + Flux> replaceDocument(Document document, RequestOptions options); + + /** + * Deletes a document by the document link. + *

+ * After subscription the operation will be performed. + * The {@link Flux} upon successful completion will contain a single resource response for the deleted document. + * In case of failure the {@link Flux} will error. + * + * @param documentLink the document link. + * @param options the request options. + * @return an {@link Flux} containing the single resource response for the deleted document or an error. + */ + Flux> deleteDocument(String documentLink, RequestOptions options); + + /** + * Reads a document by the document link. + *

+ * After subscription the operation will be performed. + * The {@link Flux} upon successful completion will contain a single resource response with the read document. + * In case of failure the {@link Flux} will error. + * + * @param documentLink the document link. + * @param options the request options. + * @return an {@link Flux} containing the single resource response with the read document or an error. + */ + Flux> readDocument(String documentLink, RequestOptions options); + + /** + * Reads all documents in a document collection. + *

+ * After subscription the operation will be performed. + * The {@link Flux} will contain one or several feed response of the read documents. + * In case of failure the {@link Flux} will error. + * + * @param collectionLink the collection link. + * @param options the feed options. + * @return an {@link Flux} containing one or several feed response pages of the read documents or an error. + */ + Flux> readDocuments(String collectionLink, FeedOptions options); + + + /** + * Query for documents in a document collection. + *

+ * After subscription the operation will be performed. + * The {@link Flux} will contain one or several feed response of the obtained documents. + * In case of failure the {@link Flux} will error. + * + * @param collectionLink the link to the parent document collection. + * @param query the query. + * @param options the feed options. + * @return an {@link Flux} containing one or several feed response pages of the obtained document or an error. + */ + Flux> queryDocuments(String collectionLink, String query, FeedOptions options); + + /** + * Query for documents in a document collection. + *

+ * After subscription the operation will be performed. + * The {@link Flux} will contain one or several feed response of the obtained documents. + * In case of failure the {@link Flux} will error. + * + * @param collectionLink the link to the parent document collection. + * @param querySpec the SQL query specification. + * @param options the feed options. + * @return an {@link Flux} containing one or several feed response pages of the obtained documents or an error. + */ + Flux> queryDocuments(String collectionLink, SqlQuerySpec querySpec, FeedOptions options); + + /** + * Query for documents change feed in a document collection. + * After subscription the operation will be performed. + * The {@link Flux} will contain one or several feed response pages of the obtained documents. + * In case of failure the {@link Flux} will error. + * + * @param collectionLink the link to the parent document collection. + * @param changeFeedOptions the change feed options. + * @return an {@link Flux} containing one or several feed response pages of the obtained documents or an error. + */ + Flux> queryDocumentChangeFeed(String collectionLink, + ChangeFeedOptions changeFeedOptions); + + /** + * Reads all partition key ranges in a document collection. + * After subscription the operation will be performed. + * The {@link Flux} will contain one or several feed response pages of the obtained partition key ranges. + * In case of failure the {@link Flux} will error. + * + * @param collectionLink the link to the parent document collection. + * @param options the feed options. + * @return an {@link Flux} containing one or several feed response pages of the obtained partition key ranges or an error. + */ + Flux> readPartitionKeyRanges(String collectionLink, FeedOptions options); + + /** + * Creates a stored procedure. + *
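To make the SqlQuerySpec overloads above concrete, a parameterized-query sketch follows; SqlParameter, SqlParameterList, the maxItemCount/results accessors and the links are assumptions drawn from the public query types this interface references.

    class QueryDocumentsSketch {
        static void queryByCity(AsyncDocumentClient client) {
            SqlQuerySpec querySpec = new SqlQuerySpec(
                    "SELECT * FROM c WHERE c.city = @city",
                    new SqlParameterList(new SqlParameter("@city", "Seattle")));

            FeedOptions options = new FeedOptions();
            options.maxItemCount(100);   // page size; setter name is an assumption

            client.queryDocuments("dbs/myDatabase/colls/myCollection", querySpec, options)
                  .subscribe(page -> page.results()
                                         .forEach(doc -> System.out.println(doc.toJson())));
        }
    }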

+ * After subscription the operation will be performed. + * The {@link Flux} upon successful completion will contain a single resource response with the created stored procedure. + * In case of failure the {@link Flux} will error. + * + * @param collectionLink the collection link. + * @param storedProcedure the stored procedure to create. + * @param options the request options. + * @return an {@link Flux} containing the single resource response with the created stored procedure or an error. + */ + Flux> createStoredProcedure(String collectionLink, StoredProcedure storedProcedure, + RequestOptions options); + + /** + * Upserts a stored procedure. + *

+ * After subscription the operation will be performed. + * The {@link Flux} upon successful completion will contain a single resource response with the upserted stored procedure. + * In case of failure the {@link Flux} will error. + * + * @param collectionLink the collection link. + * @param storedProcedure the stored procedure to upsert. + * @param options the request options. + * @return an {@link Flux} containing the single resource response with the upserted stored procedure or an error. + */ + Flux> upsertStoredProcedure(String collectionLink, StoredProcedure storedProcedure, + RequestOptions options); + + /** + * Replaces a stored procedure. + *

+ * After subscription the operation will be performed. + * The {@link Flux} upon successful completion will contain a single resource response with the replaced stored procedure. + * In case of failure the {@link Flux} will error. + * + * @param storedProcedure the stored procedure to use. + * @param options the request options. + * @return an {@link Flux} containing the single resource response with the replaced stored procedure or an error. + */ + Flux> replaceStoredProcedure(StoredProcedure storedProcedure, RequestOptions options); + + /** + * Deletes a stored procedure by the stored procedure link. + *

+     * After subscription the operation will be performed.
+     * The {@link Flux} upon successful completion will contain a single resource response for the deleted stored procedure.
+     * In case of failure the {@link Flux} will error.
+     *
+     * @param storedProcedureLink the stored procedure link.
+     * @param options the request options.
+     * @return an {@link Flux} containing the single resource response for the deleted stored procedure or an error.
+     */
+    Flux<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink, RequestOptions options);
+
+    /**
+     * Reads a stored procedure by the stored procedure link.
+     *

+ * After subscription the operation will be performed. + * The {@link Flux} upon successful completion will contain a single resource response with the read stored procedure. + * In case of failure the {@link Flux} will error. + * + * @param storedProcedureLink the stored procedure link. + * @param options the request options. + * @return an {@link Flux} containing the single resource response with the read stored procedure or an error. + */ + Flux> readStoredProcedure(String storedProcedureLink, RequestOptions options); + + /** + * Reads all stored procedures in a document collection link. + *

+ * After subscription the operation will be performed. + * The {@link Flux} will contain one or several feed response pages of the read stored procedures. + * In case of failure the {@link Flux} will error. + * + * @param collectionLink the collection link. + * @param options the feed options. + * @return an {@link Flux} containing one or several feed response pages of the read stored procedures or an error. + */ + Flux> readStoredProcedures(String collectionLink, FeedOptions options); + + /** + * Query for stored procedures in a document collection. + *

+ * After subscription the operation will be performed. + * The {@link Flux} will contain one or several feed response pages of the obtained stored procedures. + * In case of failure the {@link Flux} will error. + * + * @param collectionLink the collection link. + * @param query the query. + * @param options the feed options. + * @return an {@link Flux} containing one or several feed response pages of the obtained stored procedures or an error. + */ + Flux> queryStoredProcedures(String collectionLink, String query, FeedOptions options); + + /** + * Query for stored procedures in a document collection. + *

+ * After subscription the operation will be performed. + * The {@link Flux} will contain one or several feed response pages of the obtained stored procedures. + * In case of failure the {@link Flux} will error. + * + * @param collectionLink the collection link. + * @param querySpec the SQL query specification. + * @param options the feed options. + * @return an {@link Flux} containing one or several feed response pages of the obtained stored procedures or an error. + */ + Flux> queryStoredProcedures(String collectionLink, SqlQuerySpec querySpec, + FeedOptions options); + + /** + * Executes a stored procedure by the stored procedure link. + *

+ * After subscription the operation will be performed. + * The {@link Flux} upon successful completion will contain a single resource response with the stored procedure response. + * In case of failure the {@link Flux} will error. + * + * @param storedProcedureLink the stored procedure link. + * @param procedureParams the array of procedure parameter values. + * @return an {@link Flux} containing the single resource response with the stored procedure response or an error. + */ + Flux executeStoredProcedure(String storedProcedureLink, Object[] procedureParams); + + /** + * Executes a stored procedure by the stored procedure link. + *

+ * After subscription the operation will be performed. + * The {@link Flux} upon successful completion will contain a single resource response with the stored procedure response. + * In case of failure the {@link Flux} will error. + * + * @param storedProcedureLink the stored procedure link. + * @param options the request options. + * @param procedureParams the array of procedure parameter values. + * @return an {@link Flux} containing the single resource response with the stored procedure response or an error. + */ + Flux executeStoredProcedure(String storedProcedureLink, RequestOptions options, + Object[] procedureParams); + + /** + * Creates a trigger. + *
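A sketch of invoking executeStoredProcedure with a partition key; RequestOptions#setPartitionKey, the PartitionKey type and StoredProcedureResponse#responseAsString are assumptions, as those members are not part of this hunk.

    class ExecuteStoredProcedureSketch {
        static void runBulkImport(AsyncDocumentClient client) {
            RequestOptions options = new RequestOptions();
            options.setPartitionKey(new PartitionKey("user-42"));   // assumed setter/type

            client.executeStoredProcedure("dbs/myDatabase/colls/myCollection/sprocs/bulkImport",
                                          options,
                                          new Object[] { "arg0", 10 })
                  .single()
                  .subscribe(response -> System.out.println(response.responseAsString()));
        }
    }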

+ * After subscription the operation will be performed. + * The {@link Flux} upon successful completion will contain a single resource response with the created trigger. + * In case of failure the {@link Flux} will error. + * + * @param collectionLink the collection link. + * @param trigger the trigger. + * @param options the request options. + * @return an {@link Flux} containing the single resource response with the created trigger or an error. + */ + Flux> createTrigger(String collectionLink, Trigger trigger, RequestOptions options); + + /** + * Upserts a trigger. + *

+ * After subscription the operation will be performed. + * The {@link Flux} upon successful completion will contain a single resource response with the upserted trigger. + * In case of failure the {@link Flux} will error. + * + * @param collectionLink the collection link. + * @param trigger the trigger to upsert. + * @param options the request options. + * @return an {@link Flux} containing the single resource response with the upserted trigger or an error. + */ + Flux> upsertTrigger(String collectionLink, Trigger trigger, RequestOptions options); + + /** + * Replaces a trigger. + *

+ * After subscription the operation will be performed. + * The {@link Flux} upon successful completion will contain a single resource response with the replaced trigger. + * In case of failure the {@link Flux} will error. + * + * @param trigger the trigger to use. + * @param options the request options. + * @return an {@link Flux} containing the single resource response with the replaced trigger or an error. + */ + Flux> replaceTrigger(Trigger trigger, RequestOptions options); + + /** + * Deletes a trigger. + *

+ * After subscription the operation will be performed. + * The {@link Flux} upon successful completion will contain a single resource response for the deleted trigger. + * In case of failure the {@link Flux} will error. + * + * @param triggerLink the trigger link. + * @param options the request options. + * @return an {@link Flux} containing the single resource response for the deleted trigger or an error. + */ + Flux> deleteTrigger(String triggerLink, RequestOptions options); + + /** + * Reads a trigger by the trigger link. + *

+ * After subscription the operation will be performed. + * The {@link Flux} upon successful completion will contain a single resource response for the read trigger. + * In case of failure the {@link Flux} will error. + * + * @param triggerLink the trigger link. + * @param options the request options. + * @return an {@link Flux} containing the single resource response for the read trigger or an error. + */ + Flux> readTrigger(String triggerLink, RequestOptions options); + + /** + * Reads all triggers in a document collection. + *

+ * After subscription the operation will be performed. + * The {@link Flux} will contain one or several feed response pages of the read triggers. + * In case of failure the {@link Flux} will error. + * + * @param collectionLink the collection link. + * @param options the feed options. + * @return an {@link Flux} containing one or several feed response pages of the read triggers or an error. + */ + Flux> readTriggers(String collectionLink, FeedOptions options); + + /** + * Query for triggers. + *

+ * After subscription the operation will be performed. + * The {@link Flux} will contain one or several feed response pages of the obtained triggers. + * In case of failure the {@link Flux} will error. + * + * @param collectionLink the collection link. + * @param query the query. + * @param options the feed options. + * @return an {@link Flux} containing one or several feed response pages of the obtained triggers or an error. + */ + Flux> queryTriggers(String collectionLink, String query, FeedOptions options); + + /** + * Query for triggers. + *

+ * After subscription the operation will be performed. + * The {@link Flux} will contain one or several feed response pages of the obtained triggers. + * In case of failure the {@link Flux} will error. + * + * @param collectionLink the collection link. + * @param querySpec the SQL query specification. + * @param options the feed options. + * @return an {@link Flux} containing one or several feed response pages of the obtained triggers or an error. + */ + Flux> queryTriggers(String collectionLink, SqlQuerySpec querySpec, FeedOptions options); + + /** + * Creates a user defined function. + *

+ * After subscription the operation will be performed. + * The {@link Flux} upon successful completion will contain a single resource response with the created user defined function. + * In case of failure the {@link Flux} will error. + * + * @param collectionLink the collection link. + * @param udf the user defined function. + * @param options the request options. + * @return an {@link Flux} containing the single resource response with the created user defined function or an error. + */ + Flux> createUserDefinedFunction(String collectionLink, UserDefinedFunction udf, + RequestOptions options); + + /** + * Upserts a user defined function. + *

+ * After subscription the operation will be performed. + * The {@link Flux} upon successful completion will contain a single resource response with the upserted user defined function. + * In case of failure the {@link Flux} will error. + * + * @param collectionLink the collection link. + * @param udf the user defined function to upsert. + * @param options the request options. + * @return an {@link Flux} containing the single resource response with the upserted user defined function or an error. + */ + Flux> upsertUserDefinedFunction(String collectionLink, UserDefinedFunction udf, + RequestOptions options); + + /** + * Replaces a user defined function. + *

+ * After subscription the operation will be performed. + * The {@link Flux} upon successful completion will contain a single resource response with the replaced user defined function. + * In case of failure the {@link Flux} will error. + * + * @param udf the user defined function. + * @param options the request options. + * @return an {@link Flux} containing the single resource response with the replaced user defined function or an error. + */ + Flux> replaceUserDefinedFunction(UserDefinedFunction udf, RequestOptions options); + + /** + * Deletes a user defined function. + *

+     * After subscription the operation will be performed.
+     * The {@link Flux} upon successful completion will contain a single resource response for the deleted user defined function.
+     * In case of failure the {@link Flux} will error.
+     *
+     * @param udfLink the user defined function link.
+     * @param options the request options.
+     * @return an {@link Flux} containing the single resource response for the deleted user defined function or an error.
+     */
+    Flux<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink, RequestOptions options);
+
+    /**
+     * Reads a user defined function.
+     *

+ * After subscription the operation will be performed. + * The {@link Flux} upon successful completion will contain a single resource response for the read user defined function. + * In case of failure the {@link Flux} will error. + * + * @param udfLink the user defined function link. + * @param options the request options. + * @return an {@link Flux} containing the single resource response for the read user defined function or an error. + */ + Flux> readUserDefinedFunction(String udfLink, RequestOptions options); + + /** + * Reads all user defined functions in a document collection. + *

+ * After subscription the operation will be performed. + * The {@link Flux} will contain one or several feed response pages of the read user defined functions. + * In case of failure the {@link Flux} will error. + * + * @param collectionLink the collection link. + * @param options the feed options. + * @return an {@link Flux} containing one or several feed response pages of the read user defined functions or an error. + */ + Flux> readUserDefinedFunctions(String collectionLink, FeedOptions options); + + /** + * Query for user defined functions. + *

+ * After subscription the operation will be performed. + * The {@link Flux} will contain one or several feed response pages of the obtained user defined functions. + * In case of failure the {@link Flux} will error. + * + * @param collectionLink the collection link. + * @param query the query. + * @param options the feed options. + * @return an {@link Flux} containing one or several feed response pages of the obtained user defined functions or an error. + */ + Flux> queryUserDefinedFunctions(String collectionLink, String query, + FeedOptions options); + + /** + * Query for user defined functions. + *

+ * After subscription the operation will be performed. + * The {@link Flux} will contain one or several feed response pages of the obtained user defined functions. + * In case of failure the {@link Flux} will error. + * + * @param collectionLink the collection link. + * @param querySpec the SQL query specification. + * @param options the feed options. + * @return an {@link Flux} containing one or several feed response pages of the obtained user defined functions or an error. + */ + Flux> queryUserDefinedFunctions(String collectionLink, SqlQuerySpec querySpec, + FeedOptions options); + + /** + * Reads a conflict. + *

+ * After subscription the operation will be performed. + * The {@link Flux} upon successful completion will contain a single resource response with the read conflict. + * In case of failure the {@link Flux} will error. + * + * @param conflictLink the conflict link. + * @param options the request options. + * @return an {@link Flux} containing the single resource response with the read conflict or an error. + */ + Flux> readConflict(String conflictLink, RequestOptions options); + + /** + * Reads all conflicts in a document collection. + *

+ * After subscription the operation will be performed. + * The {@link Flux} will contain one or several feed response pages of the read conflicts. + * In case of failure the {@link Flux} will error. + * + * @param collectionLink the collection link. + * @param options the feed options. + * @return an {@link Flux} containing one or several feed response pages of the read conflicts or an error. + */ + Flux> readConflicts(String collectionLink, FeedOptions options); + + /** + * Query for conflicts. + *

+ * After subscription the operation will be performed. + * The {@link Flux} will contain one or several feed response pages of the obtained conflicts. + * In case of failure the {@link Flux} will error. + * + * @param collectionLink the collection link. + * @param query the query. + * @param options the feed options. + * @return an {@link Flux} containing one or several feed response pages of the obtained conflicts or an error. + */ + Flux> queryConflicts(String collectionLink, String query, FeedOptions options); + + /** + * Query for conflicts. + *

+ * After subscription the operation will be performed. + * The {@link Flux} will contain one or several feed response pages of the obtained conflicts. + * In case of failure the {@link Flux} will error. + * + * @param collectionLink the collection link. + * @param querySpec the SQL query specification. + * @param options the feed options. + * @return an {@link Flux} containing one or several feed response pages of the obtained conflicts or an error. + */ + Flux> queryConflicts(String collectionLink, SqlQuerySpec querySpec, FeedOptions options); + + /** + * Deletes a conflict. + *

+ * After subscription the operation will be performed. + * The {@link Flux} upon successful completion will contain a single resource response for the deleted conflict. + * In case of failure the {@link Flux} will error. + * + * @param conflictLink the conflict link. + * @param options the request options. + * @return an {@link Flux} containing the single resource response for the deleted conflict or an error. + */ + Flux> deleteConflict(String conflictLink, RequestOptions options); + + /** + * Creates a user. + *

+ * After subscription the operation will be performed. + * The {@link Flux} upon successful completion will contain a single resource response with the created user. + * In case of failure the {@link Flux} will error. + * + * @param databaseLink the database link. + * @param user the user to create. + * @param options the request options. + * @return an {@link Flux} containing the single resource response with the created user or an error. + */ + Flux> createUser(String databaseLink, User user, RequestOptions options); + + /** + * Upserts a user. + *

+ * After subscription the operation will be performed. + * The {@link Flux} upon successful completion will contain a single resource response with the upserted user. + * In case of failure the {@link Flux} will error. + * + * @param databaseLink the database link. + * @param user the user to upsert. + * @param options the request options. + * @return an {@link Flux} containing the single resource response with the upserted user or an error. + */ + Flux> upsertUser(String databaseLink, User user, RequestOptions options); + + /** + * Replaces a user. + *

+ * After subscription the operation will be performed. + * The {@link Flux} upon successful completion will contain a single resource response with the replaced user. + * In case of failure the {@link Flux} will error. + * + * @param user the user to use. + * @param options the request options. + * @return an {@link Flux} containing the single resource response with the replaced user or an error. + */ + Flux> replaceUser(User user, RequestOptions options); + + /** + * Deletes a user. + *

+ * After subscription the operation will be performed. + * The {@link Flux} upon successful completion will contain a single resource response for the deleted user. + * In case of failure the {@link Flux} will error. + * + * @param userLink the user link. + * @param options the request options. + * @return an {@link Flux} containing the single resource response for the deleted user or an error. + */ + Flux> deleteUser(String userLink, RequestOptions options); + + /** + * Reads a user. + *

+ * After subscription the operation will be performed. + * The {@link Flux} upon successful completion will contain a single resource response with the read user. + * In case of failure the {@link Flux} will error. + * + * @param userLink the user link. + * @param options the request options. + * @return an {@link Flux} containing the single resource response with the read user or an error. + */ + Flux> readUser(String userLink, RequestOptions options); + + /** + * Reads all users in a database. + *

+ * After subscription the operation will be performed. + * The {@link Flux} will contain one or several feed response pages of the read users. + * In case of failure the {@link Flux} will error. + * + * @param databaseLink the database link. + * @param options the feed options. + * @return an {@link Flux} containing one or several feed response pages of the read users or an error. + */ + Flux> readUsers(String databaseLink, FeedOptions options); + + /** + * Query for users. + *

+ * After subscription the operation will be performed. + * The {@link Flux} will contain one or several feed response pages of the obtained users. + * In case of failure the {@link Flux} will error. + * + * @param databaseLink the database link. + * @param query the query. + * @param options the feed options. + * @return an {@link Flux} containing one or several feed response pages of the obtained users or an error. + */ + Flux> queryUsers(String databaseLink, String query, FeedOptions options); + + /** + * Query for users. + *

+ * After subscription the operation will be performed. + * The {@link Flux} will contain one or several feed response pages of the obtained users. + * In case of failure the {@link Flux} will error. + * + * @param databaseLink the database link. + * @param querySpec the SQL query specification. + * @param options the feed options. + * @return an {@link Flux} containing one or several feed response pages of the obtained users or an error. + */ + Flux> queryUsers(String databaseLink, SqlQuerySpec querySpec, FeedOptions options); + + /** + * Creates a permission. + *

+ * After subscription the operation will be performed. + * The {@link Flux} upon successful completion will contain a single resource response with the created permission. + * In case of failure the {@link Flux} will error. + * + * @param userLink the user link. + * @param permission the permission to create. + * @param options the request options. + * @return an {@link Flux} containing the single resource response with the created permission or an error. + */ + Flux> createPermission(String userLink, Permission permission, RequestOptions options); + + /** + * Upserts a permission. + *

+ * After subscription the operation will be performed. + * The {@link Flux} upon successful completion will contain a single resource response with the upserted permission. + * In case of failure the {@link Flux} will error. + * + * @param userLink the user link. + * @param permission the permission to upsert. + * @param options the request options. + * @return an {@link Flux} containing the single resource response with the upserted permission or an error. + */ + Flux> upsertPermission(String userLink, Permission permission, RequestOptions options); + + /** + * Replaces a permission. + *

+ * After subscription the operation will be performed. + * The {@link Flux} upon successful completion will contain a single resource response with the replaced permission. + * In case of failure the {@link Flux} will error. + * + * @param permission the permission to use. + * @param options the request options. + * @return an {@link Flux} containing the single resource response with the replaced permission or an error. + */ + Flux> replacePermission(Permission permission, RequestOptions options); + + /** + * Deletes a permission. + *

+ * After subscription the operation will be performed. + * The {@link Flux} upon successful completion will contain a single resource response for the deleted permission. + * In case of failure the {@link Flux} will error. + * + * @param permissionLink the permission link. + * @param options the request options. + * @return an {@link Flux} containing the single resource response for the deleted permission or an error. + */ + Flux> deletePermission(String permissionLink, RequestOptions options); + + /** + * Reads a permission. + *

+ * After subscription the operation will be performed. + * The {@link Flux} upon successful completion will contain a single resource response with the read permission. + * In case of failure the {@link Flux} will error. + * + * @param permissionLink the permission link. + * @param options the request options. + * @return an {@link Flux} containing the single resource response with the read permission or an error. + */ + Flux> readPermission(String permissionLink, RequestOptions options); + + /** + * Reads all permissions. + *

+ * After subscription the operation will be performed. + * The {@link Flux} will contain one or several feed response pages of the read permissions. + * In case of failure the {@link Flux} will error. + * + * @param permissionLink the permission link. + * @param options the feed options. + * @return an {@link Flux} containing one or several feed response pages of the read permissions or an error. + */ + Flux> readPermissions(String permissionLink, FeedOptions options); + + /** + * Query for permissions. + *

+ * After subscription the operation will be performed. + * The {@link Flux} will contain one or several feed response pages of the obtained permissions. + * In case of failure the {@link Flux} will error. + * + * @param permissionLink the permission link. + * @param query the query. + * @param options the feed options. + * @return an {@link Flux} containing one or several feed response pages of the obtained permissions or an error. + */ + Flux> queryPermissions(String permissionLink, String query, FeedOptions options); + + /** + * Query for permissions. + *

+ * After subscription the operation will be performed. + * The {@link Flux} will contain one or several feed response pages of the obtained permissions. + * In case of failure the {@link Flux} will error. + * + * @param permissionLink the permission link. + * @param querySpec the SQL query specification. + * @param options the feed options. + * @return an {@link Flux} containing one or several feed response pages of the obtained permissions or an error. + */ + Flux> queryPermissions(String permissionLink, SqlQuerySpec querySpec, FeedOptions options); + + /** + * Replaces an offer. + *

+ * After subscription the operation will be performed. + * The {@link Flux} upon successful completion will contain a single resource response with the replaced offer. + * In case of failure the {@link Flux} will error. + * + * @param offer the offer to use. + * @return an {@link Flux} containing the single resource response with the replaced offer or an error. + */ + Flux> replaceOffer(Offer offer); + + /** + * Reads an offer. + *

+ * After subscription the operation will be performed. + * The {@link Flux} upon successful completion will contain a single resource response with the read offer. + * In case of failure the {@link Flux} will error. + * + * @param offerLink the offer link. + * @return an {@link Flux} containing the single resource response with the read offer or an error. + */ + Flux> readOffer(String offerLink); + + /** + * Reads offers. + *

+ * After subscription the operation will be performed. + * The {@link Flux} will contain one or several feed response pages of the read offers. + * In case of failure the {@link Flux} will error. + * + * @param options the feed options. + * @return an {@link Flux} containing one or several feed response pages of the read offers or an error. + */ + Flux> readOffers(FeedOptions options); + + /** + * Query for offers in a database. + *

+     * After subscription the operation will be performed.
+     * The {@link Flux} will contain one or several feed response pages of the obtained offers.
+     * In case of failure the {@link Flux} will error.
+     *
+     * @param query the query.
+     * @param options the feed options.
+     * @return an {@link Flux} containing one or several feed response pages of the obtained offers or an error.
+     */
+    Flux<FeedResponse<Offer>> queryOffers(String query, FeedOptions options);
+
+    /**
+     * Query for offers in a database.
+     *

+ * After subscription the operation will be performed. + * The {@link Flux} will contain one or several feed response pages of the obtained offers. + * In case of failure the {@link Flux} will error. + * + * @param querySpec the query specification. + * @param options the feed options. + * @return a {@link Flux} containing one or several feed response pages of the obtained offers or an error. + */ + Flux> queryOffers(SqlQuerySpec querySpec, FeedOptions options); + + /** + * Gets database account information. + *

+ * After subscription the operation will be performed. + * The {@link Flux} upon successful completion will contain a single resource response with the database account. + * In case of failure the {@link Flux} will error. + * + * @return an {@link Flux} containing the single resource response with the database account or an error. + */ + Flux getDatabaseAccount(); + + /** + * Close this {@link AsyncDocumentClient} instance and cleans up the resources. + */ + void close(); + +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/AuthorizationTokenProvider.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/AuthorizationTokenProvider.java new file mode 100644 index 0000000000000..5e654336b6f05 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/AuthorizationTokenProvider.java @@ -0,0 +1,41 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +import java.util.Map; + +/** + * Represents types that can provide functionality to generate authorization token for the Azure Cosmos DB database + * service. 
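Editor's note: for readers unfamiliar with the Reactor-based surface documented above, the sketch below shows how one of these Flux-returning methods is typically consumed. It is illustrative only; client construction is elided, and the accessor names (for example results()) reflect a reading of this SDK version rather than anything shown in this diff.

import com.azure.data.cosmos.FeedOptions;
import com.azure.data.cosmos.internal.AsyncDocumentClient;

class QueryOffersSketch {
    static void printOfferPages(AsyncDocumentClient asyncClient) {
        // Nothing is sent to the service until subscribe(...) is called.
        asyncClient.queryOffers("SELECT * FROM root", new FeedOptions())
                   .subscribe(
                       page -> System.out.println("received a page with "
                               + page.results().size() + " offers"),
                       error -> System.err.println("queryOffers failed: " + error));
    }
}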
+ */ +public interface AuthorizationTokenProvider { + String generateKeyAuthorizationSignature(String verb, + String resourceIdOrFullName, + ResourceType resourceType, + Map headers); + + String getAuthorizationTokenUsingResourceTokens(Map resourceTokens, + String path, + String resourceId); +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/AuthorizationTokenType.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/AuthorizationTokenType.java new file mode 100644 index 0000000000000..ec36d4d000981 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/AuthorizationTokenType.java @@ -0,0 +1,35 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal; + +public enum AuthorizationTokenType { + Invalid, + PrimaryMasterKey, + PrimaryReadonlyMasterKey, + SecondaryMasterKey, + SecondaryReadonlyMasterKey, + SystemReadOnly, + SystemReadWrite, + SystemAll, + ResourceToken +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/BackoffRetryUtility.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/BackoffRetryUtility.java new file mode 100644 index 0000000000000..b319ede033e03 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/BackoffRetryUtility.java @@ -0,0 +1,73 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
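Editor's note: a minimal sketch of the contract above, assuming the concrete BaseAuthorizationTokenProvider introduced later in this change; masterKey and resourceId are placeholder inputs, and import locations are assumptions.

import com.azure.data.cosmos.internal.AuthorizationTokenProvider;
import com.azure.data.cosmos.internal.BaseAuthorizationTokenProvider;
import com.azure.data.cosmos.internal.HttpConstants;
import com.azure.data.cosmos.internal.ResourceType;
import com.azure.data.cosmos.internal.Utils;

import java.util.HashMap;
import java.util.Map;

class AuthHeaderSketch {
    static String signRead(String masterKey, String resourceId) {
        AuthorizationTokenProvider provider = new BaseAuthorizationTokenProvider(masterKey);
        Map<String, String> headers = new HashMap<>();
        // The signature covers the x-ms-date (or date) header, so it must be set before signing.
        headers.put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
        return provider.generateKeyAuthorizationSignature(
                "get", resourceId, ResourceType.Document, headers);
    }
}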
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal; + +import reactor.core.publisher.Mono; + +import java.time.Duration; +import java.util.concurrent.Callable; +import java.util.function.Function; + +/** + * While this class is public, but it is not part of our published public APIs. + * This is meant to be internally used only by our sdk. + */ +public class BackoffRetryUtility { + + // transforms a retryFunc to a function which can be used by Observable.retryWhen(.) + // also it invokes preRetryCallback prior to doing retry. + public static final Quadruple InitialArgumentValuePolicyArg = Quadruple.with(false, false, + Duration.ofSeconds(60), 0); + + // a helper method for invoking callback method given the retry policy. + // it also invokes the pre retry callback prior to retrying + + // a helper method for invoking callback method given the retry policy + + // a helper method for invoking callback method given the retry policy + static public Mono executeRetry(Callable> callbackMethod, + IRetryPolicy retryPolicy) { + + return Mono.defer(() -> { + // TODO: is defer required? + try { + return callbackMethod.call(); + } catch (Exception e) { + return Mono.error(e); + } + }).retryWhen(RetryUtils.toRetryWhenFunc(retryPolicy)); + } + + static public Mono executeAsync( + Function, Mono> callbackMethod, IRetryPolicy retryPolicy, + Function, Mono> inBackoffAlternateCallbackMethod, + Duration minBackoffForInBackoffCallback) { + + return Mono.defer(() -> { + // TODO: is defer required? + return callbackMethod.apply(InitialArgumentValuePolicyArg).onErrorResume( + RetryUtils.toRetryWithAlternateFunc(callbackMethod, retryPolicy, inBackoffAlternateCallbackMethod,minBackoffForInBackoffCallback)); + }); + } + +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/BaseAuthorizationTokenProvider.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/BaseAuthorizationTokenProvider.java new file mode 100644 index 0000000000000..5b6bd3439f0e2 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/BaseAuthorizationTokenProvider.java @@ -0,0 +1,370 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
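Editor's note: a rough sketch of how the executeRetry helper in BackoffRetryUtility above is meant to be used. The IRetryPolicy instance and the readSomething call are placeholders; real callers wire in policies from the client's retry-policy plumbing.

import com.azure.data.cosmos.internal.BackoffRetryUtility;
import com.azure.data.cosmos.internal.IRetryPolicy;
import reactor.core.publisher.Mono;

class RetrySketch {
    static Mono<String> withRetries(IRetryPolicy retryPolicy) {
        // The callable is re-invoked on every retry attempt the policy allows.
        return BackoffRetryUtility.executeRetry(
                () -> readSomething(),      // operation that may fail transiently
                retryPolicy);
    }

    private static Mono<String> readSomething() {
        return Mono.just("ok");             // placeholder for a real service call
    }
}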
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.internal.directconnectivity.HttpUtils; +import org.apache.commons.lang3.StringUtils; + +import javax.crypto.Mac; +import javax.crypto.SecretKey; +import javax.crypto.spec.SecretKeySpec; +import java.net.URI; +import java.security.InvalidKeyException; +import java.security.NoSuchAlgorithmException; +import java.util.Collections; +import java.util.HashSet; +import java.util.Locale; +import java.util.Map; + +/** + * This class is used internally by both client (for generating the auth header with master/system key) and by the GATEWAY when + * verifying the auth header in the Azure Cosmos DB database service. + */ +public class BaseAuthorizationTokenProvider implements AuthorizationTokenProvider { + + private static final String AUTH_PREFIX = "type=master&ver=1.0&sig="; + private final String masterKey; + private final Mac macInstance; + + public BaseAuthorizationTokenProvider(String masterKey) { + this.masterKey = masterKey; + byte[] masterKeyDecodedBytes = Utils.Base64Decoder.decode(this.masterKey.getBytes()); + SecretKey signingKey = new SecretKeySpec(masterKeyDecodedBytes, "HMACSHA256"); + try { + this.macInstance = Mac.getInstance("HMACSHA256"); + this.macInstance.init(signingKey); + } catch (NoSuchAlgorithmException | InvalidKeyException e) { + throw new IllegalStateException(e); + } + } + + private static String getResourceSegment(ResourceType resourceType) { + switch (resourceType) { + case Attachment: + return Paths.ATTACHMENTS_PATH_SEGMENT; + case Database: + return Paths.DATABASES_PATH_SEGMENT; + case Conflict: + return Paths.CONFLICTS_PATH_SEGMENT; + case Document: + return Paths.DOCUMENTS_PATH_SEGMENT; + case DocumentCollection: + return Paths.COLLECTIONS_PATH_SEGMENT; + case Offer: + return Paths.OFFERS_PATH_SEGMENT; + case Permission: + return Paths.PERMISSIONS_PATH_SEGMENT; + case StoredProcedure: + return Paths.STORED_PROCEDURES_PATH_SEGMENT; + case Trigger: + return Paths.TRIGGERS_PATH_SEGMENT; + case UserDefinedFunction: + return Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT; + case User: + return Paths.USERS_PATH_SEGMENT; + case PartitionKeyRange: + return Paths.PARTITION_KEY_RANGES_PATH_SEGMENT; + case Media: + return Paths.MEDIA_PATH_SEGMENT; + case DatabaseAccount: + return ""; + default: + return null; + } + } + + /** + * This API is a helper method to create auth header based on client request using masterkey. + * + * @param verb the verb. + * @param resourceIdOrFullName the resource id or full name + * @param resourceType the resource type. + * @param headers the request headers. + * @return the key authorization signature. + */ + public String generateKeyAuthorizationSignature(String verb, + String resourceIdOrFullName, + ResourceType resourceType, + Map headers) { + return this.generateKeyAuthorizationSignature(verb, resourceIdOrFullName, + BaseAuthorizationTokenProvider.getResourceSegment(resourceType).toLowerCase(), headers); + } + + /** + * This API is a helper method to create auth header based on client request using masterkey. 
+ * + * @param verb the verb + * @param resourceIdOrFullName the resource id or full name + * @param resourceSegment the resource segment + * @param headers the request headers + * @return the key authorization signature + */ + public String generateKeyAuthorizationSignature(String verb, + String resourceIdOrFullName, + String resourceSegment, + Map headers) { + if (verb == null || verb.isEmpty()) { + throw new IllegalArgumentException("verb"); + } + + if (resourceIdOrFullName == null) { + resourceIdOrFullName = ""; + } + + if (resourceSegment == null) { + throw new IllegalArgumentException("resourceSegment"); + } + + if (headers == null) { + throw new IllegalArgumentException("headers"); + } + + if (this.masterKey == null || this.masterKey.isEmpty()) { + throw new IllegalArgumentException("masterKey"); + } + + if(!PathsHelper.isNameBased(resourceIdOrFullName)) { + resourceIdOrFullName = resourceIdOrFullName.toLowerCase(Locale.ROOT); + } + + // Skipping lower casing of resourceId since it may now contain "ID" of the resource as part of the FullName + StringBuilder body = new StringBuilder(); + body.append(verb.toLowerCase()) + .append('\n') + .append(resourceSegment) + .append('\n') + .append(resourceIdOrFullName) + .append('\n'); + + if (headers.containsKey(HttpConstants.HttpHeaders.X_DATE)) { + body.append(headers.get(HttpConstants.HttpHeaders.X_DATE).toLowerCase()); + } + + body.append('\n'); + + if (headers.containsKey(HttpConstants.HttpHeaders.HTTP_DATE)) { + body.append(headers.get(HttpConstants.HttpHeaders.HTTP_DATE).toLowerCase()); + } + + body.append('\n'); + + Mac mac = null; + try { + mac = (Mac) this.macInstance.clone(); + } catch (CloneNotSupportedException e) { + throw new IllegalStateException(e); + } + + byte[] digest = mac.doFinal(body.toString().getBytes()); + + String auth = Utils.encodeBase64String(digest); + + return AUTH_PREFIX + auth; + } + + /** + * This API is a helper method to create auth header based on client request using resourceTokens. + * + * @param resourceTokens the resource tokens. + * @param path the path. + * @param resourceId the resource id. + * @return the authorization token. + */ + public String getAuthorizationTokenUsingResourceTokens(Map resourceTokens, + String path, + String resourceId) { + if (resourceTokens == null) { + throw new IllegalArgumentException("resourceTokens"); + } + + String resourceToken = null; + if (resourceTokens.containsKey(resourceId) && resourceTokens.get(resourceId) != null) { + resourceToken = resourceTokens.get(resourceId); + } else if (StringUtils.isEmpty(path) || StringUtils.isEmpty(resourceId)) { + if (resourceTokens.size() > 0) { + resourceToken = resourceTokens.values().iterator().next(); + } + } else { + // Get the last resource id from the path and use that to find the corresponding token. 
+ String[] pathParts = StringUtils.split(path, "/"); + String[] resourceTypes = {"dbs", "colls", "docs", "sprocs", "udfs", "triggers", "users", "permissions", + "attachments", "media", "conflicts"}; + HashSet resourceTypesSet = new HashSet(); + Collections.addAll(resourceTypesSet, resourceTypes); + + for (int i = pathParts.length - 1; i >= 0; --i) { + + if (!resourceTypesSet.contains(pathParts[i]) && resourceTokens.containsKey(pathParts[i])) { + resourceToken = resourceTokens.get(pathParts[i]); + } + } + } + + return resourceToken; + } + public String generateKeyAuthorizationSignature(String verb, URI uri, Map headers) { + if (StringUtils.isEmpty(verb)) { + throw new IllegalArgumentException(String.format(RMResources.StringArgumentNullOrEmpty, "verb")); + } + + if (uri == null) { + throw new IllegalArgumentException("uri"); + } + + if (headers == null) { + throw new IllegalArgumentException("headers"); + } + PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false); + getResourceTypeAndIdOrFullName(uri, pathInfo); + return generateKeyAuthorizationSignatureNew(verb, pathInfo.resourceIdOrFullName, pathInfo.resourcePath, + headers); + } + + public String generateKeyAuthorizationSignatureNew(String verb, String resourceIdValue, String resourceType, + Map headers) { + if (StringUtils.isEmpty(verb)) { + throw new IllegalArgumentException(String.format(RMResources.StringArgumentNullOrEmpty, "verb")); + } + + if (resourceType == null) { + throw new IllegalArgumentException(String.format(RMResources.StringArgumentNullOrEmpty, "resourceType")); // can be empty + } + + if (headers == null) { + throw new IllegalArgumentException("headers"); + } + // Order of the values included in the message payload is a protocol that + // clients/BE need to follow exactly. + // More headers can be added in the future. 
+ // If any of the value is optional, it should still have the placeholder value + // of "" + // OperationType -> ResourceType -> ResourceId/OwnerId -> XDate -> Date + String verbInput = verb; + String resourceIdInput = resourceIdValue; + String resourceTypeInput = resourceType; + + String authResourceId = getAuthorizationResourceIdOrFullName(resourceTypeInput, resourceIdInput); + String payLoad = generateMessagePayload(verbInput, authResourceId, resourceTypeInput, headers); + Mac mac = null; + try { + mac = (Mac) this.macInstance.clone(); + } catch (CloneNotSupportedException e) { + throw new IllegalStateException(e); + } + byte[] digest = mac.doFinal(payLoad.getBytes()); + String authorizationToken = Utils.encodeBase64String(digest); + String authtoken = AUTH_PREFIX + authorizationToken; + return HttpUtils.urlEncode(authtoken); + } + + private String generateMessagePayload(String verb, String resourceId, String resourceType, + Map headers) { + String xDate = headers.get(HttpConstants.HttpHeaders.X_DATE); + String date = headers.get(HttpConstants.HttpHeaders.HTTP_DATE); + // At-least one of date header should present + // https://docs.microsoft.com/en-us/rest/api/documentdb/access-control-on-documentdb-resources + if (StringUtils.isEmpty(xDate) && (StringUtils.isEmpty(date) || StringUtils.isWhitespace(date))) { + headers.put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123()); + xDate = Utils.nowAsRFC1123(); + } + + // for name based, it is case sensitive, we won't use the lower case + if (!PathsHelper.isNameBased(resourceId)) { + resourceId = resourceId.toLowerCase(); + } + + StringBuilder payload = new StringBuilder(); + payload.append(verb.toLowerCase()) + .append('\n') + .append(resourceType.toLowerCase()) + .append('\n') + .append(resourceId) + .append('\n') + .append(xDate.toLowerCase()) + .append('\n') + .append(StringUtils.isEmpty(xDate) ? date.toLowerCase() : "") + .append('\n'); + + return payload.toString(); + } + + private String getAuthorizationResourceIdOrFullName(String resourceType, String resourceIdOrFullName) { + if (StringUtils.isEmpty(resourceType) || StringUtils.isEmpty(resourceIdOrFullName)) { + return resourceIdOrFullName; + } + if (PathsHelper.isNameBased(resourceIdOrFullName)) { + // resource fullname is always end with name (not type segment like docs/colls). 
+ return resourceIdOrFullName; + } + + if (resourceType.equalsIgnoreCase(Paths.OFFERS_PATH_SEGMENT) + || resourceType.equalsIgnoreCase(Paths.PARTITIONS_PATH_SEGMENT) + || resourceType.equalsIgnoreCase(Paths.TOPOLOGY_PATH_SEGMENT) + || resourceType.equalsIgnoreCase(Paths.RID_RANGE_PATH_SEGMENT)) { + return resourceIdOrFullName; + } + + ResourceId parsedRId = ResourceId.parse(resourceIdOrFullName); + if (resourceType.equalsIgnoreCase(Paths.DATABASES_PATH_SEGMENT)) { + return parsedRId.getDatabaseId().toString(); + } else if (resourceType.equalsIgnoreCase(Paths.USERS_PATH_SEGMENT)) { + return parsedRId.getUserId().toString(); + } else if (resourceType.equalsIgnoreCase(Paths.COLLECTIONS_PATH_SEGMENT)) { + return parsedRId.getDocumentCollectionId().toString(); + } else if (resourceType.equalsIgnoreCase(Paths.DOCUMENTS_PATH_SEGMENT)) { + return parsedRId.getDocumentId().toString(); + } else { + // leaf node + return resourceIdOrFullName; + } + } + + private void getResourceTypeAndIdOrFullName(URI uri, PathInfo pathInfo) { + if (uri == null) { + throw new IllegalArgumentException("uri"); + } + + pathInfo.resourcePath = StringUtils.EMPTY; + pathInfo.resourceIdOrFullName = StringUtils.EMPTY; + + String[] segments = StringUtils.split(uri.toString(), Constants.Properties.PATH_SEPARATOR); + if (segments == null || segments.length < 1) { + throw new IllegalArgumentException(RMResources.InvalidUrl); + } + // Authorization code is fine with Uri not having resource id and path. + // We will just return empty in that case + String pathAndQuery = StringUtils.EMPTY ; + if(StringUtils.isNotEmpty(uri.getPath())) { + pathAndQuery+= uri.getPath(); + } + if(StringUtils.isNotEmpty(uri.getQuery())) { + pathAndQuery+="?"; + pathAndQuery+= uri.getQuery(); + } + if (!PathsHelper.tryParsePathSegments(pathAndQuery, pathInfo, null)) { + pathInfo.resourcePath = StringUtils.EMPTY; + pathInfo.resourceIdOrFullName = StringUtils.EMPTY; + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/BaseDatabaseAccountConfigurationProvider.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/BaseDatabaseAccountConfigurationProvider.java new file mode 100644 index 0000000000000..8fc51eadbe534 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/BaseDatabaseAccountConfigurationProvider.java @@ -0,0 +1,62 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
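Editor's note: to make the signing steps in BaseAuthorizationTokenProvider above easier to follow, this standalone sketch reproduces the same string-to-sign layout and HMAC step outside the class. The literal verb, resource path, and date are illustrative values, and the real generateKeyAuthorizationSignatureNew path additionally URL-encodes the result.

import javax.crypto.Mac;
import javax.crypto.spec.SecretKeySpec;
import java.util.Base64;

class StringToSignSketch {
    static String sign(String base64MasterKey) throws Exception {
        // Same field order the provider uses: verb, resource type, resource id/name, x-ms-date, date.
        String stringToSign = "get" + '\n'
                + "docs" + '\n'
                + "dbs/sampleDb/colls/sampleColl/docs/sampleDoc" + '\n'
                + "thu, 27 jun 2019 20:00:00 gmt" + '\n'
                + "" + '\n';
        Mac mac = Mac.getInstance("HMACSHA256");
        mac.init(new SecretKeySpec(Base64.getDecoder().decode(base64MasterKey), "HMACSHA256"));
        String signature = Base64.getEncoder().encodeToString(mac.doFinal(stringToSign.getBytes()));
        return "type=master&ver=1.0&sig=" + signature;
    }
}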
+ */ + +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.ConsistencyLevel; + +/** + * Used internally to provides functionality to work with database account configuration in the Azure Cosmos DB database service. + */ +public class BaseDatabaseAccountConfigurationProvider implements DatabaseAccountConfigurationProvider { + private ConsistencyLevel desiredConsistencyLevel; + private DatabaseAccount databaseAccount; + + public BaseDatabaseAccountConfigurationProvider(DatabaseAccount databaseAccount, ConsistencyLevel desiredConsistencyLevel) { + this.databaseAccount = databaseAccount; + this.desiredConsistencyLevel = desiredConsistencyLevel; + } + + public ConsistencyLevel getStoreConsistencyPolicy() { + ConsistencyLevel databaseAccountConsistency = this.databaseAccount.getConsistencyPolicy().defaultConsistencyLevel(); + if (this.desiredConsistencyLevel == null) { + return databaseAccountConsistency; + } else if (!Utils.isValidConsistency(databaseAccountConsistency, this.desiredConsistencyLevel)) { + throw new IllegalArgumentException(String.format( + "ConsistencyLevel %1s specified in the request is invalid when service is configured with consistency level %2s. Ensure the request consistency level is not stronger than the service consistency level.", + this.desiredConsistencyLevel.toString(), + databaseAccountConsistency.toString())); + } else { + return this.desiredConsistencyLevel; + } + } + + public int getMaxReplicaSetSize() { + return this.databaseAccount.getReplicationPolicy().getMaxReplicaSetSize(); + } + + @Override + public String getQueryEngineConfiguration() { + return databaseAccount.get("queryEngineConfiguration").toString(); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Bytes.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Bytes.java new file mode 100644 index 0000000000000..0c8fc14fe6ad0 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Bytes.java @@ -0,0 +1,40 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
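Editor's note: a small usage sketch for BaseDatabaseAccountConfigurationProvider above. The DatabaseAccount value is assumed to have been fetched from the service already, and the ConsistencyLevel constant name is an assumption about this SDK version.

import com.azure.data.cosmos.ConsistencyLevel;
import com.azure.data.cosmos.internal.BaseDatabaseAccountConfigurationProvider;
import com.azure.data.cosmos.internal.DatabaseAccount;
import com.azure.data.cosmos.internal.DatabaseAccountConfigurationProvider;

class ConsistencySketch {
    static ConsistencyLevel effectiveConsistency(DatabaseAccount account) {
        // Throws IllegalArgumentException if the requested level is stronger than the account default.
        DatabaseAccountConfigurationProvider config =
                new BaseDatabaseAccountConfigurationProvider(account, ConsistencyLevel.SESSION);
        return config.getStoreConsistencyPolicy();
    }
}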
+ */ + +package com.azure.data.cosmos.internal; + + +public class Bytes { + + public static void reverse(byte[] bytes, int offset, int endIndex) { + for(int i = offset, j = endIndex - 1; i < j; --j, i++) { + byte aux = bytes[i]; + bytes[i] = bytes[j]; + bytes[j] = aux; + } + } + + public static void reverse(byte[] bytes) { + Bytes.reverse(bytes, 0, bytes.length); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ChangeFeedQueryImpl.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ChangeFeedQueryImpl.java new file mode 100644 index 0000000000000..91b36e4bd4ff4 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ChangeFeedQueryImpl.java @@ -0,0 +1,151 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.ChangeFeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.Resource; +import com.azure.data.cosmos.internal.query.Paginator; +import com.azure.data.cosmos.internal.routing.PartitionKeyInternal; +import com.azure.data.cosmos.internal.routing.PartitionKeyRangeIdentity; +import reactor.core.publisher.Flux; + +import java.util.HashMap; +import java.util.Map; +import java.util.function.BiFunction; +import java.util.function.Function; + +import static com.azure.data.cosmos.CommonsBridgeInternal.partitionKeyRangeIdInternal; + +class ChangeFeedQueryImpl { + + private static final String IfNonMatchAllHeaderValue = "*"; + private final RxDocumentClientImpl client; + private final ResourceType resourceType; + private final Class klass; + private final String documentsLink; + private final ChangeFeedOptions options; + + public ChangeFeedQueryImpl(RxDocumentClientImpl client, + ResourceType resourceType, + Class klass, + String collectionLink, + ChangeFeedOptions changeFeedOptions) { + + this.client = client; + this.resourceType = resourceType; + this.klass = klass; + this.documentsLink = Utils.joinPath(collectionLink, Paths.DOCUMENTS_PATH_SEGMENT); + changeFeedOptions = changeFeedOptions != null ? 
changeFeedOptions: new ChangeFeedOptions(); + + + if (resourceType.isPartitioned() && partitionKeyRangeIdInternal(changeFeedOptions) == null && changeFeedOptions.partitionKey() == null) { + throw new IllegalArgumentException(RMResources.PartitionKeyRangeIdOrPartitionKeyMustBeSpecified); + } + + if (changeFeedOptions.partitionKey() != null && + !Strings.isNullOrEmpty(partitionKeyRangeIdInternal(changeFeedOptions))) { + + throw new IllegalArgumentException(String.format( + RMResources.PartitionKeyAndParitionKeyRangeIdBothSpecified + , "feedOptions")); + } + + String initialNextIfNoneMatch = null; + + boolean canUseStartFromBeginning = true; + if (changeFeedOptions.requestContinuation() != null) { + initialNextIfNoneMatch = changeFeedOptions.requestContinuation(); + canUseStartFromBeginning = false; + } + + if(changeFeedOptions.startDateTime() != null){ + canUseStartFromBeginning = false; + } + + if (canUseStartFromBeginning && !changeFeedOptions.startFromBeginning()) { + initialNextIfNoneMatch = IfNonMatchAllHeaderValue; + } + + this.options = getChangeFeedOptions(changeFeedOptions, initialNextIfNoneMatch); + } + + private RxDocumentServiceRequest createDocumentServiceRequest(String continuationToken, int pageSize) { + Map headers = new HashMap<>(); + + if (options.maxItemCount() != null) { + headers.put(HttpConstants.HttpHeaders.PAGE_SIZE, String.valueOf(options.maxItemCount())); + } + + // On REST level, change feed is using IF_NONE_MATCH/ETag instead of continuation. + if(continuationToken != null) { + headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, continuationToken); + } + + headers.put(HttpConstants.HttpHeaders.A_IM, HttpConstants.A_IMHeaderValues.INCREMENTAL_FEED); + + if (options.partitionKey() != null) { + PartitionKeyInternal partitionKey = options.partitionKey().getInternalPartitionKey(); + headers.put(HttpConstants.HttpHeaders.PARTITION_KEY, partitionKey.toJson()); + } + + if(options.startDateTime() != null){ + String dateTimeInHttpFormat = Utils.zonedDateTimeAsUTCRFC1123(options.startDateTime()); + headers.put(HttpConstants.HttpHeaders.IF_MODIFIED_SINCE, dateTimeInHttpFormat); + } + + RxDocumentServiceRequest req = RxDocumentServiceRequest.create( + OperationType.ReadFeed, + resourceType, + documentsLink, + headers, + options); + + if (partitionKeyRangeIdInternal(options) != null) { + req.routeTo(new PartitionKeyRangeIdentity(partitionKeyRangeIdInternal(this.options))); + } + + return req; + } + + private ChangeFeedOptions getChangeFeedOptions(ChangeFeedOptions options, String continuationToken) { + ChangeFeedOptions newOps = new ChangeFeedOptions(options); + newOps.requestContinuation(continuationToken); + return newOps; + } + + public Flux> executeAsync() { + + BiFunction createRequestFunc = this::createDocumentServiceRequest; + + Function>> executeFunc = this::executeRequestAsync; + + return Paginator.getPaginatedChangeFeedQueryResultAsObservable(options, createRequestFunc, executeFunc, klass, options.maxItemCount() != null ? 
options.maxItemCount(): -1); + } + + private Flux> executeRequestAsync(RxDocumentServiceRequest request) { + return client.readFeed(request) + .map( rsp -> BridgeInternal.toChaneFeedResponsePage(rsp, klass)); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ClearingSessionContainerClientRetryPolicy.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ClearingSessionContainerClientRetryPolicy.java new file mode 100644 index 0000000000000..09b510367b480 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ClearingSessionContainerClientRetryPolicy.java @@ -0,0 +1,91 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.CosmosClientException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Mono; + +/** + * While this class is public, but it is not part of our published public APIs. + * This is meant to be internally used only by our sdk. + * + * This retry policy is designed to work with in a pair with ClientRetryPolicy. + * The inner retryPolicy must be a ClientRetryPolicy or a retry policy delegating to it. + * + * The expectation that is the outer retry policy in the retry policy chain and nobody can overwrite ShouldRetryResult. + * Once we clear the session we expect call to fail and throw exception to the client. Otherwise we may violate session consistency. 
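Editor's note: stepping back to ChangeFeedQueryImpl above, consumers do not use it directly; the options it interprets are set roughly as in the following sketch. The queryDocumentChangeFeed entry point and the accessor names are assumptions about the surrounding client surface, not something this diff shows.

import com.azure.data.cosmos.ChangeFeedOptions;
import com.azure.data.cosmos.PartitionKey;
import com.azure.data.cosmos.internal.AsyncDocumentClient;

class ChangeFeedSketch {
    static void readChanges(AsyncDocumentClient client, String collectionLink) {
        ChangeFeedOptions options = new ChangeFeedOptions();
        // A partition key (or a partition key range id) is required for partitioned collections.
        options.partitionKey(new PartitionKey("sample-pk"));
        // false would translate to the "*" If-None-Match header, i.e. only changes after subscription.
        options.startFromBeginning(true);
        client.queryDocumentChangeFeed(collectionLink, options)
              .subscribe(page -> System.out.println(
                      "change feed page, continuation: " + page.continuationToken()));
    }
}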
+ */ +public class ClearingSessionContainerClientRetryPolicy implements IDocumentClientRetryPolicy { + + private final static Logger logger = LoggerFactory.getLogger(ClearingSessionContainerClientRetryPolicy.class); + + private final IDocumentClientRetryPolicy retryPolicy; + private final ISessionContainer sessionContainer; + private RxDocumentServiceRequest request; + private boolean hasTriggered = false; + + public ClearingSessionContainerClientRetryPolicy(ISessionContainer sessionContainer, IDocumentClientRetryPolicy retryPolicy) { + this.sessionContainer = sessionContainer; + this.retryPolicy = retryPolicy; + } + + @Override + public void onBeforeSendRequest(RxDocumentServiceRequest request) { + this.request = request; + this.retryPolicy.onBeforeSendRequest(request); + } + + @Override + public Mono shouldRetry(Exception e) { + + return this.retryPolicy.shouldRetry(e).flatMap(shouldRetryResult -> { + + if (!shouldRetryResult.shouldRetry && !this.hasTriggered) + { + CosmosClientException clientException = Utils.as(e, CosmosClientException.class); + + if (this.request == null) { + // someone didn't call OnBeforeSendRequest - nothing we can do + logger.error("onBeforeSendRequest is not invoked, encountered failure due to request being null", e); + return Mono.just(ShouldRetryResult.error(e)); + } + + if (clientException != null && this.request.getIsNameBased() && + Exceptions.isStatusCode(clientException, HttpConstants.StatusCodes.NOTFOUND) && + Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)) + { + // Clear the session token, because the collection name might be reused. + logger.warn("Clear the token for named base request {}", request.getResourceAddress()); + + this.sessionContainer.clearTokenByCollectionFullName(request.getResourceAddress()); + + this.hasTriggered = true; + } + } + + return Mono.just(shouldRetryResult); + }); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ClientRetryPolicy.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ClientRetryPolicy.java new file mode 100644 index 0000000000000..239899573eb5f --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ClientRetryPolicy.java @@ -0,0 +1,228 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
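Editor's note: the wrapping relationship described in the class comment above looks roughly like this in practice; the inner policy and session container are assumed to come from the client's existing plumbing, and the method shown is illustrative.

import com.azure.data.cosmos.internal.ClearingSessionContainerClientRetryPolicy;
import com.azure.data.cosmos.internal.IDocumentClientRetryPolicy;
import com.azure.data.cosmos.internal.ISessionContainer;

class SessionClearingRetrySketch {
    static IDocumentClientRetryPolicy wrap(ISessionContainer sessionContainer,
                                           IDocumentClientRetryPolicy innerClientRetryPolicy) {
        // The clearing policy must be the outermost one so its ShouldRetryResult is not overridden.
        return new ClearingSessionContainerClientRetryPolicy(sessionContainer, innerClientRetryPolicy);
    }
}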
+ */ +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.CosmosResponseDiagnostics; +import com.azure.data.cosmos.RetryOptions; +import com.azure.data.cosmos.internal.directconnectivity.WebExceptionUtility; +import org.apache.commons.collections4.list.UnmodifiableList; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Mono; + +import java.net.URL; +import java.time.Duration; + +/** + * While this class is public, but it is not part of our published public APIs. + * This is meant to be internally used only by our sdk. + * + * Client policy is combination of endpoint change retry + throttling retry. + */ +public class ClientRetryPolicy implements IDocumentClientRetryPolicy { + + private final static Logger logger = LoggerFactory.getLogger(ClientRetryPolicy.class); + + final static int RetryIntervalInMS = 1000; //Once we detect failover wait for 1 second before retrying request. + final static int MaxRetryCount = 120; + + private final IDocumentClientRetryPolicy throttlingRetry; + private final GlobalEndpointManager globalEndpointManager; + private final boolean enableEndpointDiscovery; + private int failoverRetryCount; + + private int sessionTokenRetryCount; + private boolean isReadRequest; + private boolean canUseMultipleWriteLocations; + private URL locationEndpoint; + private RetryContext retryContext; + private CosmosResponseDiagnostics cosmosResponseDiagnostics; + + public ClientRetryPolicy(GlobalEndpointManager globalEndpointManager, + boolean enableEndpointDiscovery, + RetryOptions retryOptions) { + + this.throttlingRetry = new ResourceThrottleRetryPolicy( + retryOptions.maxRetryAttemptsOnThrottledRequests(), + retryOptions.maxRetryWaitTimeInSeconds()); + this.globalEndpointManager = globalEndpointManager; + this.failoverRetryCount = 0; + this.enableEndpointDiscovery = enableEndpointDiscovery; + this.sessionTokenRetryCount = 0; + this.canUseMultipleWriteLocations = false; + this.cosmosResponseDiagnostics = BridgeInternal.createCosmosResponseDiagnostics(); + } + + @Override + public Mono shouldRetry(Exception e) { + if (this.locationEndpoint == null) { + // on before request is not invoked because Document Service Request creation failed. + logger.error("locationEndpoint is null because ClientRetryPolicy::onBeforeRequest(.) is not invoked, " + + "probably request creation failed due to invalid options, serialization setting, etc."); + return Mono.just(ShouldRetryResult.error(e)); + } + + this.retryContext = null; + // Received 403.3 on write region, initiate the endpoint re-discovery + CosmosClientException clientException = Utils.as(e, CosmosClientException.class); + if (clientException != null && clientException.cosmosResponseDiagnostics() != null) { + this.cosmosResponseDiagnostics = clientException.cosmosResponseDiagnostics(); + } + if (clientException != null && + Exceptions.isStatusCode(clientException, HttpConstants.StatusCodes.FORBIDDEN) && + Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.FORBIDDEN_WRITEFORBIDDEN)) + { + logger.warn("Endpoint not writable. Will refresh cache and retry. {}", e.toString()); + return this.shouldRetryOnEndpointFailureAsync(false); + } + + // Regional endpoint is not available yet for reads (e.g. 
add/ online of region is in progress) + if (clientException != null && + Exceptions.isStatusCode(clientException, HttpConstants.StatusCodes.FORBIDDEN) && + Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.DATABASE_ACCOUNT_NOTFOUND) && + this.isReadRequest) + { + logger.warn("Endpoint not available for reads. Will refresh cache and retry. {}", e.toString()); + return this.shouldRetryOnEndpointFailureAsync(true); + } + + // Received Connection error (HttpRequestException), initiate the endpoint rediscovery + if (WebExceptionUtility.isNetworkFailure(e)) { + logger.warn("Endpoint not reachable. Will refresh cache and retry. {}" , e.toString()); + return this.shouldRetryOnEndpointFailureAsync(this.isReadRequest); + } + + if (clientException != null && + Exceptions.isStatusCode(clientException, HttpConstants.StatusCodes.NOTFOUND) && + Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)) { + return Mono.just(this.shouldRetryOnSessionNotAvailable()); + } + + return this.throttlingRetry.shouldRetry(e); + } + + private ShouldRetryResult shouldRetryOnSessionNotAvailable() { + this.sessionTokenRetryCount++; + + if (!this.enableEndpointDiscovery) { + // if endpoint discovery is disabled, the request cannot be retried anywhere else + return ShouldRetryResult.noRetry(); + } else { + if (this.canUseMultipleWriteLocations) { + UnmodifiableList endpoints = this.isReadRequest ? this.globalEndpointManager.getReadEndpoints() : this.globalEndpointManager.getWriteEndpoints(); + + if (this.sessionTokenRetryCount > endpoints.size()) { + // When use multiple write locations is true and the request has been tried + // on all locations, then don't retry the request + return ShouldRetryResult.noRetry(); + } else { + this.retryContext = new RetryContext(this.sessionTokenRetryCount - 1, this.sessionTokenRetryCount > 1); + return ShouldRetryResult.retryAfter(Duration.ZERO); + } + } else { + if (this.sessionTokenRetryCount > 1) { + // When cannot use multiple write locations, then don't retry the request if + // we have already tried this request on the write location + return ShouldRetryResult.noRetry(); + } else { + this.retryContext = new RetryContext(this.sessionTokenRetryCount - 1, false); + return ShouldRetryResult.retryAfter(Duration.ZERO); + } + } + } + } + + private Mono shouldRetryOnEndpointFailureAsync(boolean isReadRequest) { + if (!this.enableEndpointDiscovery || this.failoverRetryCount > MaxRetryCount) { + logger.warn("ShouldRetryOnEndpointFailureAsync() Not retrying. Retry count = {}", this.failoverRetryCount); + return Mono.just(ShouldRetryResult.noRetry()); + } + + this.failoverRetryCount++; + + // Mark the current read endpoint as unavailable + if (this.isReadRequest) { + logger.warn("marking the endpoint {} as unavailable for read",this.locationEndpoint); + this.globalEndpointManager.markEndpointUnavailableForRead(this.locationEndpoint); + } else { + logger.warn("marking the endpoint {} as unavailable for write",this.locationEndpoint); + this.globalEndpointManager.markEndpointUnavailableForWrite(this.locationEndpoint); + } + + // Some requests may be in progress when the endpoint manager and client are closed. + // In that case, the request won't succeed since the http client is closed. + // Therefore just skip the retry here to avoid the delay because retrying won't go through in the end. + + Duration retryDelay = Duration.ZERO; + if (!this.isReadRequest) { + logger.debug("Failover happening. 
retryCount {}", this.failoverRetryCount); + if (this.failoverRetryCount > 1) { + //if retried both endpoints, follow regular retry interval. + retryDelay = Duration.ofMillis(ClientRetryPolicy.RetryIntervalInMS); + } + } else { + retryDelay = Duration.ofMillis(ClientRetryPolicy.RetryIntervalInMS); + } + this.retryContext = new RetryContext(this.failoverRetryCount, false); + return this.globalEndpointManager.refreshLocationAsync(null) + .then(Mono.just(ShouldRetryResult.retryAfter(retryDelay))); + } + + @Override + public void onBeforeSendRequest(RxDocumentServiceRequest request) { + this.isReadRequest = request.isReadOnlyRequest(); + this.canUseMultipleWriteLocations = this.globalEndpointManager.CanUseMultipleWriteLocations(request); + if (request.requestContext != null) { + request.requestContext.cosmosResponseDiagnostics = this.cosmosResponseDiagnostics; + } + + // clear previous location-based routing directive + if (request.requestContext != null) { + request.requestContext.ClearRouteToLocation(); + } + if (this.retryContext != null) { + // set location-based routing directive based on request retry context + request.requestContext.RouteToLocation(this.retryContext.retryCount, this.retryContext.retryRequestOnPreferredLocations); + } + + // Resolve the endpoint for the request and pin the resolution to the resolved endpoint + // This enables marking the endpoint unavailability on endpoint failover/unreachability + this.locationEndpoint = this.globalEndpointManager.resolveServiceEndpoint(request); + if (request.requestContext != null) { + request.requestContext.RouteToLocation(this.locationEndpoint); + } + } + private class RetryContext { + + public int retryCount; + public boolean retryRequestOnPreferredLocations; + + public RetryContext(int retryCount, + boolean retryRequestOnPreferredLocations) { + this.retryCount = retryCount; + this.retryRequestOnPreferredLocations = retryRequestOnPreferredLocations; + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Configs.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Configs.java new file mode 100644 index 0000000000000..0913c35ba1ee0 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Configs.java @@ -0,0 +1,177 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.internal.directconnectivity.Protocol; +import io.netty.handler.ssl.SslContext; +import io.netty.handler.ssl.SslContextBuilder; +import io.netty.handler.ssl.SslProvider; +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.net.ssl.SSLException; + +public class Configs { + private static final Logger logger = LoggerFactory.getLogger(Configs.class); + private final SslContext sslContext; + + private static final String PROTOCOL = "cosmos.directModeProtocol"; + private static final Protocol DEFAULT_PROTOCOL = Protocol.TCP; + + private static final String UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS = "COSMOS.UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS"; + + private static final String MAX_HTTP_BODY_LENGTH_IN_BYTES = "COSMOS.MAX_HTTP_BODY_LENGTH_IN_BYTES"; + private static final String MAX_HTTP_INITIAL_LINE_LENGTH_IN_BYTES = "COSMOS.MAX_HTTP_INITIAL_LINE_LENGTH_IN_BYTES"; + private static final String MAX_HTTP_CHUNK_SIZE_IN_BYTES = "COSMOS.MAX_HTTP_CHUNK_SIZE_IN_BYTES"; + private static final String MAX_HTTP_HEADER_SIZE_IN_BYTES = "COSMOS.MAX_HTTP_HEADER_SIZE_IN_BYTES"; + private static final String MAX_DIRECT_HTTPS_POOL_SIZE = "COSMOS.MAX_DIRECT_HTTP_CONNECTION_LIMIT"; + + private static final int DEFAULT_UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS = 5 * 60; + + private static final int DEFAULT_MAX_HTTP_BODY_LENGTH_IN_BYTES = 6 * 1024 * 1024; //6MB + private static final int DEFAULT_MAX_HTTP_INITIAL_LINE_LENGTH = 4096; //4KB + private static final int DEFAULT_MAX_HTTP_CHUNK_SIZE_IN_BYTES = 8192; //8KB + private static final int DEFAULT_MAX_HTTP_REQUEST_HEADER_SIZE = 32 * 1024; //32 KB + + private static final int MAX_NUMBER_OF_READ_BARRIER_READ_RETRIES = 6; + private static final int MAX_NUMBER_OF_PRIMARY_READ_RETRIES = 6; + private static final int MAX_NUMBER_OF_READ_QUORUM_RETRIES = 6; + private static final int DELAY_BETWEEN_READ_BARRIER_CALLS_IN_MS = 5; + + private static final int MAX_BARRIER_RETRIES_FOR_MULTI_REGION = 30; + private static final int BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION = 30; + + private static final int MAX_SHORT_BARRIER_RETRIES_FOR_MULTI_REGION = 4; + private static final int SHORT_BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION = 10; + private static final int CPU_CNT = Runtime.getRuntime().availableProcessors(); + private static final int DEFAULT_DIRECT_HTTPS_POOL_SIZE = CPU_CNT * 500; + + private static final String REACTOR_NETTY_CONNECTION_POOL_NAME = "reactor-netty-connection-pool"; + + public Configs() { + this.sslContext = sslContextInit(); + } + + private SslContext sslContextInit() { + try { + SslProvider sslProvider = SslContext.defaultClientProvider(); + return SslContextBuilder.forClient().sslProvider(sslProvider).build(); + } catch (SSLException sslException) { + logger.error("Fatal error cannot instantiate ssl context due to {}", sslException.getMessage(), sslException); + throw new IllegalStateException(sslException); + } + } + + public SslContext getSslContext() { + return this.sslContext; + } + + public Protocol getProtocol() { + String protocol = getJVMConfigAsString(PROTOCOL, DEFAULT_PROTOCOL.toString()); + try { + return Protocol.valueOf(StringUtils.upperCase(protocol.toLowerCase())); + } catch (Exception e) { + logger.error("Parsing protocol {} failed. 
Using the default {}.", protocol, DEFAULT_PROTOCOL, e); + return DEFAULT_PROTOCOL; + } + } + + public int getMaxNumberOfReadBarrierReadRetries() { + return MAX_NUMBER_OF_READ_BARRIER_READ_RETRIES; + } + + public int getMaxNumberOfPrimaryReadRetries() { + return MAX_NUMBER_OF_PRIMARY_READ_RETRIES; + } + + public int getMaxNumberOfReadQuorumRetries() { + return MAX_NUMBER_OF_READ_QUORUM_RETRIES; + } + + public int getDelayBetweenReadBarrierCallsInMs() { + return DELAY_BETWEEN_READ_BARRIER_CALLS_IN_MS; + } + + public int getMaxBarrierRetriesForMultiRegion() { + return MAX_BARRIER_RETRIES_FOR_MULTI_REGION; + } + + public int getBarrierRetryIntervalInMsForMultiRegion() { + return BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION; + } + + public int getMaxShortBarrierRetriesForMultiRegion() { + return MAX_SHORT_BARRIER_RETRIES_FOR_MULTI_REGION; + } + + public int getShortBarrierRetryIntervalInMsForMultiRegion() { + return SHORT_BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION; + } + + public int getDirectHttpsMaxConnectionLimit() { + return getJVMConfigAsInt(MAX_DIRECT_HTTPS_POOL_SIZE, DEFAULT_DIRECT_HTTPS_POOL_SIZE); + } + + public int getMaxHttpHeaderSize() { + return getJVMConfigAsInt(MAX_HTTP_HEADER_SIZE_IN_BYTES, DEFAULT_MAX_HTTP_REQUEST_HEADER_SIZE); + } + + public int getMaxHttpInitialLineLength() { + return getJVMConfigAsInt(MAX_HTTP_INITIAL_LINE_LENGTH_IN_BYTES, DEFAULT_MAX_HTTP_INITIAL_LINE_LENGTH); + } + + public int getMaxHttpChunkSize() { + return getJVMConfigAsInt(MAX_HTTP_CHUNK_SIZE_IN_BYTES, DEFAULT_MAX_HTTP_CHUNK_SIZE_IN_BYTES); + } + + public int getMaxHttpBodyLength() { + return getJVMConfigAsInt(MAX_HTTP_BODY_LENGTH_IN_BYTES, DEFAULT_MAX_HTTP_BODY_LENGTH_IN_BYTES); + } + + public int getUnavailableLocationsExpirationTimeInSeconds() { + return getJVMConfigAsInt(UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS, DEFAULT_UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS); + } + + public String getReactorNettyConnectionPoolName() { + return REACTOR_NETTY_CONNECTION_POOL_NAME; + } + + private static String getJVMConfigAsString(String propName, String defaultValue) { + String propValue = System.getProperty(propName); + return StringUtils.defaultString(propValue, defaultValue); + } + + private static int getJVMConfigAsInt(String propName, int defaultValue) { + String propValue = System.getProperty(propName); + return getIntValue(propValue, defaultValue); + } + + private static int getIntValue(String val, int defaultValue) { + if (StringUtils.isEmpty(val)) { + return defaultValue; + } else { + return Integer.valueOf(val); + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Conflict.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Conflict.java new file mode 100644 index 0000000000000..12133673af9c3 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Conflict.java @@ -0,0 +1,99 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial 
portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.Resource; + +import java.lang.reflect.InvocationTargetException; + +/** + * Represents a conflict in the version of a particular resource in the Azure Cosmos DB database service. + *
+ * During rare failure scenarios, conflicts are generated for the documents in transit. Clients can inspect the + * respective conflict instances for resources and operations in conflict. + */ +public final class Conflict extends Resource { + /** + * Initialize a conflict object. + */ + public Conflict() { + super(); + } + + /** + * Initialize a conflict object from json string. + * + * @param jsonString the json string that represents the conflict. + */ + public Conflict(String jsonString) { + super(jsonString); + } + + /** + * Gets the operation kind. + * + * @return the operation kind. + */ + public String getOperationKind() { + return super.getString(Constants.Properties.OPERATION_TYPE); + } + + /** + * Gets the type of the conflicting resource. + * + * @return the resource type. + */ + public String getResouceType() { + return super.getString(Constants.Properties.RESOURCE_TYPE); + } + + /** + * Gets the resource ID for the conflict in the Azure Cosmos DB service. + * @return resource Id for the conflict. + */ + public String getSourceResourceId() { + return super.getString(Constants.Properties.SOURCE_RESOURCE_ID); + } + + /** + * Gets the conflicting resource in the Azure Cosmos DB service. + * @param the type of the object. + * @param klass The returned type of conflicting resource. + * @return The conflicting resource. + */ + public T getResource(Class klass) { + String resourceAsString = super.getString(Constants.Properties.CONTENT); + + if (!Strings.isNullOrEmpty(resourceAsString)) { + try { + return klass.getConstructor(String.class).newInstance(resourceAsString); + } catch (InstantiationException | IllegalAccessException | IllegalArgumentException + | InvocationTargetException | NoSuchMethodException | SecurityException e) { + throw new IllegalStateException("Failed to instantiate class object.", e); + } + } else { + return null; + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Constants.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Constants.java new file mode 100644 index 0000000000000..adc9f98681a44 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Constants.java @@ -0,0 +1,224 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +/** + * Used internally. Constants in the Azure Cosmos DB database service Java SDK. 
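For the Conflict class above, getResource re-hydrates the conflicting payload by reflectively invoking the single-String constructor of the requested Resource subtype. A hedged usage sketch, assuming getResource is generic over that subtype (Class<T> in, T out) and that conflictJson is a conflict-feed entry returned by the service; the operation-kind value in the comment is only an example:

import com.azure.data.cosmos.internal.Conflict;
import com.azure.data.cosmos.internal.Document;

public final class ConflictSketch {
    static void inspect(String conflictJson) {
        Conflict conflict = new Conflict(conflictJson);
        String operationKind = conflict.getOperationKind();   // e.g. "replace"
        String resourceType = conflict.getResouceType();      // note: spelled this way in the source
        // Works because Document exposes a Document(String json) constructor for getResource to call.
        Document losingWrite = conflict.getResource(Document.class);
        System.out.println(operationKind + " conflict on a " + resourceType
                + (losingWrite == null ? " (no content returned)" : ""));
    }
}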
+ */ +public final class Constants { + + public static final class Quota { + // Quota Strings + public static final String DATABASE = "databases"; + public static final String COLLECTION = "collections"; + public static final String USER = "users"; + public static final String PERMISSION = "permissions"; + public static final String COLLECTION_SIZE = "collectionSize"; + public static final String DOCUMENTS_SIZE = "documentsSize"; + public static final String STORED_PROCEDURE = "storedProcedures"; + public static final String TRIGGER = "triggers"; + public static final String USER_DEFINED_FUNCTION = "functions"; + public static final String DELIMITER_CHARS = "=|;"; + public static final String DOCUMENTS_COUNT = "documentsCount"; + } + + public static final class Properties { + public static final String ID = "id"; + public static final String R_ID = "_rid"; + public static final String SELF_LINK = "_self"; + public static final String LAST_MODIFIED = "_ts"; + public static final String COUNT = "_count"; + public static final String E_TAG = "_etag"; + public static final String AGGREGATE = "_aggregate"; + + public static final String CONSISTENCY_POLICY = "consistencyPolicy"; + public static final String DEFAULT_CONSISTENCY_LEVEL = "defaultConsistencyLevel"; + public static final String MAX_STALENESS_PREFIX = "maxStalenessPrefix"; + public static final String MAX_STALENESS_INTERVAL_IN_SECONDS = "maxIntervalInSeconds"; + public static final String PARENTS = "parents"; + + public static final String DATABASES_LINK = "_dbs"; + public static final String COLLECTIONS_LINK = "_colls"; + public static final String USERS_LINK = "_users"; + public static final String PERMISSIONS_LINK = "_permissions"; + public static final String ATTACHMENTS_LINK = "_attachments"; + public static final String STORED_PROCEDURES_LINK = "_sprocs"; + public static final String TRIGGERS_LINK = "_triggers"; + public static final String USER_DEFINED_FUNCTIONS_LINK = "_udfs"; + public static final String CONFLICTS_LINK = "_conflicts"; + public static final String DOCUMENTS_LINK = "_docs"; + public static final String RESOURCE_LINK = "resource"; + public static final String MEDIA_LINK = "media"; + + public static final String PERMISSION_MODE = "permissionMode"; + public static final String RESOURCE_KEY = "key"; + public static final String TOKEN = "_token"; + public static final String SQL_API_TYPE = "0x10"; + + // Scripting + public static final String BODY = "body"; + public static final String TRIGGER_TYPE = "triggerType"; + public static final String TRIGGER_OPERATION = "triggerOperation"; + + public static final String MAX_SIZE = "maxSize"; + public static final String CURRENT_USAGE = "currentUsage"; + + public static final String CONTENT = "content"; + + public static final String CONTENT_TYPE = "contentType"; + + // ErrorResource. + public static final String CODE = "code"; + public static final String MESSAGE = "message"; + public static final String ERROR_DETAILS = "errorDetails"; + public static final String ADDITIONAL_ERROR_INFO = "additionalErrorInfo"; + + // PartitionInfo. 
+ public static final String RESOURCE_TYPE = "resourceType"; + public static final String SERVICE_INDEX = "serviceIndex"; + public static final String PARTITION_INDEX = "partitionIndex"; + + public static final String ADDRESS_LINK = "addresses"; + public static final String USER_REPLICATION_POLICY = "userReplicationPolicy"; + public static final String USER_CONSISTENCY_POLICY = "userConsistencyPolicy"; + public static final String SYSTEM_REPLICATION_POLICY = "systemReplicationPolicy"; + public static final String READ_POLICY = "readPolicy"; + public static final String QUERY_ENGINE_CONFIGURATION = "queryEngineConfiguration"; + + //ReplicationPolicy + public static final String REPLICATION_POLICY = "replicationPolicy"; + public static final String ASYNC_REPLICATION = "asyncReplication"; + public static final String MAX_REPLICA_SET_SIZE = "maxReplicasetSize"; + public static final String MIN_REPLICA_SET_SIZE = "minReplicaSetSize"; + + //Indexing Policy. + public static final String INDEXING_POLICY = "indexingPolicy"; + public static final String AUTOMATIC = "automatic"; + public static final String STRING_PRECISION = "StringPrecision"; + public static final String NUMERIC_PRECISION = "NumericPrecision"; + public static final String MAX_PATH_DEPTH = "maxPathDepth"; + public static final String INDEXING_MODE = "indexingMode"; + public static final String INDEX_TYPE = "IndexType"; + public static final String INDEX_KIND = "kind"; + public static final String DATA_TYPE = "dataType"; + public static final String PRECISION = "precision"; + + public static final String PATHS = "paths"; + public static final String PATH = "path"; + public static final String INCLUDED_PATHS = "includedPaths"; + public static final String EXCLUDED_PATHS = "excludedPaths"; + public static final String INDEXES = "indexes"; + public static final String COMPOSITE_INDEXES = "compositeIndexes"; + public static final String ORDER = "order"; + public static final String SPATIAL_INDEXES = "spatialIndexes"; + public static final String TYPES = "types"; + + // Unique index. + public static final String UNIQUE_KEY_POLICY = "uniqueKeyPolicy"; + public static final String UNIQUE_KEYS = "uniqueKeys"; + + // Conflict. 
+ public static final String CONFLICT = "conflict"; + public static final String OPERATION_TYPE = "operationType"; + public static final String SOURCE_RESOURCE_ID = "resourceId"; + + // Offer resource + public static final String OFFER_TYPE = "offerType"; + public static final String OFFER_VERSION = "offerVersion"; + public static final String OFFER_CONTENT = "content"; + public static final String OFFER_THROUGHPUT = "offerThroughput"; + public static final String OFFER_VERSION_V1 = "V1"; + public static final String OFFER_VERSION_V2 = "V2"; + public static final String OFFER_RESOURCE_ID = "offerResourceId"; + + // PartitionKey + public static final String PARTITION_KEY = "partitionKey"; + public static final String PARTITION_KEY_PATHS = "paths"; + public static final String PARTITION_KIND = "kind"; + public static final String PARTITION_KEY_DEFINITION_VERSION = "version"; + public static final String SYSTEM_KEY = "systemKey"; + + public static final String RESOURCE_PARTITION_KEY = "resourcePartitionKey"; + public static final String PARTITION_KEY_RANGE_ID = "partitionKeyRangeId"; + public static final String MIN_INCLUSIVE_EFFECTIVE_PARTITION_KEY = "minInclusiveEffectivePartitionKey"; + public static final String MAX_EXCLUSIVE_EFFECTIVE_PARTITION_KEY = "maxExclusiveEffectivePartitionKey"; + + // AddressResource + public static final String IS_PRIMARY = "isPrimary"; + public static final String PROTOCOL = "protocol"; + public static final String LOGICAL_URI = "logicalUri"; + public static final String PHYISCAL_URI = "physcialUri"; + + // Time-to-Live + public static final String TTL = "ttl"; + public static final String DEFAULT_TTL = "defaultTtl"; + + // Global DB account properties + public static final String Name = "name"; + public static final String WRITABLE_LOCATIONS = "writableLocations"; + public static final String READABLE_LOCATIONS = "readableLocations"; + public static final String DATABASE_ACCOUNT_ENDPOINT = "databaseAccountEndpoint"; + + //Authorization + public static final String MASTER_TOKEN = "master"; + public static final String RESOURCE_TOKEN = "resource"; + public static final String TOKEN_VERSION = "1.0"; + public static final String AUTH_SCHEMA_TYPE = "type"; + public static final String AUTH_VERSION = "ver"; + public static final String AUTH_SIGNATURE = "sig"; + public static final String READ_PERMISSION_MODE = "read"; + public static final String ALL_PERMISSION_MODE = "all"; + public static final String PATH_SEPARATOR = "/"; + + public static final int DEFAULT_MAX_PAGE_SIZE = 100; + public static final String ENABLE_MULTIPLE_WRITE_LOCATIONS = "enableMultipleWriteLocations"; + + // Conflict resolution policy + public static final String CONFLICT_RESOLUTION_POLICY = "conflictResolutionPolicy"; + public static final String MODE = "mode"; + public static final String CONFLICT_RESOLUTION_PATH = "conflictResolutionPath"; + public static final String CONFLICT_RESOLUTION_PROCEDURE = "conflictResolutionProcedure"; + + //Handler names for RXNetty httpClient + public static final String SSL_HANDLER_NAME = "ssl-handler"; + public static final String SSL_COMPLETION_HANDLER_NAME = "ssl-completion-handler"; + public static final String HTTP_PROXY_HANDLER_NAME = "http-proxy-handler"; + public static final String LOGGING_HANDLER_NAME = "logging-handler"; + } + + public static final class UrlEncodingInfo { + public static final String PLUS_SYMBOL_ESCAPED = "\\+"; + public static final String PLUS_SYMBOL_URI_ENCODING = "%2b"; + public static final String SINGLE_SPACE_URI_ENCODING = 
"%20"; + public static final String UTF_8 = "UTF-8"; + } + + public static final class PartitionedQueryExecutionInfo { + public static final int VERSION_1 = 1; + } + + public static final class QueryExecutionContext { + public static final String INCREMENTAL_FEED_HEADER_VALUE = "Incremental feed"; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ContentSerializationFormat.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ContentSerializationFormat.java new file mode 100644 index 0000000000000..12edb9f2d6df8 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ContentSerializationFormat.java @@ -0,0 +1,37 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +package com.azure.data.cosmos.internal; + +public enum ContentSerializationFormat { + /** + * Standard JSON RFC UTF-8 text + */ + JsonText, + + /** + * CUSTOM binary for Cosmos DB that encodes a superset of JSON values. + */ + CosmosBinary, +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Database.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Database.java new file mode 100644 index 0000000000000..af8e1df73cb0e --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Database.java @@ -0,0 +1,86 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.Resource; +import org.apache.commons.lang3.StringUtils; + +/** + * Represents a Database in the Azure Cosmos DB database service. A database manages users, permissions and a set of collections + *
+ * Each Azure Cosmos DB Service is able to support multiple independent named databases, with the database being the + * logical container for data. Each Database consists of one or more collections, each of which in turn contain one or + * more documents. Since databases are an an administrative resource and the Service Master Key will be required in + * order to access and successfully complete any action using the User APIs. + */ +public final class Database extends Resource { + + /** + * Initialize a database object. + */ + public Database() { + super(); + } + + /** + * Sets the id + * @param id the name of the resource. + * @return the current instance of Database + */ + public Database id(String id){ + super.id(id); + return this; + } + + /** + * Initialize a database object from json string. + * + * @param jsonString the json string. + */ + public Database(String jsonString) { + super(jsonString); + } + + /** + * Gets the self-link for collections in the database + * + * @return the collections link. + */ + public String getCollectionsLink() { + return String.format("%s/%s", + StringUtils.stripEnd(super.selfLink(), "/"), + super.getString(Constants.Properties.COLLECTIONS_LINK)); + } + + /** + * Gets the self-link for users in the database. + * + * @return the users link. + */ + public String getUsersLink() { + return String.format("%s/%s", + StringUtils.stripEnd(super.selfLink(), "/"), + super.getString(Constants.Properties.USERS_LINK)); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/DatabaseAccount.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/DatabaseAccount.java new file mode 100644 index 0000000000000..8f2fd00f6e016 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/DatabaseAccount.java @@ -0,0 +1,291 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.ConsistencyPolicy; +import com.azure.data.cosmos.Resource; +import com.fasterxml.jackson.core.type.TypeReference; +import org.apache.commons.lang3.ObjectUtils; +import org.apache.commons.lang3.StringUtils; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static com.azure.data.cosmos.BridgeInternal.setProperty; +import static com.azure.data.cosmos.BridgeInternal.setResourceSelfLink; +import static com.azure.data.cosmos.BridgeInternal.populatePropertyBagJsonSerializable; + +/** + * Represents a database account in the Azure Cosmos DB database service. + */ +public class DatabaseAccount extends Resource { + private ConsistencyPolicy consistencyPolicy; + + private long maxMediaStorageUsageInMB; + private long mediaStorageUsageInMB; + private ReplicationPolicy replicationPolicy; + private ReplicationPolicy systemReplicationPolicy; + private Map queryEngineConfiguration; + + /** + * Constructor. + */ + public DatabaseAccount() { + setResourceSelfLink(this, ""); + } + + /** + * Initialize a database account object from json string. + * + * @param jsonString the json string that represents the database account. + */ + public DatabaseAccount(String jsonString) { + super(jsonString); + } + + /** + * Get the databases link of the databaseAccount. + * + * @return the databases link. + */ + public String getDatabasesLink() { + return super.getString(Constants.Properties.DATABASES_LINK); + } + + /** + * Set the databases of the databaseAccount. + * + * @param databasesLink the databases link. + */ + public void setDatabasesLink(String databasesLink) { + setProperty(this, Constants.Properties.DATABASES_LINK, databasesLink); + } + + /** + * Get the medialink of the databaseAccount. + * + * @return the media link. + */ + public String getMediaLink() { + return super.getString(Constants.Properties.MEDIA_LINK); + } + + /** + * Set the medialink of the databaseAccount. + * + * @param medialink the media link. + */ + public void setMediaLink(String medialink) { + setProperty(this, Constants.Properties.MEDIA_LINK, medialink); + } + + /** + * Get the addresseslink of the databaseAccount. + * + * @return the addresses link. + */ + public String getAddressesLink() { + return super.getString(Constants.Properties.ADDRESS_LINK); + } + + /** + * Set the addresseslink of the databaseAccount. + * + * @param addresseslink the addresses link. + */ + public void setAddressesLink(String addresseslink) { + setProperty(this, Constants.Properties.ADDRESS_LINK, addresseslink); + } + + /** + * Attachment content (media) storage quota in MBs Retrieved from gateway. + * + * @return the max media storage usage in MB. + */ + public long getMaxMediaStorageUsageInMB() { + return this.maxMediaStorageUsageInMB; + } + + public void setMaxMediaStorageUsageInMB(long value) { + this.maxMediaStorageUsageInMB = value; + } + + /** + * Current attachment content (media) usage in MBs. + *
+ * Retrieved from gateway. Value is returned from cached information updated + * periodically and is not guaranteed to be real time. + * + * @return the media storage usage in MB. + */ + public long getMediaStorageUsageInMB() { + return this.mediaStorageUsageInMB; + } + + public void setMediaStorageUsageInMB(long value) { + this.mediaStorageUsageInMB = value; + } + + /** + * Gets the ConsistencyPolicy properties. + * + * @return the consistency policy. + */ + public ConsistencyPolicy getConsistencyPolicy() { + if (this.consistencyPolicy == null) { + this.consistencyPolicy = super.getObject(Constants.Properties.USER_CONSISTENCY_POLICY, + ConsistencyPolicy.class); + + if (this.consistencyPolicy == null) { + this.consistencyPolicy = new ConsistencyPolicy(); + } + } + return this.consistencyPolicy; + } + + /** + * Gets the ReplicationPolicy properties. + * + * @return the replication policy. + */ + public ReplicationPolicy getReplicationPolicy() { + if (this.replicationPolicy == null) { + this.replicationPolicy = super.getObject(Constants.Properties.USER_REPLICATION_POLICY, + ReplicationPolicy.class); + + if (this.replicationPolicy == null) { + this.replicationPolicy = new ReplicationPolicy(); + } + } + + return this.replicationPolicy; + } + + /** + * Gets the SystemReplicationPolicy properties. + * + * @return the system replication policy. + */ + public ReplicationPolicy getSystemReplicationPolicy() { + if (this.systemReplicationPolicy == null) { + this.systemReplicationPolicy = super.getObject(Constants.Properties.SYSTEM_REPLICATION_POLICY, + ReplicationPolicy.class); + + if (this.systemReplicationPolicy == null) { + this.systemReplicationPolicy = new ReplicationPolicy(); + } + } + + return this.systemReplicationPolicy; + } + + /** + * Gets the QueryEngineConfiuration properties. + * + * @return the query engine configuration. + */ + public Map getQueryEngineConfiuration() { + if (this.queryEngineConfiguration == null) { + String queryEngineConfigurationJsonString = super.getObject(Constants.Properties.QUERY_ENGINE_CONFIGURATION, + String.class); + if (StringUtils.isNotEmpty(queryEngineConfigurationJsonString)) { + TypeReference> typeRef = new TypeReference>() { + }; + try { + this.queryEngineConfiguration = Utils.getSimpleObjectMapper() + .readValue(queryEngineConfigurationJsonString, typeRef); + } catch (IOException e) { + throw new IllegalArgumentException(e); + } + if (this.queryEngineConfiguration == null) { + this.queryEngineConfiguration = new HashMap<>(); + } + } + } + + return this.queryEngineConfiguration; + } + + /** + * Gets the list of writable locations for this database account. + * + * @return the list of writable locations. + */ + public Iterable getWritableLocations() { + return super.getCollection(Constants.Properties.WRITABLE_LOCATIONS, DatabaseAccountLocation.class); + } + + /** + * Sets the list of writable locations for this database account. + *
+ * The list of writable locations are returned by the service. + * + * @param locations the list of writable locations. + */ + public void setWritableLocations(Iterable locations) { + setProperty(this, Constants.Properties.WRITABLE_LOCATIONS, locations); + } + + /** + * Gets the list of readable locations for this database account. + * + * @return the list of readable locations. + */ + public Iterable getReadableLocations() { + return super.getCollection(Constants.Properties.READABLE_LOCATIONS, DatabaseAccountLocation.class); + } + + /** + * Sets the list of readable locations for this database account. + *
+ * The list of readable locations are returned by the service. + * + * @param locations the list of readable locations. + */ + public void setReadableLocations(Iterable locations) { + setProperty(this, Constants.Properties.READABLE_LOCATIONS, locations); + } + + public boolean isEnableMultipleWriteLocations() { + return ObjectUtils.defaultIfNull(super.getBoolean(Constants.Properties.ENABLE_MULTIPLE_WRITE_LOCATIONS), false); + } + + public void setEnableMultipleWriteLocations(boolean value) { + setProperty(this, Constants.Properties.ENABLE_MULTIPLE_WRITE_LOCATIONS, value); + } + + public void populatePropertyBag() { + if (this.consistencyPolicy != null) { + populatePropertyBagJsonSerializable(this.consistencyPolicy); + setProperty(this, Constants.Properties.USER_CONSISTENCY_POLICY, this.consistencyPolicy); + } + } + + @Override + public String toJson() { + this.populatePropertyBag(); + return super.toJson(); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/DatabaseAccountConfigurationProvider.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/DatabaseAccountConfigurationProvider.java new file mode 100644 index 0000000000000..a82915676cb05 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/DatabaseAccountConfigurationProvider.java @@ -0,0 +1,37 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.ConsistencyLevel; + +/** + * Defines an interface to work with database account configuration in the Azure Cosmos DB database service. 
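The writable and readable location lists on DatabaseAccount above describe the account's regional topology. A small inspection sketch, assuming the two iterables carry DatabaseAccountLocation entries and that accountJson is a database-account response body as returned by the gateway:

import com.azure.data.cosmos.internal.DatabaseAccount;
import com.azure.data.cosmos.internal.DatabaseAccountLocation;

public final class AccountTopologySketch {
    static void printTopology(String accountJson) {
        DatabaseAccount account = new DatabaseAccount(accountJson);
        System.out.println("multiple write locations enabled: " + account.isEnableMultipleWriteLocations());
        for (DatabaseAccountLocation location : account.getWritableLocations()) {
            System.out.println("writable: " + location.getName() + " -> " + location.getEndpoint());
        }
        for (DatabaseAccountLocation location : account.getReadableLocations()) {
            System.out.println("readable: " + location.getName() + " -> " + location.getEndpoint());
        }
    }
}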
+ */ +public interface DatabaseAccountConfigurationProvider { + ConsistencyLevel getStoreConsistencyPolicy(); + + int getMaxReplicaSetSize(); + + String getQueryEngineConfiguration(); +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/DatabaseAccountLocation.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/DatabaseAccountLocation.java new file mode 100644 index 0000000000000..160dd825ee868 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/DatabaseAccountLocation.java @@ -0,0 +1,88 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.JsonSerializable; + +import static com.azure.data.cosmos.BridgeInternal.setProperty; + +/** + * Represents the location of a database account in the Azure Cosmos DB database service. + */ +public class DatabaseAccountLocation extends JsonSerializable { + + /** + * DEFAULT Constructor. Creates a new instance of the + * DatabaseAccountLocation object. + */ + public DatabaseAccountLocation() { + super(); + } + + /** + * Creates a new instance of the DatabaseAccountLocation object from a JSON + * string. + * + * @param jsonString the JSON string that represents the DatabaseAccountLocation object. + */ + public DatabaseAccountLocation(String jsonString) { + super(jsonString); + } + + /** + * Gets The name of the database account location. + * + * @return the name of the database account location. + */ + public String getName() { + return super.getString(Constants.Properties.Name); + } + + /** + * Sets the name of the database account location. + * + * @param name the name of the database account location. + */ + public void setName(String name) { + setProperty(this, Constants.Properties.Name, name); + } + + /** + * Gets The endpoint (the URI) of the database account location. + * + * @return the endpoint of the database account location. + */ + public String getEndpoint() { + return super.getString(Constants.Properties.DATABASE_ACCOUNT_ENDPOINT); + } + + /** + * Sets the endpoint (the URI) of the database account location. + * + * @param endpoint the endpoint of the database account location. 
+ */ + public void setEndpoint(String endpoint) { + setProperty(this, Constants.Properties.DATABASE_ACCOUNT_ENDPOINT, endpoint); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/DatabaseAccountManagerInternal.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/DatabaseAccountManagerInternal.java new file mode 100644 index 0000000000000..e423da21f639a --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/DatabaseAccountManagerInternal.java @@ -0,0 +1,54 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.ConnectionPolicy; +import reactor.core.publisher.Flux; + +import java.net.URI; + +public interface DatabaseAccountManagerInternal { + + /** + * Gets database account information. + * + * @param endpoint the endpoint from which gets the database account + * @return the database account. + */ + Flux getDatabaseAccountFromEndpoint(URI endpoint); + + /** + * Gets the connection policy + * + * @return connection policy + */ + ConnectionPolicy getConnectionPolicy(); + + /** + * Gets the service endpoint + * + * @return service endpoint + */ + URI getServiceEndpoint(); + +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/DatabaseForTest.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/DatabaseForTest.java new file mode 100644 index 0000000000000..3d2ca095c6f3b --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/DatabaseForTest.java @@ -0,0 +1,128 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.SqlParameter; +import com.azure.data.cosmos.SqlParameterList; +import com.azure.data.cosmos.SqlQuerySpec; +import org.apache.commons.lang3.RandomStringUtils; +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Flux; + +import java.time.Duration; +import java.time.LocalDateTime; +import java.time.format.DateTimeFormatter; +import java.util.List; + +public class DatabaseForTest { + private static Logger logger = LoggerFactory.getLogger(DatabaseForTest.class); + public static final String SHARED_DB_ID_PREFIX = "RxJava.SDKTest.SharedDatabase"; + private static final Duration CLEANUP_THRESHOLD_DURATION = Duration.ofHours(2); + private static final String DELIMITER = "_"; + private static DateTimeFormatter TIME_FORMATTER = DateTimeFormatter.ofPattern("yyyyMMdd'T'HHmmss"); + + public LocalDateTime createdTime; + public Database createdDatabase; + + private DatabaseForTest(Database db, LocalDateTime createdTime) { + this.createdDatabase = db; + this.createdTime = createdTime; + } + + private boolean isStale() { + return isOlderThan(CLEANUP_THRESHOLD_DURATION); + } + + private boolean isOlderThan(Duration dur) { + return createdTime.isBefore(LocalDateTime.now().minus(dur)); + } + + public static String generateId() { + return SHARED_DB_ID_PREFIX + DELIMITER + TIME_FORMATTER.format(LocalDateTime.now()) + DELIMITER + RandomStringUtils.randomAlphabetic(3); + } + + private static DatabaseForTest from(Database db) { + if (db == null || db.id() == null || db.selfLink() == null) { + return null; + } + + String id = db.id(); + if (id == null) { + return null; + } + + String[] parts = StringUtils.split(id, DELIMITER); + if (parts.length != 3) { + return null; + } + if (!StringUtils.equals(parts[0], SHARED_DB_ID_PREFIX)) { + return null; + } + + try { + LocalDateTime parsedTime = LocalDateTime.parse(parts[1], TIME_FORMATTER); + return new DatabaseForTest(db, parsedTime); + } catch (Exception e) { + return null; + } + } + + public static DatabaseForTest create(DatabaseManager client) { + Database dbDef = new Database(); + dbDef.id(generateId()); + + Database db = client.createDatabase(dbDef).single().block().getResource(); + DatabaseForTest dbForTest = DatabaseForTest.from(db); + assert(dbForTest != null); + return dbForTest; + } + + public static void cleanupStaleTestDatabases(DatabaseManager client) { + logger.info("Cleaning stale test databases ..."); + List dbs = client.queryDatabases( + new SqlQuerySpec("SELECT * FROM c WHERE STARTSWITH(c.id, @PREFIX)", + new SqlParameterList(new SqlParameter("@PREFIX", DatabaseForTest.SHARED_DB_ID_PREFIX)))) + .flatMap(page -> Flux.fromIterable(page.results())).collectList().block(); + + for (Database db : dbs) { + assert(db.id().startsWith(DatabaseForTest.SHARED_DB_ID_PREFIX)); + + DatabaseForTest dbForTest = DatabaseForTest.from(db); + + if (db != null && dbForTest.isStale()) { + 
logger.info("Deleting database {}", db.id()); + client.deleteDatabase(db.id()).single().block(); + } + } + } + + public interface DatabaseManager { + Flux> queryDatabases(SqlQuerySpec query); + Flux> createDatabase(Database databaseDefinition); + Flux> deleteDatabase(String id); + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Document.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Document.java new file mode 100644 index 0000000000000..9e4f5e2dfb9ca --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Document.java @@ -0,0 +1,134 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.Resource; +import com.fasterxml.jackson.databind.ObjectMapper; + +import java.io.IOException; + +import static com.azure.data.cosmos.BridgeInternal.remove; +import static com.azure.data.cosmos.BridgeInternal.setProperty; +import static com.azure.data.cosmos.BridgeInternal.setMapper; + +/** + * Represents a document in the Azure Cosmos DB database service. + *
+ * A document is a structured JSON document. There is no set schema for the JSON documents, and a document may contain + * any number of custom properties as well as an optional list of attachments. Document is an application resource and + * can be authorized using the master key or resource keys. + */ +public class Document extends Resource { + + /** + * Initialize a document object. + */ + public Document() { + super(); + } + + /** + * Sets the id + * @param id the name of the resource. + * @return the current instance of the Document + */ + public Document id(String id){ + super.id(id); + return this; + } + + /** + * Initialize a document object from json string. + * + * @param jsonString the json string that represents the document object. + * @param objectMapper the custom object mapper + */ + Document(String jsonString, ObjectMapper objectMapper) { + // TODO: Made package private due to #153. #171 adding custom serialization options back. + super(jsonString); + setMapper(this, objectMapper); + } + + /** + * Initialize a document object from json string. + * + * @param jsonString the json string that represents the document object. + */ + public Document(String jsonString) { + super(jsonString); + } + + public static Document FromObject(Object document, ObjectMapper objectMapper) { + Document typedDocument; + if (document instanceof Document) { + typedDocument = (Document) document; + } else { + try { + return new Document(objectMapper.writeValueAsString(document)); + } catch (IOException e) { + throw new IllegalArgumentException("Can't serialize the object into the json string", e); + } + } + return typedDocument; + } + + /** + * Gets the document's time-to-live value. + * + * @return the document's time-to-live value in seconds. + */ + public Integer getTimeToLive() { + if (super.has(Constants.Properties.TTL)) { + return super.getInt(Constants.Properties.TTL); + } + + return null; + } + + /** + * Sets the document's time-to-live value. + *
+ * A document's time-to-live value is an optional property. If set, the document expires after the specified number + * of seconds since its last write time. The value of this property should be one of the following: + *
+ * null - indicates the time-to-live value for this document inherits from the parent collection's default time-to-live value. + *
+ * nonzero positive integer - indicates the number of seconds before the document expires. It overrides the default time-to-live + * value specified on the parent collection, unless the parent collection's default time-to-live is null. + *
+ * -1 - indicates the document never expires. It overrides the default time-to-live + * value specified on the parent collection, unless the parent collection's default time-to-live is null. + * + * @param timeToLive the document's time-to-live value in seconds. + */ + public void setTimeToLive(Integer timeToLive) { + // a "null" value is represented as a missing element on the wire. + // setting timeToLive to null should remove the property from the property bag. + if (timeToLive != null) { + setProperty(this, Constants.Properties.TTL, timeToLive); + } else if (super.has(Constants.Properties.TTL)) { + remove(this, Constants.Properties.TTL); + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/DocumentCollection.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/DocumentCollection.java new file mode 100644 index 0000000000000..13e552434f094 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/DocumentCollection.java @@ -0,0 +1,320 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.ConflictResolutionPolicy; +import com.azure.data.cosmos.IndexingPolicy; +import com.azure.data.cosmos.PartitionKeyDefinition; +import com.azure.data.cosmos.Resource; +import com.azure.data.cosmos.UniqueKeyPolicy; +import org.apache.commons.lang3.StringUtils; + +import static com.azure.data.cosmos.BridgeInternal.populatePropertyBagJsonSerializable; +import static com.azure.data.cosmos.BridgeInternal.setProperty; +import static com.azure.data.cosmos.BridgeInternal.remove; + +/** + * Represents a document collection in the Azure Cosmos DB database service. A collection is a named logical container + * for documents. + *
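The three time-to-live cases spelled out in the Document Javadoc above map onto setTimeToLive as follows; a minimal sketch using only the constructor and accessors shown, with an illustrative JSON body:

import com.azure.data.cosmos.internal.Document;

public final class DocumentTtlSketch {
    public static void main(String[] args) {
        Document doc = new Document("{\"id\":\"item1\"}");
        doc.setTimeToLive(300);   // expires 300 seconds after its last write time
        doc.setTimeToLive(-1);    // never expires, overriding a non-null collection default
        doc.setTimeToLive(null);  // removes "ttl" so the collection's default time-to-live applies
        System.out.println(doc.getTimeToLive());  // prints null, since the property was removed
    }
}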
+ * A database may contain zero or more named collections and each collection consists of zero or more JSON documents. + * Being schema-free, the documents in a collection do not need to share the same structure or fields. Since collections + * are application resources, they can be authorized using either the master key or resource keys. + */ +public final class DocumentCollection extends Resource { + private IndexingPolicy indexingPolicy; + private UniqueKeyPolicy uniqueKeyPolicy; + private PartitionKeyDefinition partitionKeyDefinition; + + /** + * Initialize a document collection object. + */ + public DocumentCollection() { + super(); + } + + /** + * Sets the id and returns the document collection + * @param id the name of the resource. + * @return + */ + public DocumentCollection id(String id){ + super.id(id); + return this; + } + + /** + * Initialize a document collection object from json string. + * + * @param jsonString the json string that represents the document collection. + */ + public DocumentCollection(String jsonString) { + super(jsonString); + } + + /** + * Gets the indexing policy. + * + * @return the indexing policy. + */ + public IndexingPolicy getIndexingPolicy() { + if (this.indexingPolicy == null) { + if (super.has(Constants.Properties.INDEXING_POLICY)) { + this.indexingPolicy = super.getObject(Constants.Properties.INDEXING_POLICY, IndexingPolicy.class); + } else { + this.indexingPolicy = new IndexingPolicy(); + } + } + + return this.indexingPolicy; + } + + /** + * Sets the indexing policy. + * + * @param indexingPolicy the indexing policy. + */ + public void setIndexingPolicy(IndexingPolicy indexingPolicy) { + if (indexingPolicy == null) { + throw new IllegalArgumentException("IndexingPolicy cannot be null."); + } + + this.indexingPolicy = indexingPolicy; + } + + /** + * Gets the collection's partition key definition. + * + * @return the partition key definition. + */ + public PartitionKeyDefinition getPartitionKey() { + if (this.partitionKeyDefinition == null) { + + if (super.has(Constants.Properties.PARTITION_KEY)) { + this.partitionKeyDefinition = super.getObject(Constants.Properties.PARTITION_KEY, PartitionKeyDefinition.class); + } else { + this.partitionKeyDefinition = new PartitionKeyDefinition(); + } + } + + return this.partitionKeyDefinition; + } + + /** + * Sets the collection's partition key definition. + * + * @param partitionKey the partition key definition. + */ + public void setPartitionKey(PartitionKeyDefinition partitionKey) { + if (partitionKey == null) { + throw new IllegalArgumentException("partitionKeyDefinition cannot be null."); + } + + this.partitionKeyDefinition = partitionKey; + } + + /** + * Gets the collection's default time-to-live value. + * + * @return the default time-to-live value in seconds. + */ + public Integer getDefaultTimeToLive() { + if (super.has(Constants.Properties.DEFAULT_TTL)) { + return super.getInt(Constants.Properties.DEFAULT_TTL); + } + + return null; + } + + /** + * Sets the collection's default time-to-live value. + *
+ * The default time-to-live value on a collection is an optional property. If set, the documents within the collection + * expires after the specified number of seconds since their last write time. The value of this property should be one of the following: + *
+ * null - indicates evaluation of time-to-live is disabled and documents within the collection will never expire, regardless whether + * individual documents have their time-to-live set. + *
+ * nonzero positive integer - indicates the default time-to-live value for all documents within the collection. This value can be overridden + * by individual documents' time-to-live value. + *
+ * -1 - indicates by default all documents within the collection never expire. This value can be overridden by individual documents' + * time-to-live value. + * + * @param timeToLive the default time-to-live value in seconds. + */ + public void setDefaultTimeToLive(Integer timeToLive) { + // a "null" value is represented as a missing element on the wire. + // setting timeToLive to null should remove the property from the property bag. + if (timeToLive != null) { + setProperty(this, Constants.Properties.DEFAULT_TTL, timeToLive); + } else if (super.has(Constants.Properties.DEFAULT_TTL)) { + remove(this, Constants.Properties.DEFAULT_TTL); + } + } + + /** + * Sets the Uni that guarantees uniqueness of documents in collection in the Azure Cosmos DB service. + * @return UniqueKeyPolicy + */ + public UniqueKeyPolicy getUniqueKeyPolicy() { + + // Thread safe lazy initialization for case when collection is cached (and is basically readonly). + if (this.uniqueKeyPolicy == null) { + this.uniqueKeyPolicy = super.getObject(Constants.Properties.UNIQUE_KEY_POLICY, UniqueKeyPolicy.class); + + if (this.uniqueKeyPolicy == null) { + this.uniqueKeyPolicy = new UniqueKeyPolicy(); + } + } + + return this.uniqueKeyPolicy; + } + + public void setUniqueKeyPolicy(UniqueKeyPolicy uniqueKeyPolicy) { + if (uniqueKeyPolicy == null) { + throw new IllegalArgumentException("uniqueKeyPolicy cannot be null."); + } + + this.uniqueKeyPolicy = uniqueKeyPolicy; + setProperty(this, Constants.Properties.UNIQUE_KEY_POLICY, uniqueKeyPolicy); + } + + /** + * Gets the conflictResolutionPolicy that is used for resolving conflicting writes + * on documents in different regions, in a collection in the Azure Cosmos DB service. + * + * @return ConflictResolutionPolicy + */ + public ConflictResolutionPolicy getConflictResolutionPolicy() { + return super.getObject(Constants.Properties.CONFLICT_RESOLUTION_POLICY, ConflictResolutionPolicy.class); + } + + /** + * Sets the conflictResolutionPolicy that is used for resolving conflicting writes + * on documents in different regions, in a collection in the Azure Cosmos DB service. + * + * @param value ConflictResolutionPolicy to be used. + */ + public void setConflictResolutionPolicy(ConflictResolutionPolicy value) { + if (value == null) { + throw new IllegalArgumentException("CONFLICT_RESOLUTION_POLICY cannot be null."); + } + + setProperty(this, Constants.Properties.CONFLICT_RESOLUTION_POLICY, value); + } + + + /** + * Gets the self-link for documents in a collection. + * + * @return the document link. + */ + public String getDocumentsLink() { + return String.format("%s/%s", + StringUtils.stripEnd(super.selfLink(), "/"), + super.getString(Constants.Properties.DOCUMENTS_LINK)); + } + + /** + * Gets the self-link for stored procedures in a collection. + * + * @return the stored procedures link. + */ + public String getStoredProceduresLink() { + return String.format("%s/%s", + StringUtils.stripEnd(super.selfLink(), "/"), + super.getString(Constants.Properties.STORED_PROCEDURES_LINK)); + } + + /** + * Gets the self-link for triggers in a collection. + * + * @return the trigger link. + */ + public String getTriggersLink() { + return StringUtils.removeEnd(this.selfLink(), "/") + + "/" + super.getString(Constants.Properties.TRIGGERS_LINK); + } + + /** + * Gets the self-link for user defined functions in a collection. + * + * @return the user defined functions link. 
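The collection-level default described above behaves the same way, except that a null default disables time-to-live evaluation for the whole collection rather than deferring to a parent. A sketch restricted to members shown in this class:

import com.azure.data.cosmos.internal.DocumentCollection;

public final class CollectionTtlSketch {
    public static void main(String[] args) {
        DocumentCollection collectionDef = new DocumentCollection().id("myCollection");
        collectionDef.setDefaultTimeToLive(3600);  // documents expire an hour after their last write unless they override it
        collectionDef.setDefaultTimeToLive(null);  // removes "defaultTtl"; TTL evaluation is disabled again
        System.out.println(collectionDef.getDefaultTimeToLive());  // prints null
    }
}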
+ */ + public String getUserDefinedFunctionsLink() { + return StringUtils.removeEnd(this.selfLink(), "/") + + "/" + super.getString(Constants.Properties.USER_DEFINED_FUNCTIONS_LINK); + } + + /** + * Gets the self-link for conflicts in a collection. + * + * @return the conflicts link. + */ + public String getConflictsLink() { + return StringUtils.removeEnd(this.selfLink(), "/") + + "/" + super.getString(Constants.Properties.CONFLICTS_LINK); + } + + void populatePropertyBag() { + if (this.indexingPolicy == null) { + this.getIndexingPolicy(); + } + if (this.uniqueKeyPolicy == null) { + this.getUniqueKeyPolicy(); + } + + if (this.partitionKeyDefinition != null) { + populatePropertyBagJsonSerializable(this.partitionKeyDefinition); + setProperty(this, Constants.Properties.PARTITION_KEY, this.partitionKeyDefinition); + } + populatePropertyBagJsonSerializable(this.indexingPolicy); + populatePropertyBagJsonSerializable(this.uniqueKeyPolicy); + + setProperty(this, Constants.Properties.INDEXING_POLICY, this.indexingPolicy); + setProperty(this, Constants.Properties.UNIQUE_KEY_POLICY, this.uniqueKeyPolicy); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || !DocumentCollection.class.isAssignableFrom(obj.getClass())) { + return false; + } + + DocumentCollection typedObj = (DocumentCollection) obj; + return typedObj.resourceId().equals(this.resourceId()); + } + + @Override + public int hashCode() { + return this.resourceId().hashCode(); + } + + @Override + public String toJson() { + this.populatePropertyBag(); + return super.toJson(); + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/DocumentServiceRequestContext.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/DocumentServiceRequestContext.java new file mode 100644 index 0000000000000..9cb502aa9deae --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/DocumentServiceRequestContext.java @@ -0,0 +1,120 @@ +/* + * + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.CosmosResponseDiagnostics; +import com.azure.data.cosmos.internal.directconnectivity.StoreResponse; +import com.azure.data.cosmos.internal.directconnectivity.StoreResult; +import com.azure.data.cosmos.internal.directconnectivity.TimeoutHelper; +import com.azure.data.cosmos.internal.routing.PartitionKeyInternal; + +import java.net.URL; +import java.util.List; + +public class DocumentServiceRequestContext implements Cloneable{ + public volatile boolean forceAddressRefresh; + public volatile boolean forceRefreshAddressCache; + public volatile RequestChargeTracker requestChargeTracker; + public volatile TimeoutHelper timeoutHelper; + public volatile String resolvedCollectionRid; + public volatile ISessionToken sessionToken; + public volatile long quorumSelectedLSN; + public volatile long globalCommittedSelectedLSN; + public volatile StoreResponse globalStrongWriteResponse; + public volatile ConsistencyLevel originalRequestConsistencyLevel; + public volatile PartitionKeyRange resolvedPartitionKeyRange; + public volatile Integer regionIndex; + public volatile Boolean usePreferredLocations; + public volatile Integer locationIndexToRoute; + public volatile URL locationEndpointToRoute; + public volatile boolean performedBackgroundAddressRefresh; + public volatile boolean performLocalRefreshOnGoneException; + public volatile List storeResponses; + public volatile StoreResult quorumSelectedStoreResponse; + public volatile PartitionKeyInternal effectivePartitionKey; + public volatile CosmosResponseDiagnostics cosmosResponseDiagnostics; + + /** + * Sets routing directive for GlobalEndpointManager to resolve the request + * to endpoint based on location index. + * + * @param locationIndex Index of the location to which the request should be routed. + * @param usePreferredLocations Use preferred locations to route request. + */ + public void RouteToLocation(int locationIndex, boolean usePreferredLocations) { + this.locationIndexToRoute = locationIndex; + this.usePreferredLocations = usePreferredLocations; + this.locationEndpointToRoute = null; + } + + /** + * Sets location-based routing directive for GlobalEndpointManager to resolve + * the request to given locationEndpoint. + * + * @param locationEndpoint Location endpoint to which the request should be routed. 
+ */ + public void RouteToLocation(URL locationEndpoint) { + this.locationEndpointToRoute = locationEndpoint; + this.locationIndexToRoute = null; + this.usePreferredLocations = null; + } + + /** + * Clears location-based routing directive + */ + public void ClearRouteToLocation() { + this.locationIndexToRoute = null; + this.locationEndpointToRoute = null; + this.usePreferredLocations = null; + } + + @Override + public DocumentServiceRequestContext clone() { + DocumentServiceRequestContext context = new DocumentServiceRequestContext(); + context.forceAddressRefresh = this.forceAddressRefresh; + context.forceRefreshAddressCache = this.forceRefreshAddressCache; + context.requestChargeTracker = this.requestChargeTracker; + context.timeoutHelper = this.timeoutHelper; + context.resolvedCollectionRid = this.resolvedCollectionRid; + context.sessionToken = this.sessionToken; + context.quorumSelectedLSN = this.quorumSelectedLSN; + context.globalCommittedSelectedLSN = this.globalCommittedSelectedLSN; + context.globalStrongWriteResponse = this.globalStrongWriteResponse; + context.originalRequestConsistencyLevel = this.originalRequestConsistencyLevel; + context.resolvedPartitionKeyRange = this.resolvedPartitionKeyRange; + context.regionIndex = this.regionIndex; + context.usePreferredLocations = this.usePreferredLocations; + context.locationIndexToRoute = this.locationIndexToRoute; + context.locationEndpointToRoute = this.locationEndpointToRoute; + context.performLocalRefreshOnGoneException = this.performLocalRefreshOnGoneException; + context.effectivePartitionKey = this.effectivePartitionKey; + context.performedBackgroundAddressRefresh = this.performedBackgroundAddressRefresh; + context.cosmosResponseDiagnostics = this.cosmosResponseDiagnostics; + + return context; + } +} + diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/EnumerationDirection.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/EnumerationDirection.java new file mode 100644 index 0000000000000..4515312aadb21 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/EnumerationDirection.java @@ -0,0 +1,37 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
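A minimal usage sketch of the routing directives documented above, assuming it runs alongside these internal classes; the regional URL is a hypothetical example, not one taken from this change.

```java
import com.azure.data.cosmos.internal.DocumentServiceRequestContext;

import java.net.URL;

class RoutingDirectiveSketch {
    static void demo() throws Exception {
        DocumentServiceRequestContext ctx = new DocumentServiceRequestContext();

        // Route by index into the preferred-locations list.
        ctx.RouteToLocation(0, true);

        // Or pin the request to one explicit regional endpoint (hypothetical URL).
        ctx.RouteToLocation(new URL("https://myaccount-westus2.documents.azure.com:443/"));

        // Clear the directive so GlobalEndpointManager falls back to its default resolution.
        ctx.ClearRouteToLocation();
    }
}
```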
+ * + */ + +package com.azure.data.cosmos.internal; + +public enum EnumerationDirection { + /** + * Use forward direction + */ + Forward, + + /** + * Use reverse direction + */ + Reverse +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Exceptions.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Exceptions.java new file mode 100644 index 0000000000000..c45993030547e --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Exceptions.java @@ -0,0 +1,50 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.CosmosClientException; + +/** + * While this class is public, but it is not part of our published public APIs. + * This is meant to be internally used only by our sdk. 
+ */ +public class Exceptions { + + public static boolean isStatusCode(CosmosClientException e, int status) { + return status == e.statusCode(); + } + + public static boolean isSubStatusCode(CosmosClientException e, int subStatus) { + return subStatus == e.subStatusCode(); + } + + public static boolean isPartitionSplit(CosmosClientException e) { + return isStatusCode(e, HttpConstants.StatusCodes.GONE) + && isSubStatusCode(e, HttpConstants.SubStatusCodes.PARTITION_KEY_RANGE_GONE); + } + + public static boolean isNameCacheStale(CosmosClientException e) { + return isStatusCode(e, HttpConstants.StatusCodes.GONE) + && isSubStatusCode(e, HttpConstants.SubStatusCodes.NAME_CACHE_IS_STALE); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/FanoutOperationState.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/FanoutOperationState.java new file mode 100644 index 0000000000000..862ed362c944c --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/FanoutOperationState.java @@ -0,0 +1,37 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +package com.azure.data.cosmos.internal; + +public enum FanoutOperationState { + /** + * Fanout operation started + */ + Started, + + /** + * Fanout operation completed + */ + Completed +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/GlobalEndpointManager.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/GlobalEndpointManager.java new file mode 100644 index 0000000000000..90d5957302a0c --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/GlobalEndpointManager.java @@ -0,0 +1,260 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
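The Exceptions helpers just above turn status/sub-status checks into named predicates; a sketch of how a caller might classify a failure with them (illustrative only, assuming a CosmosClientException surfaced by a failed operation):

```java
import com.azure.data.cosmos.CosmosClientException;
import com.azure.data.cosmos.internal.Exceptions;
import com.azure.data.cosmos.internal.HttpConstants;

class FailureClassificationSketch {
    // True when the failure is a cache-staleness signal (410/GONE with
    // PARTITION_KEY_RANGE_GONE or NAME_CACHE_IS_STALE) that should be retried
    // after refreshing caches rather than surfaced to the caller.
    static boolean isRetryableCacheMiss(CosmosClientException e) {
        return Exceptions.isPartitionSplit(e) || Exceptions.isNameCacheStale(e);
    }

    // True for throttling (HTTP 429), where the caller should back off and retry.
    static boolean isThrottled(CosmosClientException e) {
        return Exceptions.isStatusCode(e, HttpConstants.StatusCodes.TOO_MANY_REQUESTS);
    }
}
```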
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.ConnectionPolicy; +import com.azure.data.cosmos.internal.routing.LocationCache; +import com.azure.data.cosmos.internal.routing.LocationHelper; +import org.apache.commons.collections4.list.UnmodifiableList; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; +import reactor.core.scheduler.Scheduler; +import reactor.core.scheduler.Schedulers; + +import java.net.URISyntaxException; +import java.net.URL; +import java.time.Duration; +import java.time.LocalDateTime; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Function; + +/** + * Endpoint region cache manager implementation. Supports cross region address routing based on + * availability and preference list. + */ +public class GlobalEndpointManager implements AutoCloseable { + private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class); + + private final int backgroundRefreshLocationTimeIntervalInMS; + private final LocationCache locationCache; + private final URL defaultEndpoint; + private final ConnectionPolicy connectionPolicy; + private final DatabaseAccountManagerInternal owner; + private final AtomicBoolean isRefreshing; + private final ExecutorService executor = Executors.newSingleThreadExecutor(); + private final Scheduler scheduler = Schedulers.fromExecutor(executor); + private volatile boolean isClosed; + + public GlobalEndpointManager(DatabaseAccountManagerInternal owner, ConnectionPolicy connectionPolicy, Configs configs) { + this.backgroundRefreshLocationTimeIntervalInMS = configs.getUnavailableLocationsExpirationTimeInSeconds() * 1000; + try { + this.locationCache = new LocationCache( + new ArrayList<>(connectionPolicy.preferredLocations() != null ? 
+ connectionPolicy.preferredLocations(): + Collections.emptyList() + ), + owner.getServiceEndpoint().toURL(), + connectionPolicy.enableEndpointDiscovery(), + BridgeInternal.getUseMultipleWriteLocations(connectionPolicy), + configs); + + this.owner = owner; + this.defaultEndpoint = owner.getServiceEndpoint().toURL(); + this.connectionPolicy = connectionPolicy; + + this.isRefreshing = new AtomicBoolean(false); + this.isClosed = false; + } catch (Exception e) { + throw new IllegalArgumentException(e); + } + } + + public void init() { + // TODO: add support for openAsync + // https://msdata.visualstudio.com/CosmosDB/_workitems/edit/332589 + startRefreshLocationTimerAsync(true).block(); + } + + public UnmodifiableList getReadEndpoints() { + // readonly + return this.locationCache.getReadEndpoints(); + } + + public UnmodifiableList getWriteEndpoints() { + //readonly + return this.locationCache.getWriteEndpoints(); + } + + public static Mono getDatabaseAccountFromAnyLocationsAsync( + URL defaultEndpoint, List locations, Function> getDatabaseAccountFn) { + + return getDatabaseAccountFn.apply(defaultEndpoint).onErrorResume( + e -> { + logger.error("Fail to reach global gateway [{}], [{}]", defaultEndpoint, e.getMessage()); + if (locations.isEmpty()) { + return Mono.error(e); + } + + Flux> obs = Flux.range(0, locations.size()) + .map(index -> getDatabaseAccountFn.apply(LocationHelper.getLocationEndpoint(defaultEndpoint, locations.get(index))).flux()); + + // iterate and get the database account from the first non failure, otherwise get the last error. + Mono res = Flux.concatDelayError(obs).take(1).single(); + return res.doOnError( + innerE -> logger.error("Fail to reach location any of locations {} {}", String.join(",", locations), innerE.getMessage())); + }); + } + + public URL resolveServiceEndpoint(RxDocumentServiceRequest request) { + return this.locationCache.resolveServiceEndpoint(request); + } + + public void markEndpointUnavailableForRead(URL endpoint) { + logger.debug("Marking endpoint {} unavailable for read",endpoint); + this.locationCache.markEndpointUnavailableForRead(endpoint);; + } + + public void markEndpointUnavailableForWrite(URL endpoint) { + logger.debug("Marking endpoint {} unavailable for Write",endpoint); + this.locationCache.markEndpointUnavailableForWrite(endpoint); + } + + public boolean CanUseMultipleWriteLocations(RxDocumentServiceRequest request) { + return this.locationCache.canUseMultipleWriteLocations(request); + } + + public void close() { + this.isClosed = true; + this.executor.shutdown(); + logger.debug("GlobalEndpointManager closed."); + } + + public Mono refreshLocationAsync(DatabaseAccount databaseAccount) { + return Mono.defer(() -> { + logger.debug("refreshLocationAsync() invoked"); + if (!isRefreshing.compareAndSet(false, true)) { + logger.debug("in the middle of another refresh. 
Not invoking a new refresh."); + return Mono.empty(); + } + + logger.debug("will refresh"); + return this.refreshLocationPrivateAsync(databaseAccount).doOnError(e -> this.isRefreshing.set(false)); + }); + } + + private Mono refreshLocationPrivateAsync(DatabaseAccount databaseAccount) { + return Mono.defer(() -> { + logger.debug("refreshLocationPrivateAsync() refreshing locations"); + + if (databaseAccount != null) { + this.locationCache.onDatabaseAccountRead(databaseAccount); + } + + Utils.ValueHolder canRefreshInBackground = new Utils.ValueHolder<>(); + if (this.locationCache.shouldRefreshEndpoints(canRefreshInBackground)) { + logger.debug("shouldRefreshEndpoints: true"); + + if (databaseAccount == null && !canRefreshInBackground.v) { + logger.debug("shouldRefreshEndpoints: can't be done in background"); + + Mono databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync( + this.defaultEndpoint, + new ArrayList<>(this.connectionPolicy.preferredLocations()), + this::getDatabaseAccountAsync); + + return databaseAccountObs.map(dbAccount -> { + this.locationCache.onDatabaseAccountRead(dbAccount); + return dbAccount; + }).flatMap(dbAccount -> { + // trigger a startRefreshLocationTimerAsync don't wait on it. + this.startRefreshLocationTimerAsync(); + return Mono.empty(); + }); + } + + // trigger a startRefreshLocationTimerAsync don't wait on it. + this.startRefreshLocationTimerAsync(); + + return Mono.empty(); + } else { + logger.debug("shouldRefreshEndpoints: false, nothing to do."); + this.isRefreshing.set(false); + return Mono.empty(); + } + }); + } + + private void startRefreshLocationTimerAsync() { + startRefreshLocationTimerAsync(false).subscribe(); + } + + private Mono startRefreshLocationTimerAsync(boolean initialization) { + + if (this.isClosed) { + logger.debug("startRefreshLocationTimerAsync: nothing to do, it is closed"); + // if client is already closed, nothing to be done, just return. + return Mono.empty(); + } + + logger.debug("registering a refresh in [{}] ms", this.backgroundRefreshLocationTimeIntervalInMS); + LocalDateTime now = LocalDateTime.now(); + + int delayInMillis = initialization ? 0: this.backgroundRefreshLocationTimeIntervalInMS; + + return Mono.delay(Duration.ofMillis(delayInMillis)) + .flatMap( + t -> { + if (this.isClosed) { + logger.warn("client already closed"); + // if client is already closed, nothing to be done, just return. + return Mono.empty(); + } + + logger.debug("startRefreshLocationTimerAsync() - Invoking refresh, I was registered on [{}]", now); + Mono databaseAccountObs = GlobalEndpointManager.getDatabaseAccountFromAnyLocationsAsync(this.defaultEndpoint, new ArrayList<>(this.connectionPolicy.preferredLocations()), + this::getDatabaseAccountAsync); + + return databaseAccountObs.flatMap(dbAccount -> { + logger.debug("db account retrieved"); + return this.refreshLocationPrivateAsync(dbAccount); + }); + }).onErrorResume(ex -> { + logger.error("startRefreshLocationTimerAsync() - Unable to refresh database account from any location. 
Exception: {}", ex.toString(), ex); + + this.startRefreshLocationTimerAsync(); + return Mono.empty(); + }).subscribeOn(scheduler); + } + + private Mono getDatabaseAccountAsync(URL serviceEndpoint) { + try { + return this.owner.getDatabaseAccountFromEndpoint(serviceEndpoint.toURI()) + .doOnNext(i -> logger.debug("account retrieved: {}", i)).single(); + } catch (URISyntaxException e) { + return Mono.error(e); + } + } + + public boolean isClosed() { + return this.isClosed; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/HttpConstants.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/HttpConstants.java new file mode 100644 index 0000000000000..d1a31ecd600af --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/HttpConstants.java @@ -0,0 +1,326 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +/** + * Used internally. HTTP constants in the Azure Cosmos DB database service Java + * SDK. 
+ */ +public class HttpConstants { + public static class HttpMethods { + public static final String GET = "GET"; + public static final String POST = "POST"; + public static final String PUT = "PUT"; + public static final String DELETE = "DELETE"; + public static final String HEAD = "HEAD"; + public static final String OPTIONS = "OPTIONS"; + public static final String PATCH = "PATCH"; + } + + public static class QueryStrings { + public static final String URL = "$resolveFor"; + public static final String FILTER = "$filter"; + public static final String PARTITION_KEY_RANGE_IDS = "$partitionKeyRangeIds"; + } + + public static class HttpHeaders { + public static final String AUTHORIZATION = "authorization"; + public static final String E_TAG = "etag"; + public static final String METHOD_OVERRIDE = "X-HTTP-Method"; + public static final String SLUG = "Slug"; + public static final String CONTENT_TYPE = "Content-Type"; + public static final String LAST_MODIFIED = "Last-Modified"; + public static final String CONTENT_ENCODING = "Content-Encoding"; + public static final String CHARACTER_SET = "CharacterSet"; + public static final String USER_AGENT = "User-Agent"; + public static final String IF_MODIFIED_SINCE = "If-Modified-Since"; + public static final String IF_MATCH = "If-Match"; + public static final String IF_NONE_MATCH = "If-NONE-Match"; + public static final String CONTENT_LENGTH = "Content-Length"; + public static final String ACCEPT_ENCODING = "Accept-Encoding"; + public static final String KEEP_ALIVE = "Keep-Alive"; + public static final String CONNECTION = "Connection"; + public static final String CACHE_CONTROL = "Cache-Control"; + public static final String TRANSFER_ENCODING = "Transfer-Encoding"; + public static final String CONTENT_LANGUAGE = "Content-Language"; + public static final String CONTENT_LOCATION = "Content-Location"; + public static final String CONTENT_MD5 = "Content-Md5"; + public static final String CONTENT_RANGE = "Content-RANGE"; + public static final String ACCEPT = "Accept"; + public static final String ACCEPT_CHARSET = "Accept-Charset"; + public static final String ACCEPT_LANGUAGE = "Accept-Language"; + public static final String IF_RANGE = "If-RANGE"; + public static final String IF_UNMODIFIED_SINCE = "If-Unmodified-Since"; + public static final String MAX_FORWARDS = "Max-Forwards"; + public static final String PROXY_AUTHORIZATION = "Proxy-Authorization"; + public static final String ACCEPT_RANGES = "Accept-Ranges"; + public static final String PROXY_AUTHENTICATE = "Proxy-Authenticate"; + public static final String RETRY_AFTER = "Retry-After"; + public static final String SET_COOKIE = "Set-Cookie"; + public static final String WWW_AUTHENTICATE = "Www-Authenticate"; + public static final String ORIGIN = "Origin"; + public static final String HOST = "Host"; + public static final String ACCESS_CONTROL_ALLOW_ORIGIN = "Access-Control-Allow-Origin"; + public static final String ACCESS_CONTROL_ALLOW_HEADERS = "Access-Control-Allow-Headers"; + public static final String KEY_VALUE_ENCODING_FORMAT = "application/x-www-form-urlencoded"; + public static final String WRAP_ASSERTION_FORMAT = "wrap_assertion_format"; + public static final String WRAP_ASSERTION = "wrap_assertion"; + public static final String WRAP_SCOPE = "wrap_scope"; + public static final String SIMPLE_TOKEN = "SWT"; + public static final String HTTP_DATE = "date"; + public static final String PREFER = "Prefer"; + public static final String LOCATION = "Location"; + public static final String REFERER = 
"referer"; + + // Query + public static final String QUERY = "x-ms-documentdb-query"; + public static final String IS_QUERY = "x-ms-documentdb-isquery"; + public static final String ENABLE_CROSS_PARTITION_QUERY = "x-ms-documentdb-query-enablecrosspartition"; + public static final String PARALLELIZE_CROSS_PARTITION_QUERY = "x-ms-documentdb-query-parallelizecrosspartitionquery"; + + // Our custom DocDB headers + public static final String CONTINUATION = "x-ms-continuation"; + public static final String PAGE_SIZE = "x-ms-max-item-count"; + public static final String RESPONSE_CONTINUATION_TOKEN_LIMIT_IN_KB = "x-ms-documentdb-responsecontinuationtokenlimitinkb"; + + // Request sender generated. Simply echoed by backend. + public static final String ACTIVITY_ID = "x-ms-activity-id"; + public static final String PRE_TRIGGER_INCLUDE = "x-ms-documentdb-pre-trigger-include"; + public static final String PRE_TRIGGER_EXCLUDE = "x-ms-documentdb-pre-trigger-exclude"; + public static final String POST_TRIGGER_INCLUDE = "x-ms-documentdb-post-trigger-include"; + public static final String POST_TRIGGER_EXCLUDE = "x-ms-documentdb-post-trigger-exclude"; + public static final String INDEXING_DIRECTIVE = "x-ms-indexing-directive"; + public static final String SESSION_TOKEN = "x-ms-session-token"; + public static final String CONSISTENCY_LEVEL = "x-ms-consistency-level"; + public static final String X_DATE = "x-ms-date"; + public static final String COLLECTION_PARTITION_INFO = "x-ms-collection-partition-info"; + public static final String COLLECTION_SERVICE_INFO = "x-ms-collection-service-info"; + public static final String RETRY_AFTER_IN_MILLISECONDS = "x-ms-retry-after-ms"; + public static final String IS_FEED_UNFILTERED = "x-ms-is-feed-unfiltered"; + public static final String RESOURCE_TOKEN_EXPIRY = "x-ms-documentdb-expiry-seconds"; + public static final String ENABLE_SCAN_IN_QUERY = "x-ms-documentdb-query-enable-scan"; + public static final String EMIT_VERBOSE_TRACES_IN_QUERY = "x-ms-documentdb-query-emit-traces"; + + // target lsn for head requests + public static final String TARGET_LSN = "x-ms-target-lsn"; + public static final String TARGET_GLOBAL_COMMITTED_LSN = "x-ms-target-global-committed-lsn"; + + // Request validation + public static final String REQUEST_VALIDATION_FAILURE = "x-ms-request-validation-failure"; + + public static final String WRITE_REQUEST_TRIGGER_ADDRESS_REFRESH = "x-ms-write-request-trigger-refresh"; + + // Quota Info + public static final String MAX_RESOURCE_QUOTA = "x-ms-resource-quota"; + public static final String CURRENT_RESOURCE_QUOTA_USAGE = "x-ms-resource-usage"; + public static final String MAX_MEDIA_STORAGE_USAGE_IN_MB = "x-ms-max-media-storage-usage-mb"; + + // Usage Info + public static final String REQUEST_CHARGE = "x-ms-request-charge"; + public static final String CURRENT_MEDIA_STORAGE_USAGE_IN_MB = "x-ms-media-storage-usage-mb"; + public static final String DATABASE_ACCOUNT_CONSUMED_DOCUMENT_STORAGE_IN_MB = "x-ms-databaseaccount-consumed-mb"; + public static final String DATABASE_ACCOUNT_RESERVED_DOCUMENT_STORAGE_IN_MB = "x-ms-databaseaccount-reserved-mb"; + public static final String DATABASE_ACCOUNT_PROVISIONED_DOCUMENT_STORAGE_IN_MB = "x-ms-databaseaccount-provisioned-mb"; + + // Address related headers. 
+ public static final String FORCE_REFRESH = "x-ms-force-refresh"; + public static final String ITEM_COUNT = "x-ms-item-count"; + public static final String NEW_RESOURCE_ID = "x-ms-new-resource-id"; + public static final String USE_MASTER_COLLECTION_RESOLVER = "x-ms-use-master-collection-resolver"; + + // Admin Headers + public static final String FULL_UPGRADE = "x-ms-force-full-upgrade"; + public static final String ONLY_UPGRADE_SYSTEM_APPLICATIONS = "x-ms-only-upgrade-system-applications"; + public static final String ONLY_UPGRADE_NON_SYSTEM_APPLICATIONS = "x-ms-only-upgrade-non-system-applications"; + public static final String UPGRADE_FABRIC_RING_CODE_AND_CONFIG = "x-ms-upgrade-fabric-code-config"; + public static final String IGNORE_IN_PROGRESS_UPGRADE = "x-ms-ignore-inprogress-upgrade"; + public static final String UPGRADE_VERIFICATION_KIND = "x-ms-upgrade-verification-kind"; + public static final String IS_CANARY = "x-ms-iscanary"; + public static final String FORCE_DELETE = "x-ms-force-delete"; + + // Version headers and values + public static final String VERSION = "x-ms-version"; + public static final String SCHEMA_VERSION = "x-ms-schemaversion"; + public static final String SERVER_VERSION = "x-ms-serviceversion"; + public static final String GATEWAY_VERSION = "x-ms-gatewayversion"; + + // RDFE Resource Provider headers + public static final String OCP_RESOURCE_PROVIDER_REGISTERED_URI = "ocp-resourceprovider-registered-uri"; + + // For Document service management operations only. This is in + // essence a 'handle' to (long running) operations. + public static final String REQUEST_ID = "x-ms-request-id"; + + // Object returning this determines what constitutes state and what + // last state change means. For replica, it is the last role change. + public static final String LAST_STATE_CHANGE_UTC = "x-ms-last-state-change-utc"; + + // CSM specific headers + // Client-request-id: Optional caller-specified request ID, in the form + // of a GUID + public static final String CLIENT_REQUEST_ID = "x-ms-client-request-id"; + + // Offer header + public static final String OFFER_TYPE = "x-ms-offer-type"; + public static final String OFFER_THROUGHPUT = "x-ms-offer-throughput"; + public static final String OFFER_IS_RU_PER_MINUTE_THROUGHPUT_ENABLED = "x-ms-offer-is-ru-per-minute-throughput-enabled"; + + // Upsert header + public static final String IS_UPSERT = "x-ms-documentdb-is-upsert"; + + // Index progress headers + public static final String INDEX_TRANSFORMATION_PROGRESS = "x-ms-documentdb-collection-index-transformation-progress"; + public static final String LAZY_INDEXING_PROGRESS = "x-ms-documentdb-collection-lazy-indexing-progress"; + + // Owner name + public static final String OWNER_FULL_NAME = "x-ms-alt-content-path"; + + // Owner ID used for name based request in session token. 
+ public static final String OWNER_ID = "x-ms-content-path"; + + // Partition headers + public static final String PARTITION_KEY = "x-ms-documentdb-partitionkey"; + public static final String PARTITION_KEY_RANGE_ID = "x-ms-documentdb-partitionkeyrangeid"; + + // Error response sub status code + public static final String SUB_STATUS = "x-ms-substatus"; + + public static final String LSN = "lsn"; + + // CUSTOM DocDB JavaScript logging headers + public static final String SCRIPT_ENABLE_LOGGING = "x-ms-documentdb-script-enable-logging"; + public static final String SCRIPT_LOG_RESULTS = "x-ms-documentdb-script-log-results"; + + // Collection quota + public static final String POPULATE_QUOTA_INFO = "x-ms-documentdb-populatequotainfo"; + + // ChangeFeed + public static final String A_IM = "A-IM"; + public static final String ALLOW_TENTATIVE_WRITES = "x-ms-cosmos-allow-tentative-writes"; + + // These properties were added to support RNTBD and they've been added here to + // reduce merge conflicts + + public static final String CAN_CHARGE = "x-ms-cancharge"; + public static final String CAN_OFFER_REPLACE_COMPLETE = "x-ms-can-offer-replace-complete"; + public static final String CAN_THROTTLE = "x-ms-canthrottle"; + public static final String CLIENT_RETRY_ATTEMPT_COUNT = "x-ms-client-retry-attempt-count"; + public static final String COLLECTION_INDEX_TRANSFORMATION_PROGRESS = "x-ms-documentdb-collection-index-transformation-progress"; + public static final String COLLECTION_LAZY_INDEXING_PROGRESS = "x-ms-documentdb-collection-lazy-indexing-progress"; + public static final String COLLECTION_REMOTE_STORAGE_SECURITY_IDENTIFIER = "x-ms-collection-security-identifier"; + public static final String CONTENT_SERIALIZATION_FORMAT = "x-ms-documentdb-content-serialization-format"; + public static final String DISABLE_RNTBD_CHANNEL = "x-ms-disable-rntbd-channel"; + public static final String DISABLE_RU_PER_MINUTE_USAGE = "x-ms-documentdb-disable-ru-per-minute-usage"; + public static final String ENABLE_LOGGING = "x-ms-documentdb-script-enable-logging"; + public static final String ENABLE_LOW_PRECISION_ORDER_BY = "x-ms-documentdb-query-enable-low-precision-order-by"; + public static final String END_EPK = "x-ms-end-epk"; + public static final String END_ID = "x-ms-end-id"; + public static final String ENUMERATION_DIRECTION = "x-ms-enumeration-direction"; + public static final String FILTER_BY_SCHEMA_RESOURCE_ID = "x-ms-documentdb-filterby-schema-rid"; + public static final String FORCE_QUERY_SCAN = "x-ms-documentdb-force-query-scan"; + public static final String GATEWAY_SIGNATURE = "x-ms-gateway-signature"; + public static final String IS_AUTO_SCALE_REQUEST = "x-ms-is-auto-scale"; + public static final String IS_READ_ONLY_SCRIPT = "x-ms-is-readonly-script"; + public static final String LOG_RESULTS = "x-ms-documentdb-script-log-results"; + public static final String MIGRATE_COLLECTION_DIRECTIVE = "x-ms-migratecollection-directive"; + public static final String POPULATE_COLLECTION_THROUGHPUT_INFO = "x-ms-documentdb-populatecollectionthroughputinfo"; + public static final String POPULATE_PARTITION_STATISTICS = "x-ms-documentdb-populatepartitionstatistics"; + public static final String POPULATE_QUERY_METRICS = "x-ms-documentdb-populatequerymetrics"; + public static final String PROFILE_REQUEST = "x-ms-profile-request"; + public static final String READ_FEED_KEY_TYPE = "x-ms-read-key-type"; + public static final String REMAINING_TIME_IN_MS_ON_CLIENT_REQUEST = "x-ms-remaining-time-in-ms-on-client"; + public static 
final String RESTORE_METADATA_FILTER = "x-ms-restore-metadata-filter"; + public static final String SHARED_OFFER_THROUGHPUT = "x-ms-cosmos-shared-offer-throughput"; + public static final String START_EPK = "x-ms-start-epk"; + public static final String START_ID = "x-ms-start-id"; + public static final String SUPPORT_SPATIAL_LEGACY_COORDINATES = "x-ms-documentdb-supportspatiallegacycoordinates"; + public static final String TRANSPORT_REQUEST_ID = "x-ms-transport-request-id"; + public static final String USE_POLYGONS_SMALLER_THAN_AHEMISPHERE = "x-ms-documentdb-usepolygonssmallerthanahemisphere"; + public static final String API_TYPE = "x-ms-cosmos-apitype"; + public static final String QUERY_METRICS = "x-ms-documentdb-query-metrics"; + + } + + public static class A_IMHeaderValues { + public static final String INCREMENTAL_FEED = "Incremental Feed"; + } + + public static class Versions { + public static final String CURRENT_VERSION = "2018-12-31"; + + // TODO: FIXME we can use maven plugin for generating a version file + // @see + // https://stackoverflow.com/questions/2469922/generate-a-version-java-file-in-maven + public static final String SDK_VERSION = "3.0.0"; + public static final String SDK_NAME = "cosmosdb-java-sdk"; + } + + public static class StatusCodes { + public static final int NOT_MODIFIED = 304; + // Client error + public static final int MINIMUM_STATUSCODE_AS_ERROR_GATEWAY = 400; + public static final int BADREQUEST = 400; + public static final int UNAUTHORIZED = 401; + public static final int FORBIDDEN = 403; + public static final int NOTFOUND = 404; + public static final int METHOD_NOT_ALLOWED = 405; + public static final int REQUEST_TIMEOUT = 408; + public static final int CONFLICT = 409; + public static final int GONE = 410; + public static final int PRECONDITION_FAILED = 412; + public static final int REQUEST_ENTITY_TOO_LARGE = 413; + public static final int LOCKED = 423; + public static final int TOO_MANY_REQUESTS = 429; + public static final int RETRY_WITH = 449; + + public static final int SERVICE_UNAVAILABLE = 503; + public static final int INTERNAL_SERVER_ERROR = 500; + } + + public static class SubStatusCodes { + // Unknown SubStatus Code + public static final int UNKNOWN = 0; + + // 400: Bad Request substatus + public static final int PARTITION_KEY_MISMATCH = 1001; + public static final int CROSS_PARTITION_QUERY_NOT_SERVABLE = 1004; + + // 410: StatusCodeType_Gone: substatus + public static final int NAME_CACHE_IS_STALE = 1000; + public static final int PARTITION_KEY_RANGE_GONE = 1002; + public static final int COMPLETING_SPLIT = 1007; + public static final int COMPLETING_PARTITION_MIGRATION = 1008; + + // 403: Forbidden substatus + public static final int FORBIDDEN_WRITEFORBIDDEN = 3; + public static final int DATABASE_ACCOUNT_NOTFOUND = 1008; + + // 404: LSN in session token is higher + public static final int READ_SESSION_NOT_AVAILABLE = 1002; + } + + public static class HeaderValues { + public static final String NoCache = "no-cache"; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/IAuthorizationTokenProvider.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/IAuthorizationTokenProvider.java new file mode 100644 index 0000000000000..f7de0a86be903 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/IAuthorizationTokenProvider.java @@ -0,0 +1,34 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any 
person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal; + +import java.util.Map; + +public interface IAuthorizationTokenProvider { + String getUserAuthorizationToken(String resourceAddress, + ResourceType resourceType, + String get, + Map headers, + AuthorizationTokenType primarymasterkey, + Map properties); +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ICollectionRoutingMapCache.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ICollectionRoutingMapCache.java new file mode 100644 index 0000000000000..1ac11e218facb --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ICollectionRoutingMapCache.java @@ -0,0 +1,48 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.internal.routing.CollectionRoutingMap; +import reactor.core.publisher.Mono; + +import java.util.Map; + +// TODO: add documentation +/** + * While this class is public, but it is not part of our published public APIs. + * This is meant to be internally used only by our sdk. 
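For the ICollectionRoutingMapCache interface declared just below, the default overload simply forwards with forceRefreshCollectionRoutingMap set to false. A caller sketch, assuming the stripped generics resolve to Mono<CollectionRoutingMap> and that an empty result means the routing map was not found in the cache:

```java
import com.azure.data.cosmos.internal.ICollectionRoutingMapCache;
import com.azure.data.cosmos.internal.routing.CollectionRoutingMap;
import reactor.core.publisher.Mono;

class RoutingMapLookupSketch {
    static Mono<CollectionRoutingMap> lookup(ICollectionRoutingMapCache cache, String collectionRid) {
        // Normal path: serve from the cache, no previous value, no extra request properties.
        Mono<CollectionRoutingMap> cached = cache.tryLookupAsync(collectionRid, null, null);

        // If nothing was cached, force a refresh of the routing map.
        return cached.switchIfEmpty(cache.tryLookupAsync(collectionRid, null, true, null));
    }
}
```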
+ **/ +public interface ICollectionRoutingMapCache { + default Mono tryLookupAsync( + String collectionRid, + CollectionRoutingMap previousValue, + Map properties) { + return tryLookupAsync(collectionRid, previousValue, false, properties); + } + + Mono tryLookupAsync( + String collectionRid, + CollectionRoutingMap previousValue, + boolean forceRefreshCollectionRoutingMap, + Map properties); +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/IDocumentClientRetryPolicy.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/IDocumentClientRetryPolicy.java new file mode 100644 index 0000000000000..b1ce820c586d8 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/IDocumentClientRetryPolicy.java @@ -0,0 +1,70 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal; + +import reactor.core.publisher.Mono; + +/** + * While this class is public, but it is not part of our published public APIs. + * This is meant to be internally used only by our sdk. + */ +public interface IDocumentClientRetryPolicy extends IRetryPolicy { + + // TODO: this is just a place holder for now. As .Net has this method. + // I have to spend more time figure out what's the right pattern for this (if method needed) + + ///

+ /// Method that is called before a request is sent to allow the retry policy implementation + /// to modify the state of the request. + /// + /// The request being sent to the service. + /// + /// Currently only read operations will invoke this method. There is no scenario for write + /// operations to modify requests before retrying. + /// + + // TODO: I need to investigate what's the right contract here and/or if/how this is useful + void onBeforeSendRequest(RxDocumentServiceRequest request); + + + class NoRetry implements IDocumentClientRetryPolicy { + + private static NoRetry instance = new NoRetry(); + + private NoRetry() {} + + public static NoRetry getInstance() { + return instance; + } + + @Override + public void onBeforeSendRequest(RxDocumentServiceRequest request) { + // no op + } + + @Override + public Mono shouldRetry(Exception e) { + return Mono.just(ShouldRetryResult.error(e)); + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/IRetryPolicy.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/IRetryPolicy.java new file mode 100644 index 0000000000000..84e753bf33d03 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/IRetryPolicy.java @@ -0,0 +1,97 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +import reactor.core.publisher.Mono; + +import java.time.Duration; + +// TODO update documentation +/** + * While this class is public, but it is not part of our published public APIs. + * This is meant to be internally used only by our sdk. + */ +public interface IRetryPolicy { + // this capture all the retry logic + // TODO: design decision should this return a single or an observable? + + /// + /// Method that is called to determine from the policy that needs to retry on the exception + /// + /// Exception during the callback method invocation + /// + /// If the retry needs to be attempted or not + Mono shouldRetry(Exception e); + + + class ShouldRetryResult { + /// + /// How long to wait before next retry. 0 indicates retry immediately. 
+ /// + public final Duration backOffTime; + public final Exception exception; + public boolean shouldRetry; + public final Quadruple policyArg; + + private ShouldRetryResult(Duration dur, Exception e, boolean shouldRetry, + Quadruple policyArg) { + this.backOffTime = dur; + this.exception = e; + this.shouldRetry = shouldRetry; + this.policyArg = policyArg; + } + + public static ShouldRetryResult retryAfter(Duration dur) { + Utils.checkNotNullOrThrow(dur, "duration", "cannot be null"); + return new ShouldRetryResult(dur, null, true, null); + } + + public static ShouldRetryResult retryAfter(Duration dur, + Quadruple policyArg) { + Utils.checkNotNullOrThrow(dur, "duration", "cannot be null"); + return new ShouldRetryResult(dur, null, true, policyArg); + } + + public static ShouldRetryResult error(Exception e) { + Utils.checkNotNullOrThrow(e, "exception", "cannot be null"); + return new ShouldRetryResult(null, e, false, null); + } + + public static ShouldRetryResult noRetry() { + return new ShouldRetryResult(null, null, false, null); + } + + public void throwIfDoneTrying(Exception capturedException) throws Exception { + if (this.shouldRetry) { + return; + } + + if (this.exception == null) { + throw capturedException; + } else { + throw this.exception; + } + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/IRetryPolicyFactory.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/IRetryPolicyFactory.java new file mode 100644 index 0000000000000..a13ba894372de --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/IRetryPolicyFactory.java @@ -0,0 +1,31 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal; + +/** + * While this class is public, but it is not part of our published public APIs. + * This is meant to be internally used only by our sdk. 
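The ShouldRetryResult type above carries both the retry decision and the back-off. A generic driver loop might consume it as sketched here, assuming shouldRetry returns Mono<ShouldRetryResult> (the generics are stripped in this hunk) and that failures surface as Exception:

```java
import com.azure.data.cosmos.internal.IRetryPolicy;
import reactor.core.publisher.Mono;

class RetryDriverSketch {
    // Re-subscribes to `operation` until the policy says stop; the policy itself
    // bounds the number of attempts and supplies the back-off duration.
    static <T> Mono<T> withRetries(IRetryPolicy policy, Mono<T> operation) {
        return operation.onErrorResume(error ->
                policy.shouldRetry((Exception) error).flatMap(result -> {
                    if (!result.shouldRetry) {
                        return Mono.error(result.exception != null ? result.exception : error);
                    }
                    // retryAfter(...) guarantees a non-null backOffTime when shouldRetry is true.
                    return Mono.delay(result.backOffTime).then(withRetries(policy, operation));
                }));
    }
}
```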
+ */ +public interface IRetryPolicyFactory { + IDocumentClientRetryPolicy getRequestPolicy(); +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/IRoutingMapProvider.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/IRoutingMapProvider.java new file mode 100644 index 0000000000000..6e940ba093cd6 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/IRoutingMapProvider.java @@ -0,0 +1,50 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.internal.routing.Range; +import reactor.core.publisher.Mono; + +import java.util.List; +import java.util.Map; + +//TODO: update documentation +//TODO: add two overload methods for forceRefresh = false +/** + * While this class is public, but it is not part of our published public APIs. + * This is meant to be internally used only by our sdk. + **/ +public interface IRoutingMapProvider { + /// + /// Returns list of effective partition key ranges for a collection. + /// + /// Collection for which to retrieve routing map. + /// This method will return all ranges which overlap this range. + /// Whether forcefully refreshing the routing map is necessary + /// List of effective partition key ranges for a collection or null if collection doesn't exist. 
+    Mono<List<PartitionKeyRange>> tryGetOverlappingRangesAsync(String collectionResourceId, Range<String> range,
+            boolean forceRefresh /* = false */, Map<String, Object> properties);
+
+    Mono<PartitionKeyRange> tryGetPartitionKeyRangeByIdAsync(String collectionResourceId, String partitionKeyRangeId,
+            boolean forceRefresh /* = false */, Map<String, Object> properties);
+}
diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ISessionContainer.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ISessionContainer.java
new file mode 100644
index 0000000000000..ed47596aff626
--- /dev/null
+++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ISessionContainer.java
@@ -0,0 +1,89 @@
+/*
+ * The MIT License (MIT)
+ * Copyright (c) 2018 Microsoft Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+package com.azure.data.cosmos.internal;
+
+import com.azure.data.cosmos.internal.ISessionToken;
+import com.azure.data.cosmos.internal.RxDocumentServiceRequest;
+
+import java.util.Map;
+
+public interface ISessionContainer {
+    /**
+     * Returns a serialized map of partitionKeyRangeId to session token. If an entity is name based, the method extracts the name from
+     * ResourceAddress and uses it to identify the collection; otherwise it uses ResourceId. Returns an empty string if the collection is unknown.
+     * @param entity {@link RxDocumentServiceRequest entity}
+     * @return serialized map of partitionKeyRangeId to session token, or an empty string if the collection is unknown
+     */
+    String resolveGlobalSessionToken(RxDocumentServiceRequest entity);
+
+    /**
+     * Returns a session token identified by partitionKeyRangeId(*) from a collection identified either by ResourceAddress
+     * (in case of a name based entity) or by ResourceId.
+     *
+     * If partitionKeyRangeId is not in the collection's partitionKeyRangeId token map, the method
+     * iterates through request.RequestContext.ResolvedPartitionKeyRange.Parents starting from the tail and
+     * returns the corresponding token if there is a match.
+     * @param entity {@link RxDocumentServiceRequest}
+     * @param partitionKeyRangeId partitionKeyRangeId
+     * @return Returns a session token identified by partitionKeyRangeId(*) from a collection identified either by ResourceAddress
+     * (in case of a name based entity) or by ResourceId.
+ */ + ISessionToken resolvePartitionLocalSessionToken(RxDocumentServiceRequest entity, String partitionKeyRangeId); + + /** + * Atomically: removes partitionKeyRangeId token map associated with resourceId, + * maps resourceId to collectionFullName and removes its map as well + * @param resourceId resourceId + */ + void clearTokenByResourceId(String resourceId); + + /** + * Atomically: removes partitionKeyRangeId token map associated with collectionFullName, maps collectionFullName to resourceId and + * removes its map as well. + * @param collectionFullName collectionFullName + */ + void clearTokenByCollectionFullName(String collectionFullName); + + /** + * Infers collectionFullName using responseHeaders[HttpConstants.HttpHeaders.OwnerFullName] or request.ResourceAddress, + * infers resourceId using responseHeaders[HttpConstants.HttpHeaders.OwnerId] or request.ResourceId, + * and adds responseHeaders[HttpConstants.HttpHeaders.SessionToken] session token to the (collectionFullName, resourceId)'s + * partitionKeyRangeId token map. + * + * NB: Silently does nothing for master queries, or when it's impossible to infer collectionRid and collectionFullName + * from the request, or then SessionToken is missing in responseHeader. + * + * @param request {@link RxDocumentServiceRequest} + * @param responseHeaders responseHeaders + */ + void setSessionToken(RxDocumentServiceRequest request, Map responseHeaders); + + /** + * Adds responseHeaders[HttpConstants.HttpHeaders.SessionToken] session token to the (collectionFullName, collectionRid)'s partitionKeyRangeId token map. + * @param collectionRid collectionRid + * @param collectionFullName collectionFullName + * @param responseHeaders responseHeaders + */ + void setSessionToken(String collectionRid, String collectionFullName, Map responseHeaders); +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ISessionToken.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ISessionToken.java new file mode 100644 index 0000000000000..49c6b383e98da --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ISessionToken.java @@ -0,0 +1,63 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.CosmosClientException; + +/** + * Models session token. 
+ * + * We make assumption that instances of this interface are immutable (read only after they are constructed), so if you want to change + * this behaviour please review all of its uses and make sure that mutability doesn't break anything. + */ +public interface ISessionToken { + + String PARTITION_KEY_RANGE_SESSION_SEPARATOR = ":"; + + /** + * Returns true if this instance of session token is valid with respect to other session token. + * This is used to decide if the client can accept server's response (based on comparison between client's + * and server's session token) + * + * @param other SESSION token to validate + * @return true if this instance of session token is valid with respect to other session token; + * false otherwise + */ + boolean isValid(ISessionToken other) throws CosmosClientException; + + /** + * Returns a new instance of session token obtained by merging this session token with + * the given session token other. + * + * Merge is commutative operation, so a.Merge(b).Equals(b.Merge(a)) + * + * @param other Other session token to merge + * @return Instance of merged session token + */ + ISessionToken merge(ISessionToken other) throws CosmosClientException; + + long getLSN(); + + String convertToString(); +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Integers.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Integers.java new file mode 100644 index 0000000000000..2369c1c815118 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Integers.java @@ -0,0 +1,50 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal; + +public class Integers { + public static int tryParse(String value, int defaultValue) { + if (Strings.isNullOrEmpty(value)) { + return defaultValue; + } + + try { + return Integer.valueOf(value); + } catch (NumberFormatException e) { + return defaultValue; + } + } + + public static Integer tryParse(String value) { + if (Strings.isNullOrEmpty(value)) { + return null; + } + + try { + return Integer.valueOf(value); + } catch (NumberFormatException e) { + return null; + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/InternalConstants.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/InternalConstants.java new file mode 100644 index 0000000000000..5f56ba4fbed15 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/InternalConstants.java @@ -0,0 +1,47 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal; + +class InternalConstants { + + static class ResourceKeys { + static final String ATTACHMENTS = "Attachments"; + static final String CONFLICTS = "Conflicts"; + static final String DATABASES = "Databases"; + static final String DOCUMENTS = "Documents"; + static final String DOCUMENT_COLLECTIONS = "DocumentCollections"; + static final String OFFERS = "Offers"; + static final String PERMISSIONS = "Permissions"; + static final String PARTITION_KEY_RANGES = "PartitionKeyRanges"; + static final String TRIGGERS = "Triggers"; + static final String STOREDPROCEDURES = "StoredProcedures"; + static final String USERS = "Users"; + static final String USER_DEFINED_FUNCTIONS = "UserDefinedFunctions"; + static final String ADDRESSES = "Addresss"; + } + + static class StreamApi { + static final int STREAM_LENGTH_EOF = -1; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/InvalidPartitionExceptionRetryPolicy.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/InvalidPartitionExceptionRetryPolicy.java new file mode 100644 index 0000000000000..a76203f6e2cea --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/InvalidPartitionExceptionRetryPolicy.java @@ -0,0 +1,88 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.internal.caches.RxCollectionCache; +import reactor.core.publisher.Mono; + +import java.time.Duration; + +/** + * While this class is public, but it is not part of our published public APIs. + * This is meant to be internally used only by our sdk. 
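+ * <p>
+ * Illustrative sketch only (editor's addition, not part of this change); this policy is meant to wrap
+ * another retry policy, here with hypothetical {@code collectionCache}, {@code nextPolicy},
+ * {@code resourceFullName} and {@code feedOptions} instances:
+ * <pre>{@code
+ * IDocumentClientRetryPolicy policy = new InvalidPartitionExceptionRetryPolicy(
+ *         collectionCache, nextPolicy, resourceFullName, feedOptions);
+ * // on a 410/NAME_CACHE_IS_STALE failure the policy refreshes the cached collection once and asks
+ * // the caller to retry immediately; anything else is delegated to nextPolicy
+ * Mono<ShouldRetryResult> decision = policy.shouldRetry(exception);
+ * }</pre>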
+ */ +public class InvalidPartitionExceptionRetryPolicy implements IDocumentClientRetryPolicy { + + private final RxCollectionCache clientCollectionCache; + private final IDocumentClientRetryPolicy nextPolicy; + private final String collectionLink; + private final FeedOptions feedOptions; + + private volatile boolean retried = false; + + public InvalidPartitionExceptionRetryPolicy(RxCollectionCache collectionCache, + IDocumentClientRetryPolicy nextPolicy, + String resourceFullName, + FeedOptions feedOptions) { + + this.clientCollectionCache = collectionCache; + this.nextPolicy = nextPolicy; + + // TODO the resource address should be inferred from exception + this.collectionLink = com.azure.data.cosmos.internal.Utils.getCollectionName(resourceFullName); + this.feedOptions = feedOptions; + } + + @Override + public void onBeforeSendRequest(RxDocumentServiceRequest request) { + this.nextPolicy.onBeforeSendRequest(request); + } + + @Override + public Mono shouldRetry(Exception e) { + CosmosClientException clientException = Utils.as(e, CosmosClientException.class); + if (clientException != null && + Exceptions.isStatusCode(clientException, HttpConstants.StatusCodes.GONE) && + Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.NAME_CACHE_IS_STALE)) { + if (!this.retried) { + // TODO: resource address should be accessible from the exception + //this.clientCollectionCache.Refresh(clientException.ResourceAddress); + // TODO: this is blocking. is that fine? + if(this.feedOptions != null) { + this.clientCollectionCache.refresh(collectionLink,this.feedOptions.properties()); + } else { + this.clientCollectionCache.refresh(collectionLink,null); + } + + this.retried = true; + return Mono.just(ShouldRetryResult.retryAfter(Duration.ZERO)); + } else { + return Mono.just(ShouldRetryResult.error(e)); + } + } + + return this.nextPolicy.shouldRetry(e); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/JavaStreamUtils.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/JavaStreamUtils.java new file mode 100644 index 0000000000000..5e07f0b845ff3 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/JavaStreamUtils.java @@ -0,0 +1,42 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal; + +import java.util.Collection; +import java.util.stream.Collectors; + +public class JavaStreamUtils { + + private static String safeToString(T t) { + return t != null ? t.toString() : "null"; + } + + public static String toString(Collection collection, String delimiter) { + return collection.stream() + .map( t -> safeToString(t) ) + .collect(Collectors.joining(delimiter)); + } + + +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/LifeCycleUtils.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/LifeCycleUtils.java new file mode 100644 index 0000000000000..5209b9a1d64f0 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/LifeCycleUtils.java @@ -0,0 +1,41 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class LifeCycleUtils { + private final static Logger logger = LoggerFactory.getLogger(LifeCycleUtils.class); + public static void closeQuietly(AutoCloseable closeable) { + try { + if (closeable != null) { + logger.debug("closing an instance of {}", closeable.getClass().getCanonicalName()); + closeable.close(); + } + } catch (Exception e) { + logger.warn("attempting to close an instance of {} failed", closeable.getClass().getCanonicalName(), e); + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Lists.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Lists.java new file mode 100644 index 0000000000000..4287412c33b62 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Lists.java @@ -0,0 +1,33 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +import java.util.List; + +public class Lists { + + public static V firstOrDefault(List list, V defaultValue) { + return list.isEmpty() ? defaultValue : list.get(0); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Longs.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Longs.java new file mode 100644 index 0000000000000..757782057e061 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Longs.java @@ -0,0 +1,38 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal; + +public class Longs { + public static long tryParse(String value, long defaultValue) { + if (Strings.isNullOrEmpty(value)) { + return defaultValue; + } + + try { + return Long.valueOf(value); + } catch (NumberFormatException e) { + return defaultValue; + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/MigrateCollectionDirective.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/MigrateCollectionDirective.java new file mode 100644 index 0000000000000..ffead99b1a898 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/MigrateCollectionDirective.java @@ -0,0 +1,37 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +package com.azure.data.cosmos.internal; + +public enum MigrateCollectionDirective { + /** + * Move to SSD + */ + Thaw, + + /** + * Move to HDD + */ + Freeze +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/MutableVolatile.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/MutableVolatile.java new file mode 100644 index 0000000000000..921476855f13d --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/MutableVolatile.java @@ -0,0 +1,34 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+package com.azure.data.cosmos.internal;
+
+public class MutableVolatile<T> {
+
+    public MutableVolatile(T initValue){
+        v = initValue;
+    }
+
+    public MutableVolatile() {}
+    public volatile T v;
+}
diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ObservableHelper.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ObservableHelper.java
new file mode 100644
index 0000000000000..1a5af062b3042
--- /dev/null
+++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ObservableHelper.java
@@ -0,0 +1,67 @@
+/*
+ * The MIT License (MIT)
+ * Copyright (c) 2018 Microsoft Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+package com.azure.data.cosmos.internal;
+
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Mono;
+
+import java.util.concurrent.Callable;
+
+/**
+ * While this class is public, it is not part of our published public APIs.
+ * This is meant to be internally used only by our sdk.
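+ * <p>
+ * Illustrative sketch only (editor's addition, not part of this change); typical call shape, with a
+ * hypothetical {@code executeOnce(request)} helper that performs a single attempt:
+ * <pre>{@code
+ * Mono<RxDocumentServiceResponse> response =
+ *         ObservableHelper.inlineIfPossible(() -> executeOnce(request), retryPolicy);
+ * // with a null retryPolicy the callable is invoked directly; otherwise the attempt is
+ * // re-driven through BackoffRetryUtility.executeRetry
+ * }</pre>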
+ * + **/ +public class ObservableHelper { + + static public Mono inlineIfPossible(Callable> function, IRetryPolicy retryPolicy) { + + if (retryPolicy == null) { + // shortcut + try { + return function.call(); + } catch (Exception e) { + return Mono.error(e); + } + } else { + return BackoffRetryUtility.executeRetry(function, retryPolicy); + } + } + + static public Flux inlineIfPossibleAsObs(Callable> function, IRetryPolicy retryPolicy) { + + if (retryPolicy == null) { + // shortcut + return Flux.defer(() -> { + try { + return function.call(); + } catch (Exception e) { + return Flux.error(e); + } + }); + + } else { + return BackoffRetryUtility.executeRetry(() -> function.call().single(), retryPolicy).flux(); + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Offer.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Offer.java new file mode 100644 index 0000000000000..22c787aeeaa67 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Offer.java @@ -0,0 +1,155 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.Resource; +import com.fasterxml.jackson.databind.node.ObjectNode; + +/** + * Represents an offer in the Azure Cosmos DB database service. + */ +public class Offer extends Resource { + + /** + * Initialize an new instance of the Offer object. + * + * @param offerThroughput the throughput value for this offer. + */ + public Offer(int offerThroughput) { + super(); + this.setOfferVersion(Constants.Properties.OFFER_VERSION_V2); + this.setOfferType(""); + ObjectNode content = Utils.getSimpleObjectMapper().createObjectNode(); + content.put(Constants.Properties.OFFER_THROUGHPUT, offerThroughput); + this.setContent(content); + } + + /** + * Initialize an offer object from json string. + * + * @param jsonString the json string that represents the offer. + */ + public Offer(String jsonString) { + super(jsonString); + } + + /** + * Gets the self-link of a resource to which the resource offer applies. + * + * @return the resource link. + */ + public String getResourceLink() { + return super.getString(Constants.Properties.RESOURCE_LINK); + } + + /** + * Sets the self-link of a resource to which the resource offer applies. + * + * @param resourceLink the resource link. 
+ */ + void setResourceLink(String resourceLink) { + BridgeInternal.setProperty(this, Constants.Properties.RESOURCE_LINK, resourceLink); + } + + /** + * Sets the target resource id of a resource to which this offer applies. + * + * @return the resource id. + */ + public String getOfferResourceId() { + return super.getString(Constants.Properties.OFFER_RESOURCE_ID); + } + + /** + * Sets the target resource id of a resource to which this offer applies. + * + * @param resourceId the resource id. + */ + void setOfferResourceId(String resourceId) { + BridgeInternal.setProperty(this, Constants.Properties.OFFER_RESOURCE_ID, resourceId); + } + + /** + * Gets the OfferType for the resource offer. + * + * @return the offer type. + */ + public String getOfferType() { + return super.getString(Constants.Properties.OFFER_TYPE); + } + + /** + * Sets the OfferType for the resource offer. + * + * @param offerType the offer type. + */ + public void setOfferType(String offerType) { + BridgeInternal.setProperty(this, Constants.Properties.OFFER_TYPE, offerType); + } + + /** + * Gets the version of the current offer. + * + * @return the offer version. + */ + public String getOfferVersion() { + return super.getString(Constants.Properties.OFFER_VERSION); + } + + /** + * Sets the offer version. + * + * @param offerVersion the version of the offer. + */ + public void setOfferVersion(String offerVersion) { + BridgeInternal.setProperty(this, Constants.Properties.OFFER_VERSION, offerVersion); + } + + /** + * Gets the offer throughput for this offer. + * + * @return the offer throughput. + */ + public int getThroughput() { + return this.getContent().get(Constants.Properties.OFFER_THROUGHPUT).asInt(); + } + + /** + * Sets the offer throughput for this offer. + * + * @param throughput the throughput of this offer. + */ + public void setThroughput(int throughput) { + this.getContent().put(Constants.Properties.OFFER_THROUGHPUT, throughput); + } + + private ObjectNode getContent() { + return BridgeInternal.getObject(this, Constants.Properties.OFFER_CONTENT); + } + + private void setContent(ObjectNode offerContent) { + BridgeInternal.setProperty(this, Constants.Properties.OFFER_CONTENT, offerContent); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/OperationType.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/OperationType.java new file mode 100644 index 0000000000000..6256a7756ef85 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/OperationType.java @@ -0,0 +1,73 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +/** + * Operation types in the Azure Cosmos DB database service. + */ +public enum OperationType { + AbortPartitionMigration, + AbortSplit, + AddComputeGatewayRequestCharges, + BatchApply, + BatchReportThroughputUtilization, + CompletePartitionMigration, + CompleteSplit, + Crash, + Create, + Delete, + ExecuteJavaScript, + ForceConfigRefresh, + GetSplitPoint, + Head, + HeadFeed, + MigratePartition, + Pause, + PreCreateValidation, + OfferPreGrowValidation, + OfferUpdateOperation, + PreReplaceValidation, + Query, + Read, + ReadFeed, + Recreate, + Recycle, + Replace, + Resume, + SqlQuery, + Stop, + Throttle, + Update, + Upsert; + + public boolean isWriteOperation() { + return this == Create || + this == Delete || + this == Recreate || + this == ExecuteJavaScript || + this == Replace || + this == Upsert || + this == Update; + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/PartitionKeyMismatchRetryPolicy.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/PartitionKeyMismatchRetryPolicy.java new file mode 100644 index 0000000000000..b2dfd9aba5df3 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/PartitionKeyMismatchRetryPolicy.java @@ -0,0 +1,108 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.internal.caches.RxClientCollectionCache; +import reactor.core.publisher.Mono; + +import java.time.Duration; +import java.util.concurrent.atomic.AtomicInteger; + +/** + * While this class is public, but it is not part of our published public APIs. + * This is meant to be internally used only by our sdk. + * + * A RetryPolicy implementation that ensures the PartitionKeyDefinitionMap is up-to-date. + * Entries in the PartitionKeyDefinitionMap can become stale if a collection is deleted + * and then recreated with the same name but a different partition key definition, if + * the request is made using name-based links. 
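+ * <p>
+ * Illustrative sketch only (editor's addition, not part of this change); the scenario this policy
+ * guards against:
+ * <pre>{@code
+ * // 1. collection "db/coll" is created with partition key path "/pk" and cached by name
+ * // 2. the collection is deleted and recreated under the same name with partition key path "/id"
+ * // 3. a request resolved through the stale cache entry fails with 400 / PARTITION_KEY_MISMATCH;
+ * //    this policy refreshes the collection cache once and retries the request immediately
+ * }</pre>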
+ *
+ * TODO: verify with Sergii, other than collection deleted and recreated with the same name
+ * is there any other scenario in which this should be used?
+ *
+ */
+public class PartitionKeyMismatchRetryPolicy implements IDocumentClientRetryPolicy {
+    private RxClientCollectionCache clientCollectionCache;
+    private IDocumentClientRetryPolicy nextRetryPolicy;
+    private AtomicInteger retriesAttempted = new AtomicInteger(0);
+    private String collectionLink;
+    private RequestOptions options;
+    private final static int MaxRetries = 1;
+
+
+    public PartitionKeyMismatchRetryPolicy(
+            RxClientCollectionCache clientCollectionCache,
+            IDocumentClientRetryPolicy nextRetryPolicy,
+            String resourceFullName,
+            RequestOptions requestOptions) {
+        this.clientCollectionCache = clientCollectionCache;
+        this.nextRetryPolicy = nextRetryPolicy;
+
+        // TODO: this should be retrievable from document client exception.
+        collectionLink = com.azure.data.cosmos.internal.Utils.getCollectionName(resourceFullName);
+        this.options = requestOptions;
+    }
+
+
+    /**
+     * Should the caller retry the operation.
+     *
+     * @param exception the exception that occurred when the operation was tried
+     * @return true indicates the caller should retry, false otherwise
+     */
+    public Mono<ShouldRetryResult> shouldRetry(Exception exception) {
+        CosmosClientException clientException = Utils.as(exception, CosmosClientException.class);
+
+        if (clientException != null &&
+                Exceptions.isStatusCode(clientException, HttpConstants.StatusCodes.BADREQUEST) &&
+                Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.PARTITION_KEY_MISMATCH)
+                && this.retriesAttempted.get() < MaxRetries) {
+            //Debug.Assert(clientException.ResourceAddress != null);
+
+            // TODO:
+            //this.clientCollectionCache.refresh(clientException.ResourceAddress);
+            if (this.options != null) {
+                this.clientCollectionCache.refresh(collectionLink, this.options.getProperties());
+            } else {
+                this.clientCollectionCache.refresh(collectionLink, null);
+            }
+
+            this.retriesAttempted.incrementAndGet();
+
+            return Mono.just(ShouldRetryResult.retryAfter(Duration.ZERO));
+        }
+
+        return this.nextRetryPolicy.shouldRetry(exception);
+    }
+
+
+    /* (non-Javadoc)
+     * @see com.azure.data.cosmos.internal.internal.query.IDocumentClientRetryPolicy#onBeforeSendRequest(RxDocumentServiceRequest)
+     */
+    @Override
+    public void onBeforeSendRequest(RxDocumentServiceRequest request) {
+        // TODO Auto-generated method stub
+        this.nextRetryPolicy.onBeforeSendRequest(request);
+    }
+}
diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/PartitionKeyRange.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/PartitionKeyRange.java
new file mode 100644
index 0000000000000..4404f44aff93b
--- /dev/null
+++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/PartitionKeyRange.java
@@ -0,0 +1,134 @@
+/*
+ * The MIT License (MIT)
+ * Copyright (c) 2018 Microsoft Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.Resource; +import com.azure.data.cosmos.internal.routing.Range; + +import java.util.List; + +/** + * Represent a partition key range in the Azure Cosmos DB database service. + */ +public class PartitionKeyRange extends Resource { + public static final String MINIMUM_INCLUSIVE_EFFECTIVE_PARTITION_KEY = ""; + public static final String MAXIMUM_EXCLUSIVE_EFFECTIVE_PARTITION_KEY = "FF"; + public static final String MASTER_PARTITION_KEY_RANGE_ID = "M"; + + /** + * Initialize a partition key range object. + */ + public PartitionKeyRange() { + super(); + } + + /** + * Initialize a partition key range object from json string. + * + * @param jsonString + * the json string that represents the partition key range + * object. + */ + public PartitionKeyRange(String jsonString) { + super(jsonString); + } + + /** + * Set id of partition key range + * @param id the name of the resource. + * @return the partition key range + */ + public PartitionKeyRange id(String id) { + super.id(id); + return this; + } + + public PartitionKeyRange(String id, String minInclusive, String maxExclusive) { + super(); + this.id(id); + this.setMinInclusive(minInclusive); + this.setMaxExclusive(maxExclusive); + } + + public PartitionKeyRange(String id, String minInclusive, String maxExclusive, List parents) { + super(); + this.id(id); + this.setMinInclusive(minInclusive); + this.setMaxExclusive(maxExclusive); + this.setParents(parents); + } + + public String getMinInclusive() { + return super.getString("minInclusive"); + } + + public void setMinInclusive(String minInclusive) { + BridgeInternal.setProperty(this, "minInclusive", minInclusive); + } + + public String getMaxExclusive() { + return super.getString("maxExclusive"); + } + + public void setMaxExclusive(String maxExclusive) { + BridgeInternal.setProperty(this, "maxExclusive", maxExclusive); + } + + public Range toRange() { + return new Range(this.getMinInclusive(), this.getMaxExclusive(), true, false); + } + + @Override + public boolean equals(Object obj) { + if (!(obj instanceof PartitionKeyRange)) { + return false; + } + + PartitionKeyRange otherRange = (PartitionKeyRange) obj; + + return this.id().compareTo(otherRange.id()) == 0 + && this.getMinInclusive().compareTo(otherRange.getMinInclusive()) == 0 + && this.getMaxExclusive().compareTo(otherRange.getMaxExclusive()) == 0; + } + + @Override + public int hashCode() { + int hash = 0; + hash = (hash * 397) ^ this.id().hashCode(); + hash = (hash * 397) ^ this.getMinInclusive().hashCode(); + hash = (hash * 397) ^ this.getMaxExclusive().hashCode(); + return hash; + } + + public void setParents(List parents) { + BridgeInternal.setProperty(this, Constants.Properties.PARENTS, parents); + } + + /** + * Used internally to indicate the ID of the parent range + * @return a list partition key range ID + */ + public List getParents() { return this.getList(Constants.Properties.PARENTS, String.class); } +} diff --git 
a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/PartitionKeyRangeGoneRetryPolicy.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/PartitionKeyRangeGoneRetryPolicy.java new file mode 100644 index 0000000000000..a72c6fef177f8 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/PartitionKeyRangeGoneRetryPolicy.java @@ -0,0 +1,122 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.internal.caches.IPartitionKeyRangeCache; +import com.azure.data.cosmos.internal.caches.RxCollectionCache; +import com.azure.data.cosmos.internal.routing.CollectionRoutingMap; +import reactor.core.publisher.Mono; + +import java.time.Duration; + +// TODO: this need testing +/** + * While this class is public, but it is not part of our published public APIs. + * This is meant to be internally used only by our sdk. + */ +public class PartitionKeyRangeGoneRetryPolicy implements IDocumentClientRetryPolicy { + + private final RxCollectionCache collectionCache; + private final IDocumentClientRetryPolicy nextRetryPolicy; + private final IPartitionKeyRangeCache partitionKeyRangeCache; + private final String collectionLink; + private final FeedOptions feedOptions; + private volatile boolean retried; + + public PartitionKeyRangeGoneRetryPolicy( + RxCollectionCache collectionCache, + IPartitionKeyRangeCache partitionKeyRangeCache, + String collectionLink, + IDocumentClientRetryPolicy nextRetryPolicy, + FeedOptions feedOptions) { + this.collectionCache = collectionCache; + this.partitionKeyRangeCache = partitionKeyRangeCache; + this.collectionLink = collectionLink; + this.nextRetryPolicy = nextRetryPolicy; + this.feedOptions = feedOptions; + } + + /// + /// Should the caller retry the operation. 
+ /// + /// Exception that occured when the operation was tried + /// + /// True indicates caller should retry, False otherwise + public Mono shouldRetry(Exception exception) { + CosmosClientException clientException = Utils.as(exception, CosmosClientException.class); + if (clientException != null && + Exceptions.isStatusCode(clientException, HttpConstants.StatusCodes.GONE) && + Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.PARTITION_KEY_RANGE_GONE)) { + + if (this.retried){ + return Mono.just(ShouldRetryResult.error(clientException)); + } + + RxDocumentServiceRequest request = RxDocumentServiceRequest.create( + OperationType.Read, + ResourceType.DocumentCollection, + this.collectionLink, + null + // AuthorizationTokenType.PrimaryMasterKey) + ); + if (this.feedOptions != null) { + request.properties = this.feedOptions.properties(); + } + Mono collectionObs = this.collectionCache.resolveCollectionAsync(request); + + return collectionObs.flatMap(collection -> { + + Mono routingMapObs = this.partitionKeyRangeCache.tryLookupAsync(collection.resourceId(), null, request.properties); + + Mono refreshedRoutingMapObs = routingMapObs.flatMap(routingMap -> { + // Force refresh. + return this.partitionKeyRangeCache.tryLookupAsync( + collection.resourceId(), + routingMap, + request.properties); + }).switchIfEmpty(Mono.defer(Mono::empty)); + + // TODO: Check if this behavior can be replaced by doOnSubscribe + return refreshedRoutingMapObs.flatMap(rm -> { + this.retried = true; + return Mono.just(ShouldRetryResult.retryAfter(Duration.ZERO)); + }).switchIfEmpty(Mono.defer(() -> { + this.retried = true; + return Mono.just(ShouldRetryResult.retryAfter(Duration.ZERO)); + })); + + }); + + } else { + return this.nextRetryPolicy.shouldRetry(exception); + } + } + + @Override + public void onBeforeSendRequest(RxDocumentServiceRequest request) { + this.nextRetryPolicy.onBeforeSendRequest(request); + } + +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/PathInfo.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/PathInfo.java new file mode 100644 index 0000000000000..c03d3bac6d036 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/PathInfo.java @@ -0,0 +1,41 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal; + +/** + * Represents a resource path's information in the Azure Cosmos DB database service. + */ +public final class PathInfo { + public boolean isFeed; + public String resourcePath; + public String resourceIdOrFullName; + public boolean isNameBased; + + public PathInfo(boolean isFeed, String resourcePath, String resourceIdOrFullName, boolean isNameBased) { + this.isFeed = isFeed; + this.resourcePath = resourcePath; + this.resourceIdOrFullName = resourceIdOrFullName; + this.isNameBased = isNameBased; + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/PathParser.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/PathParser.java new file mode 100644 index 0000000000000..9855d6f2fe8c2 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/PathParser.java @@ -0,0 +1,85 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +import java.util.ArrayList; +import java.util.List; + +/** + * Provide functionality to parse resource paths in the Azure Cosmos DB database service. 
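+ * <p>
+ * Illustrative example (editor's addition, not part of this change):
+ * <pre>{@code
+ * List<String> parts = PathParser.getPathParts("/dbs/testDb/colls/testColl/docs/doc1");
+ * // parts -> [dbs, testDb, colls, testColl, docs, doc1]
+ * }</pre>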
+ */ +public final class PathParser { + + private static final char segmentSeparator = '/'; + + public static List getPathParts(String path) { + ArrayList tokens = new ArrayList(); + int currentIndex = 0; + + while (currentIndex < path.length()) { + if (path.charAt(currentIndex) != segmentSeparator) { + throw new IllegalArgumentException(String.format("INVALID path, failed at index %d.", currentIndex)); + } + + if (++currentIndex == path.length()) + break; + + if (path.charAt(currentIndex) == '\"' || path.charAt(currentIndex) == '\'') { + char quote = path.charAt(currentIndex); + int newIndex = ++currentIndex; + while (true) { + newIndex = path.indexOf(quote, newIndex); + if (newIndex == -1) { + throw new IllegalArgumentException(String.format("INVALID path, failed at index %d.", currentIndex)); + } + + if (path.charAt(newIndex - 1) != '\\') { + break; + } + + ++newIndex; + } + + String token = path.substring(currentIndex, newIndex); + tokens.add(token); + currentIndex = newIndex + 1; + } else { + int newIndex = path.indexOf(segmentSeparator, currentIndex); + String token = null; + if (newIndex == -1) { + token = path.substring(currentIndex); + currentIndex = path.length(); + } else { + token = path.substring(currentIndex, newIndex); + currentIndex = newIndex; + } + + token = token.trim(); + tokens.add(token); + } + } + + return tokens; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Paths.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Paths.java new file mode 100644 index 0000000000000..de499bdc2afea --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Paths.java @@ -0,0 +1,61 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +/** + * Used internally. Contains string constants to work with the paths in the Azure Cosmos DB database service. 
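+ * <p>
+ * Illustrative example (editor's addition, not part of this change), composing a collection link for a
+ * hypothetical database "testDb" and collection "testColl" from these segments:
+ * <pre>{@code
+ * String collectionLink = Paths.DATABASES_PATH_SEGMENT + "/" + "testDb"
+ *         + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + "testColl";   // "dbs/testDb/colls/testColl"
+ * }</pre>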
+ */ +public class Paths { + static final String ROOT = "/"; + + public static final String DATABASES_PATH_SEGMENT = "dbs"; + public static final String DATABASES_ROOT = ROOT + DATABASES_PATH_SEGMENT; + + public static final String USERS_PATH_SEGMENT = "users"; + public static final String PERMISSIONS_PATH_SEGMENT = "permissions"; + public static final String COLLECTIONS_PATH_SEGMENT = "colls"; + public static final String STORED_PROCEDURES_PATH_SEGMENT = "sprocs"; + public static final String TRIGGERS_PATH_SEGMENT = "triggers"; + public static final String USER_DEFINED_FUNCTIONS_PATH_SEGMENT = "udfs"; + public static final String CONFLICTS_PATH_SEGMENT = "conflicts"; + public static final String DOCUMENTS_PATH_SEGMENT = "docs"; + public static final String ATTACHMENTS_PATH_SEGMENT = "attachments"; + + // /offers + public static final String OFFERS_PATH_SEGMENT = "offers"; + public static final String OFFERS_ROOT = ROOT + OFFERS_PATH_SEGMENT + "/"; + + public static final String ADDRESS_PATH_SEGMENT = "addresses"; + public static final String PARTITIONS_PATH_SEGMENT = "partitions"; + public static final String DATABASE_ACCOUNT_PATH_SEGMENT = "databaseaccount"; + public static final String TOPOLOGY_PATH_SEGMENT = "topology"; + public static final String MEDIA_PATH_SEGMENT = "media"; + public static final String MEDIA_ROOT = ROOT + MEDIA_PATH_SEGMENT; + public static final String SCHEMAS_PATH_SEGMENT = "schemas"; + public static final String PARTITION_KEY_RANGES_PATH_SEGMENT = "pkranges"; + + public static final String USER_DEFINED_TYPES_PATH_SEGMENT = "udts"; + + public static final String RID_RANGE_PATH_SEGMENT = "ridranges"; +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/PathsHelper.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/PathsHelper.java new file mode 100644 index 0000000000000..8294feedf2add --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/PathsHelper.java @@ -0,0 +1,866 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.BadRequestException; +import com.azure.data.cosmos.Resource; +import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.tuple.Pair; +import org.apache.commons.text.StringEscapeUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.List; + +/** + * Used internally to provide utility methods to work with the resource's path in the Azure Cosmos DB database service. + */ +public class PathsHelper { + private final static Logger logger = LoggerFactory.getLogger(PathsHelper.class); + + public static String generatePath(ResourceType resourceType, RxDocumentServiceRequest request, boolean isFeed) { + if (request.getIsNameBased()) { + return PathsHelper.generatePathForNameBased(resourceType, request.getResourceAddress(), isFeed); + } else { + return PathsHelper.generatePath(resourceType, request.getResourceId(), isFeed); + } + } + + public static String generatePathForNameBased(Resource resourceType, String resourceOwnerFullName, String resourceName) { + if (resourceName == null) + return null; + + if (resourceType instanceof Database) { + return Paths.DATABASES_PATH_SEGMENT + "/" + resourceName; + } else if (resourceOwnerFullName == null) { + return null; + } else if (resourceType instanceof DocumentCollection) { + return resourceOwnerFullName + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + resourceName; + } else if (resourceType instanceof StoredProcedure) { + return resourceOwnerFullName + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT + "/" + resourceName; + } else if (resourceType instanceof UserDefinedFunction) { + return resourceOwnerFullName + "/" + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT + "/" + resourceName; + } else if (resourceType instanceof Trigger) { + return resourceOwnerFullName + "/" + Paths.TRIGGERS_PATH_SEGMENT + "/" + resourceName; + } else if (resourceType instanceof Conflict) { + return resourceOwnerFullName + "/" + Paths.CONFLICTS_PATH_SEGMENT + "/" + resourceName; + } else if (resourceType instanceof User) { + return resourceOwnerFullName + "/" + Paths.USERS_PATH_SEGMENT + "/" + resourceName; + } else if (resourceType instanceof Permission) { + return resourceOwnerFullName + "/" + Paths.PERMISSIONS_PATH_SEGMENT + "/" + resourceName; + } else if (resourceType instanceof Document) { + return resourceOwnerFullName + "/" + Paths.DOCUMENTS_PATH_SEGMENT + "/" + resourceName; + } else if (resourceType instanceof Offer) { + return Paths.OFFERS_PATH_SEGMENT + "/" + resourceName; + } else if (resourceType instanceof Resource) { + // just generic Resource type. 
+ return null; + } + + String errorMessage = String.format(RMResources.UnknownResourceType, resourceType.toString()); + assert false : errorMessage; + throw new IllegalArgumentException(errorMessage); + } + + private static String generatePathForNameBased(ResourceType resourceType, String resourceFullName, boolean isFeed) { + if (isFeed && Strings.isNullOrEmpty(resourceFullName) && resourceType != ResourceType.Database) { + String errorMessage = String.format(RMResources.UnexpectedResourceType, resourceType); + throw new IllegalArgumentException(errorMessage); + } + + String resourcePath = null; + if (!isFeed) { + resourcePath = resourceFullName; + } else if (resourceType == ResourceType.Database) { + return Paths.DATABASES_PATH_SEGMENT; + } else if (resourceType == ResourceType.DocumentCollection) { + resourcePath = resourceFullName + "/" + Paths.COLLECTIONS_PATH_SEGMENT; + } else if (resourceType == ResourceType.StoredProcedure) { + resourcePath = resourceFullName + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT; + } else if (resourceType == ResourceType.UserDefinedFunction) { + resourcePath = resourceFullName + "/" + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT; + } else if (resourceType == ResourceType.Trigger) { + resourcePath = resourceFullName + "/" + Paths.TRIGGERS_PATH_SEGMENT; + } else if (resourceType == ResourceType.Conflict) { + resourcePath = resourceFullName + "/" + Paths.CONFLICTS_PATH_SEGMENT; + } else if (resourceType == ResourceType.Attachment) { + resourcePath = resourceFullName + "/" + Paths.ATTACHMENTS_PATH_SEGMENT; + } else if (resourceType == ResourceType.User) { + resourcePath = resourceFullName + "/" + Paths.USERS_PATH_SEGMENT; + } else if (resourceType == ResourceType.Permission) { + resourcePath = resourceFullName + "/" + Paths.PERMISSIONS_PATH_SEGMENT; + } else if (resourceType == ResourceType.Document) { + resourcePath = resourceFullName + "/" + Paths.DOCUMENTS_PATH_SEGMENT; + } else if (resourceType == ResourceType.Offer) { + return resourceFullName + "/" + Paths.OFFERS_PATH_SEGMENT; + } else if (resourceType == ResourceType.PartitionKeyRange) { + return resourceFullName + "/" + Paths.PARTITION_KEY_RANGES_PATH_SEGMENT; + } else if (resourceType == ResourceType.Schema) { + resourcePath = resourceFullName + "/" + Paths.SCHEMAS_PATH_SEGMENT; + } else { + String errorMessage = String.format(RMResources.UnknownResourceType, resourceType.toString()); + assert false : errorMessage; + throw new IllegalArgumentException(errorMessage); + } + + return resourcePath; + } + + public static String generatePath(ResourceType resourceType, String ownerOrResourceId, boolean isFeed) { + if (isFeed && (ownerOrResourceId == null || ownerOrResourceId.isEmpty()) && + resourceType != ResourceType.Database && + resourceType != ResourceType.Offer && + resourceType != ResourceType.MasterPartition && + resourceType != ResourceType.ServerPartition && + resourceType != ResourceType.DatabaseAccount && + resourceType != ResourceType.Topology) { + throw new IllegalStateException("INVALID resource type"); + } + + if(ownerOrResourceId == null) { + ownerOrResourceId = StringUtils.EMPTY; + } + + if (isFeed && resourceType == ResourceType.Database) { + return Paths.DATABASES_PATH_SEGMENT; + } else if (resourceType == ResourceType.Database) { + return Paths.DATABASES_PATH_SEGMENT + "/" + ownerOrResourceId; + } else if (isFeed && resourceType == ResourceType.DocumentCollection) { + ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId); + + return Paths.DATABASES_PATH_SEGMENT + "/" + 
documentCollectionId.getDatabaseId().toString() + "/" + + Paths.COLLECTIONS_PATH_SEGMENT; + } else if (resourceType == ResourceType.DocumentCollection) { + ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId); + + return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString(); + } else if (isFeed && resourceType == ResourceType.Offer) { + return Paths.OFFERS_PATH_SEGMENT; + } else if (resourceType == ResourceType.Offer) { + return Paths.OFFERS_PATH_SEGMENT + "/" + ownerOrResourceId; + } else if (isFeed && resourceType == ResourceType.StoredProcedure) { + ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId); + + return + Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" + + Paths.STORED_PROCEDURES_PATH_SEGMENT; + } else if (resourceType == ResourceType.StoredProcedure) { + ResourceId storedProcedureId = ResourceId.parse(ownerOrResourceId); + + return Paths.DATABASES_PATH_SEGMENT + "/" + storedProcedureId.getDatabaseId().toString() + "/" + + Paths.COLLECTIONS_PATH_SEGMENT + "/" + storedProcedureId.getDocumentCollectionId().toString() + "/" + + Paths.STORED_PROCEDURES_PATH_SEGMENT + "/" + storedProcedureId.getStoredProcedureId().toString(); + } else if (isFeed && resourceType == ResourceType.UserDefinedFunction) { + ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId); + + return + Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" + + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT; + } else if (resourceType == ResourceType.UserDefinedFunction) { + ResourceId functionId = ResourceId.parse(ownerOrResourceId); + + return Paths.DATABASES_PATH_SEGMENT + "/" + functionId.getDatabaseId().toString() + "/" + + Paths.COLLECTIONS_PATH_SEGMENT + "/" + functionId.getDocumentCollectionId().toString() + "/" + + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT + "/" + functionId.getUserDefinedFunctionId().toString(); + } else if (isFeed && resourceType == ResourceType.Trigger) { + ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId); + + return + Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" + + Paths.TRIGGERS_PATH_SEGMENT; + } else if (resourceType == ResourceType.Trigger) { + ResourceId triggerId = ResourceId.parse(ownerOrResourceId); + + return Paths.DATABASES_PATH_SEGMENT + "/" + triggerId.getDatabaseId().toString() + "/" + + Paths.COLLECTIONS_PATH_SEGMENT + "/" + triggerId.getDocumentCollectionId().toString() + "/" + + Paths.TRIGGERS_PATH_SEGMENT + "/" + triggerId.getTriggerId().toString(); + } else if (isFeed && resourceType == ResourceType.Conflict) { + ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId); + + return + Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" + + Paths.CONFLICTS_PATH_SEGMENT; + } else if (resourceType == ResourceType.Conflict) { + ResourceId conflictId = 
ResourceId.parse(ownerOrResourceId); + + return Paths.DATABASES_PATH_SEGMENT + "/" + conflictId.getDatabaseId().toString() + "/" + + Paths.COLLECTIONS_PATH_SEGMENT + "/" + conflictId.getDocumentCollectionId().toString() + "/" + + Paths.CONFLICTS_PATH_SEGMENT + "/" + conflictId.getConflictId().toString(); + } else if (isFeed && resourceType == ResourceType.PartitionKeyRange) { + ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId); + + return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + + Paths.COLLECTIONS_PATH_SEGMENT + "/" + + documentCollectionId.getDocumentCollectionId().toString() + "/" + + Paths.PARTITION_KEY_RANGES_PATH_SEGMENT; + } else if (resourceType == ResourceType.PartitionKeyRange) { + ResourceId partitionKeyRangeId = ResourceId.parse(ownerOrResourceId); + + return Paths.DATABASES_PATH_SEGMENT + "/" + partitionKeyRangeId.getDatabaseId().toString() + "/" + + Paths.COLLECTIONS_PATH_SEGMENT + "/" + partitionKeyRangeId.getDocumentCollectionId().toString() + "/" + + Paths.PARTITION_KEY_RANGES_PATH_SEGMENT + "/" + partitionKeyRangeId.getPartitionKeyRangeId().toString(); + } else if (isFeed && resourceType == ResourceType.Attachment) { + ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId); + + return + Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" + + Paths.DOCUMENTS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentId().toString() + "/" + + Paths.ATTACHMENTS_PATH_SEGMENT; + } else if (resourceType == ResourceType.Attachment) { + ResourceId attachmentId = ResourceId.parse(ownerOrResourceId); + + return Paths.DATABASES_PATH_SEGMENT + "/" + attachmentId.getDatabaseId().toString() + "/" + + Paths.COLLECTIONS_PATH_SEGMENT + "/" + attachmentId.getDocumentCollectionId().toString() + "/" + + Paths.DOCUMENTS_PATH_SEGMENT + "/" + attachmentId.getDocumentId().toString() + "/" + + Paths.ATTACHMENTS_PATH_SEGMENT + "/" + attachmentId.getAttachmentId().toString(); + } else if (isFeed && resourceType == ResourceType.User) { + return + Paths.DATABASES_PATH_SEGMENT + "/" + ownerOrResourceId + "/" + + Paths.USERS_PATH_SEGMENT; + } else if (resourceType == ResourceType.User) { + ResourceId userId = ResourceId.parse(ownerOrResourceId); + + return Paths.DATABASES_PATH_SEGMENT + "/" + userId.getDatabaseId().toString() + "/" + + Paths.USERS_PATH_SEGMENT + "/" + userId.getUserId().toString(); + } else if (isFeed && resourceType == ResourceType.Permission) { + ResourceId userId = ResourceId.parse(ownerOrResourceId); + + return + Paths.DATABASES_PATH_SEGMENT + "/" + userId.getDatabaseId().toString() + "/" + + Paths.USERS_PATH_SEGMENT + "/" + userId.getUserId().toString() + "/" + + Paths.PERMISSIONS_PATH_SEGMENT; + } else if (resourceType == ResourceType.Permission) { + ResourceId permissionId = ResourceId.parse(ownerOrResourceId); + + return Paths.DATABASES_PATH_SEGMENT + "/" + permissionId.getDatabaseId().toString() + "/" + + Paths.USERS_PATH_SEGMENT + "/" + permissionId.getUserId().toString() + "/" + + Paths.PERMISSIONS_PATH_SEGMENT + "/" + permissionId.getPermissionId().toString(); + } else if (isFeed && resourceType == ResourceType.Document) { + ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId); + + return + Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + + Paths.COLLECTIONS_PATH_SEGMENT + "/" + 
documentCollectionId.getDocumentCollectionId().toString() + "/" + + Paths.DOCUMENTS_PATH_SEGMENT; + } else if (resourceType == ResourceType.Document) { + ResourceId documentId = ResourceId.parse(ownerOrResourceId); + + return Paths.DATABASES_PATH_SEGMENT + "/" + documentId.getDatabaseId().toString() + "/" + + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentId.getDocumentCollectionId().toString() + "/" + + Paths.DOCUMENTS_PATH_SEGMENT + "/" + documentId.getDocumentId().toString(); + } else if (isFeed && resourceType == ResourceType.MasterPartition) { + return Paths.PARTITIONS_PATH_SEGMENT; + } else if (resourceType == ResourceType.MasterPartition) { + return Paths.PARTITIONS_PATH_SEGMENT + "/" + ownerOrResourceId; + } else if (isFeed && resourceType == ResourceType.ServerPartition) { + return Paths.PARTITIONS_PATH_SEGMENT; + } else if (resourceType == ResourceType.ServerPartition) { + return Paths.PARTITIONS_PATH_SEGMENT + "/" + ownerOrResourceId; + } else if (isFeed && resourceType == ResourceType.Topology) { + return Paths.TOPOLOGY_PATH_SEGMENT; + } else if (resourceType == ResourceType.Topology) { + return Paths.TOPOLOGY_PATH_SEGMENT + "/" + ownerOrResourceId; + } else if (isFeed && resourceType == ResourceType.DatabaseAccount) { + return Paths.DATABASE_ACCOUNT_PATH_SEGMENT; + } else if (resourceType == ResourceType.DatabaseAccount) { + return Paths.DATABASE_ACCOUNT_PATH_SEGMENT + "/" + ownerOrResourceId; + } + + String errorMessage = "invalid resource type"; + throw new IllegalStateException(errorMessage); + } + + public static PathInfo parsePathSegments(String resourceUrl) { + String[] segments = StringUtils.strip(resourceUrl, "/").split("/"); + if (segments == null || segments.length < 1) { + return null; + } + + int uriSegmentsCount = segments.length; + String segmentOne = StringUtils.strip(segments[uriSegmentsCount - 1], "/"); + String segmentTwo = (uriSegmentsCount >= 2) ? StringUtils.strip(segments[uriSegmentsCount - 2], "/") + : StringUtils.EMPTY; + + // handle name based operation + if (uriSegmentsCount >= 2) { + // parse the databaseId, if failed, it is name based routing + // mediaId is special, we will treat it always as id based. + if (Paths.MEDIA_PATH_SEGMENT.compareTo(segments[0]) != 0 + && Paths.OFFERS_PATH_SEGMENT.compareTo(segments[0]) != 0 + && Paths.PARTITIONS_PATH_SEGMENT.compareTo(segments[0]) != 0 + && Paths.DATABASE_ACCOUNT_PATH_SEGMENT.compareTo(segments[0]) != 0) { + Pair result = ResourceId.tryParse(segments[1]); + if (!result.getLeft() || !result.getRight().isDatabaseId()) { + return parseNameSegments(resourceUrl, segments); + } + } + + } + + // Feed paths have odd number of segments + if ((uriSegmentsCount % 2 != 0) && isResourceType(segmentOne)) { + return new PathInfo(true, segmentOne, + segmentOne.compareToIgnoreCase(Paths.DATABASES_PATH_SEGMENT) != 0 ? 
segmentTwo : StringUtils.EMPTY, + false); + } else if (isResourceType(segmentTwo)) { + return new PathInfo(false, segmentTwo, segmentOne, false); + } + + return null; + } + + /** + * Method which will return boolean based on whether it is able to parse the + * path and name segment from resource url , and fill info in PathInfo object + * @param resourceUrl Complete ResourceLink + * @param pathInfo Path info object which will hold information + * @param clientVersion The Client version + * @return + */ + public static boolean tryParsePathSegments(String resourceUrl, PathInfo pathInfo, String clientVersion) { + pathInfo.resourcePath = StringUtils.EMPTY; + pathInfo.resourceIdOrFullName = StringUtils.EMPTY; + pathInfo.isFeed = false; + pathInfo.isNameBased = false; + if (StringUtils.isEmpty(resourceUrl)) { + return false; + } + String trimmedStr = StringUtils.strip(resourceUrl, Constants.Properties.PATH_SEPARATOR); + String[] segments = StringUtils.split(trimmedStr, Constants.Properties.PATH_SEPARATOR); + if (segments == null || segments.length < 1) { + return false; + } + int uriSegmentsCount = segments.length; + String segmentOne = segments[uriSegmentsCount - 1]; + String segmentTwo = (uriSegmentsCount >= 2) ? segments[uriSegmentsCount - 2] : StringUtils.EMPTY; + + // handle name based operation + if (uriSegmentsCount >= 2) { + // parse the databaseId, if failed, it is name based routing + // mediaId is special, we will treat it always as id based. + if (Paths.MEDIA_PATH_SEGMENT.compareTo(segments[0]) != 0 + && Paths.OFFERS_PATH_SEGMENT.compareTo(segments[0]) != 0 + && Paths.PARTITIONS_PATH_SEGMENT.compareTo(segments[0]) != 0 + && Paths.DATABASE_ACCOUNT_PATH_SEGMENT.compareTo(segments[0]) != 0 + && Paths.TOPOLOGY_PATH_SEGMENT.compareTo(segments[0]) != 0 + && Paths.RID_RANGE_PATH_SEGMENT.compareTo(segments[0]) != 0) { + Pair result = ResourceId.tryParse(segments[1]); + if (!result.getLeft() || !result.getRight().isDatabaseId()) { + pathInfo.isNameBased = true; + return tryParseNameSegments(resourceUrl, segments, pathInfo); + } + } + } + // Feed paths have odd number of segments + if ((uriSegmentsCount % 2 != 0) && PathsHelper.isResourceType(segmentOne)) { + pathInfo.isFeed = true; + pathInfo.resourcePath = segmentOne; + // The URL for dbs may contain the management endpoint as the segmentTwo which + // should not be used as resourceId + if (!segmentOne.equalsIgnoreCase(Paths.DATABASES_PATH_SEGMENT)) { + pathInfo.resourceIdOrFullName = segmentTwo; + } + } else if (PathsHelper.isResourceType(segmentTwo)) { + pathInfo.isFeed = false; + pathInfo.resourcePath = segmentTwo; + pathInfo.resourceIdOrFullName = segmentOne; + // Media ID is not supposed to be used for any ID verification. However, if the + // old client makes a call for media ID + // we still need to support it. + // For new clients, parse to return the attachment id. For old clients do not + // modify. 
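+            // NOTE: MediaIdHelper has not been ported to the Java SDK, so the branch below is effectively a
+            // placeholder today; the commented-out .NET logic inside shows the intended media-id handling.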
+ if (!StringUtils.isEmpty(clientVersion) + && pathInfo.resourcePath.equalsIgnoreCase(Paths.MEDIA_PATH_SEGMENT)) { + String attachmentId = null; + byte storeIndex = 0; + // MEDIA Id parsing code will come here , supported MediaIdHelper file missing in java sdk(Sync and Async both) + //Below code from .net + // if (!MediaIdHelper.TryParseMediaId(resourceIdOrFullName, out attachmentId, out storeIndex)) + // { + // return false; + //} + //resourceIdOrFullName = attachmentId; + } + } else { + return false; + } + + return true; + + } + + /** + * Method which will return boolean based on whether it is able to parse the + * name segment from resource url , and fill info in PathInfo object + * @param resourceUrl Complete ResourceLink + * @param segments + * @param pathInfo Path info object which will hold information + * @return + */ + private static boolean tryParseNameSegments(String resourceUrl, String[] segments, PathInfo pathInfo) { + pathInfo.isFeed = false; + pathInfo.resourceIdOrFullName = ""; + pathInfo.resourcePath = ""; + if (segments == null || segments.length < 1) { + return false; + } + if (segments.length % 2 == 0) { + // even number, assume it is individual resource + if (isResourceType(segments[segments.length - 2])) { + pathInfo.resourcePath = segments[segments.length - 2]; + pathInfo.resourceIdOrFullName = StringEscapeUtils.unescapeJava(StringUtils.removeEnd( + StringUtils.removeStart(resourceUrl, Paths.ROOT), Paths.ROOT)); + return true; + } + } else { + // odd number, assume it is feed request + if (isResourceType(segments[segments.length - 1])) { + pathInfo.isFeed = true; + pathInfo.resourcePath = segments[segments.length - 1]; + String resourceIdOrFullName = resourceUrl.substring(0, StringUtils.removeEnd(resourceUrl,Paths.ROOT).lastIndexOf(Paths.ROOT)); + pathInfo.resourceIdOrFullName = StringEscapeUtils.unescapeJava(StringUtils.removeEnd( + StringUtils.removeStart(resourceIdOrFullName, Paths.ROOT), Paths.ROOT)); + return true; + } + } + return false; + } + + public static PathInfo parseNameSegments(String resourceUrl, String[] segments) { + if (segments == null || segments.length < 1) { + return null; + } + + if (segments.length % 2 == 0) { + // even number, assume it is individual resource + if (isResourceType(segments[segments.length - 2])) { + return new PathInfo(false, segments[segments.length - 2], + StringEscapeUtils.unescapeJava(StringUtils.strip(resourceUrl, Paths.ROOT)), true); + } + } else { + // odd number, assume it is feed request + if (isResourceType(segments[segments.length - 1])) { + return new PathInfo(true, segments[segments.length - 1], + StringEscapeUtils.unescapeJava(StringUtils.strip( + resourceUrl.substring(0, + StringUtils.removeEnd(resourceUrl, Paths.ROOT).lastIndexOf(Paths.ROOT)), + Paths.ROOT)), + true); + } + } + + return null; + } + + private static boolean isResourceType(String resourcePathSegment) { + if (StringUtils.isEmpty(resourcePathSegment)) { + return false; + } + + switch (resourcePathSegment.toLowerCase()) { + case Paths.ATTACHMENTS_PATH_SEGMENT: + case Paths.COLLECTIONS_PATH_SEGMENT: + case Paths.DATABASES_PATH_SEGMENT: + case Paths.PERMISSIONS_PATH_SEGMENT: + case Paths.USERS_PATH_SEGMENT: + case Paths.DOCUMENTS_PATH_SEGMENT: + case Paths.STORED_PROCEDURES_PATH_SEGMENT: + case Paths.TRIGGERS_PATH_SEGMENT: + case Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT: + case Paths.CONFLICTS_PATH_SEGMENT: + case Paths.MEDIA_PATH_SEGMENT: + case Paths.OFFERS_PATH_SEGMENT: + case Paths.PARTITIONS_PATH_SEGMENT: + case 
Paths.DATABASE_ACCOUNT_PATH_SEGMENT: + case Paths.TOPOLOGY_PATH_SEGMENT: + case Paths.PARTITION_KEY_RANGES_PATH_SEGMENT: + case Paths.SCHEMAS_PATH_SEGMENT: + return true; + default: + return false; + } + } + + public static String generatePathForNameBased(ResourceType resourceType, String resourceOwnerFullName, String resourceName) { + switch (resourceType) { + case Database: + return Paths.DATABASES_PATH_SEGMENT + "/" + resourceName; + case DocumentCollection: + return resourceOwnerFullName + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + resourceName; + case StoredProcedure: + return resourceOwnerFullName + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT + "/" + resourceName; + case UserDefinedFunction: + return resourceOwnerFullName + "/" + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT + "/" + resourceName; + case Trigger: + return resourceOwnerFullName + "/" + Paths.TRIGGERS_PATH_SEGMENT + "/" + resourceName; + case Attachment: + return resourceOwnerFullName + "/" + Paths.ATTACHMENTS_PATH_SEGMENT + "/" + resourceName; + case Conflict: + return resourceOwnerFullName + "/" + Paths.CONFLICTS_PATH_SEGMENT + "/" + resourceName; + case Document: + return resourceOwnerFullName + "/" + Paths.DOCUMENTS_PATH_SEGMENT + "/" + resourceName; + case Offer: + return resourceOwnerFullName + "/" + Paths.OFFERS_PATH_SEGMENT + "/" + resourceName; + case Permission: + return resourceOwnerFullName + "/" + Paths.PERMISSIONS_PATH_SEGMENT + "/" + resourceName; + case User: + return resourceOwnerFullName + "/" + Paths.USERS_PATH_SEGMENT + "/" + resourceName; + case PartitionKeyRange: + return resourceOwnerFullName + "/" + Paths.PARTITION_KEY_RANGES_PATH_SEGMENT + "/" + resourceName; + default: + return null; + } + } + + public static String getCollectionPath(String resourceFullName) { + if (resourceFullName != null) { + String trimmedResourceFullName = Utils.trimBeginningAndEndingSlashes(resourceFullName); + int index = indexOfNth(trimmedResourceFullName, '/', 4); + if (index > 0) + return trimmedResourceFullName.substring(0, index); + } + + return resourceFullName; + } + + public static String getDatabasePath(String resourceFullName) { + if (resourceFullName != null) { + int index = indexOfNth(resourceFullName, '/', 2); + if (index > 0) + return resourceFullName.substring(0, index); + } + + return resourceFullName; + } + + public static String getParentByIndex(String resourceFullName, int segmentIndex) { + int index = indexOfNth(resourceFullName, '/', segmentIndex); + if (index > 0) + return resourceFullName.substring(0, index); + else { + index = indexOfNth(resourceFullName, '/', segmentIndex - 1); + if (index > 0) + return resourceFullName; + else + return null; + } + } + public static boolean isNameBased(String resourceIdOrFullName) { + // quick way to tell whether it is resourceId nor not, non conclusively. 
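+        // Name-based links begin with a type segment such as "dbs/...", which places a '/' at index 3;
+        // parsed resource ids generally do not have that shape, which is what makes this quick,
+        // non-conclusive check possible.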
+ if (resourceIdOrFullName != null && !resourceIdOrFullName.isEmpty() + && resourceIdOrFullName.length() > 4 && resourceIdOrFullName.charAt(3) == '/') { + return true; + } + return false; + } + + private static int indexOfNth(String str, char value, int nthOccurance) { + int remaining = nthOccurance; + char[] characters = str.toCharArray(); + for (int i = 0; i < characters.length; i++) { + if (characters[i] == value) { + remaining--; + if (remaining == 0) { + return i; + } + } + } + return -1; + } + + public static ResourceType getResourcePathSegment(String resourcePathSegment) throws BadRequestException { + if (StringUtils.isEmpty(resourcePathSegment)) { + String message = String.format(RMResources.StringArgumentNullOrEmpty, "resourcePathSegment"); + throw new BadRequestException(message); + } + + switch (resourcePathSegment) { + case Paths.ATTACHMENTS_PATH_SEGMENT: + return ResourceType.Attachment; + + case Paths.COLLECTIONS_PATH_SEGMENT: + return ResourceType.DocumentCollection; + + case Paths.DATABASES_PATH_SEGMENT: + return ResourceType.Database; + + case Paths.PERMISSIONS_PATH_SEGMENT: + return ResourceType.Permission; + + case Paths.USERS_PATH_SEGMENT: + return ResourceType.User; + + case Paths.DOCUMENTS_PATH_SEGMENT: + return ResourceType.Document; + + case Paths.STORED_PROCEDURES_PATH_SEGMENT: + return ResourceType.StoredProcedure; + + case Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT: + return ResourceType.UserDefinedFunction; + + case Paths.TRIGGERS_PATH_SEGMENT: + return ResourceType.Trigger; + + case Paths.CONFLICTS_PATH_SEGMENT: + return ResourceType.Conflict; + + case Paths.OFFERS_PATH_SEGMENT: + return ResourceType.Offer; + + case Paths.SCHEMAS_PATH_SEGMENT: + return ResourceType.Schema; + } + + String errorMessage = String.format(RMResources.UnknownResourceType, resourcePathSegment); + throw new BadRequestException(errorMessage); + } + + public static String getResourcePath(ResourceType resourceType) throws BadRequestException { + switch (resourceType) { + case Database: + return Paths.DATABASES_PATH_SEGMENT; + + case DocumentCollection: + return Paths.COLLECTIONS_PATH_SEGMENT; + + case Document: + return Paths.DOCUMENTS_PATH_SEGMENT; + + case StoredProcedure: + return Paths.STORED_PROCEDURES_PATH_SEGMENT; + + case UserDefinedFunction: + return Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT; + + case Trigger: + return Paths.TRIGGERS_PATH_SEGMENT; + + case Conflict: + return Paths.CONFLICTS_PATH_SEGMENT; + + case Attachment: + return Paths.ATTACHMENTS_PATH_SEGMENT; + + case User: + return Paths.USERS_PATH_SEGMENT; + + case Permission: + return Paths.PERMISSIONS_PATH_SEGMENT; + + case Offer: + return Paths.OFFERS_PATH_SEGMENT; + + case MasterPartition: + case ServerPartition: + return Paths.PARTITIONS_PATH_SEGMENT; + + case PartitionKeyRange: + return Paths.PARTITION_KEY_RANGES_PATH_SEGMENT; + + case Media: + return Paths.MEDIA_ROOT; + + case Schema: + return Paths.SCHEMAS_PATH_SEGMENT; + + + case DatabaseAccount: + case Topology: + + return Paths.ROOT; + + default: + String errorMessage = String.format(RMResources.UnknownResourceType, resourceType.toString()); + throw new BadRequestException(errorMessage); + } + } + + public static boolean validateResourceFullName(ResourceType resourceType, String resourceFullName) { + String[] segments = StringUtils.split(resourceFullName, '/'); + String[] resourcePathArray = getResourcePathArray(resourceType); + if (resourcePathArray == null) { + return false; + } + + if (segments.length != resourcePathArray.length * 2) { + return false; + } + 
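+        // A resource full name alternates type and name segments, e.g. "dbs/myDb/colls/myColl",
+        // so every expected type segment must appear at the corresponding even index.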
for (int i = 0; i < resourcePathArray.length; i++) { + if(resourcePathArray[i].compareTo(segments[2 * i]) != 0) { + return false; + } + } + return true; + } + + private static String[] getResourcePathArray(ResourceType resourceType) { + List segments = new ArrayList(); + segments.add(Paths.DATABASES_PATH_SEGMENT); + + if (resourceType == ResourceType.Permission || + resourceType == ResourceType.User) { + segments.add(Paths.USERS_PATH_SEGMENT); + if (resourceType == ResourceType.Permission) { + segments.add(Paths.PERMISSIONS_PATH_SEGMENT); + } + } else if (resourceType == ResourceType.DocumentCollection || + resourceType == ResourceType.StoredProcedure || + resourceType == ResourceType.UserDefinedFunction || + resourceType == ResourceType.Trigger || + resourceType == ResourceType.Conflict || + resourceType == ResourceType.Attachment || + resourceType == ResourceType.Document || + resourceType == ResourceType.PartitionKeyRange || + resourceType == ResourceType.Schema) { + segments.add(Paths.COLLECTIONS_PATH_SEGMENT); + if (resourceType == ResourceType.StoredProcedure) { + segments.add(Paths.STORED_PROCEDURES_PATH_SEGMENT); + } else if(resourceType == ResourceType.UserDefinedFunction) { + segments.add(Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT); + } else if(resourceType == ResourceType.Trigger) { + segments.add(Paths.TRIGGERS_PATH_SEGMENT); + } else if (resourceType == ResourceType.Conflict) { + segments.add(Paths.CONFLICTS_PATH_SEGMENT); + } else if (resourceType == ResourceType.Schema) { + segments.add(Paths.SCHEMAS_PATH_SEGMENT); + } else if(resourceType == ResourceType.Document || + resourceType == ResourceType.Attachment) { + segments.add(Paths.DOCUMENTS_PATH_SEGMENT); + if (resourceType == ResourceType.Attachment) { + segments.add(Paths.ATTACHMENTS_PATH_SEGMENT); + } + } else if(resourceType == ResourceType.PartitionKeyRange) { + segments.add(Paths.PARTITION_KEY_RANGES_PATH_SEGMENT); + } + } else if (resourceType != ResourceType.Database) { + return null; + } + return segments.stream().toArray(String[]::new); + } + + public static boolean validateResourceId(ResourceType resourceType, String resourceId) { + if (resourceType == ResourceType.Conflict) { + return PathsHelper.validateConflictId(resourceId); + } else if (resourceType == ResourceType.Database) { + return PathsHelper.validateDatabaseId(resourceId); + } else if (resourceType == ResourceType.DocumentCollection) { + return PathsHelper.validateDocumentCollectionId(resourceId); + } else if (resourceType == ResourceType.Document) { + return PathsHelper.validateDocumentId(resourceId); + } else if (resourceType == ResourceType.Permission) { + return PathsHelper.validatePermissionId(resourceId); + } else if (resourceType == ResourceType.StoredProcedure) { + return PathsHelper.validateStoredProcedureId(resourceId); + } else if (resourceType == ResourceType.Trigger) { + return PathsHelper.validateTriggerId(resourceId); + } else if (resourceType == ResourceType.UserDefinedFunction) { + return PathsHelper.validateUserDefinedFunctionId(resourceId); + } else if (resourceType == ResourceType.User) { + return PathsHelper.validateUserId(resourceId); + } else if (resourceType == ResourceType.Attachment) { + return PathsHelper.validateAttachmentId(resourceId); + } else { + logger.error(String.format("ValidateResourceId not implemented for Type %s in ResourceRequestHandler", resourceType.toString())); + return false; + } + } + + public static boolean validateDatabaseId(String resourceIdString) { + Pair pair = ResourceId.tryParse(resourceIdString); + 
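+        // Valid only when the string parses as a resource id whose database portion is non-zero;
+        // the validators below apply the same pattern to their respective resource types.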
return pair.getLeft() && pair.getRight().getDatabase() != 0; + } + + public static boolean validateDocumentCollectionId(String resourceIdString) { + Pair pair = ResourceId.tryParse(resourceIdString); + return pair.getLeft() && pair.getRight().getDocumentCollection() != 0; + } + + public static boolean validateDocumentId(String resourceIdString) { + Pair pair = ResourceId.tryParse(resourceIdString); + return pair.getLeft() && pair.getRight().getDocument() != 0; + } + + public static boolean validateConflictId(String resourceIdString) { + Pair pair = ResourceId.tryParse(resourceIdString); + return pair.getLeft() && pair.getRight().getConflict() != 0; + } + + public static boolean validateAttachmentId(String resourceIdString) { + Pair pair = ResourceId.tryParse(resourceIdString); + return pair.getLeft() && pair.getRight().getAttachment() != 0; + } + + public static boolean validatePermissionId(String resourceIdString) { + Pair pair = ResourceId.tryParse(resourceIdString); + return pair.getLeft() && pair.getRight().getPermission() != 0; + } + + public static boolean validateStoredProcedureId(String resourceIdString) { + Pair pair = ResourceId.tryParse(resourceIdString); + return pair.getLeft() && pair.getRight().getStoredProcedure() != 0; + } + + public static boolean validateTriggerId(String resourceIdString) { + Pair pair = ResourceId.tryParse(resourceIdString); + return pair.getLeft() && pair.getRight().getTrigger() != 0; + } + + public static boolean validateUserDefinedFunctionId(String resourceIdString) { + Pair pair = ResourceId.tryParse(resourceIdString); + return pair.getLeft() && pair.getRight().getUserDefinedFunction() != 0; + } + + public static boolean validateUserId(String resourceIdString) { + Pair pair = ResourceId.tryParse(resourceIdString); + return pair.getLeft() && pair.getRight().getUser() != 0; + } + + + public static boolean isPublicResource(Resource resourceType) { + if (resourceType instanceof Database || + resourceType instanceof DocumentCollection || + resourceType instanceof StoredProcedure || + resourceType instanceof UserDefinedFunction || + resourceType instanceof Trigger || + resourceType instanceof Conflict || + resourceType instanceof User || + resourceType instanceof Permission || + resourceType instanceof Document || + resourceType instanceof Offer + ) { + return true; + } else { + return false; + } + } + +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Permission.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Permission.java new file mode 100644 index 0000000000000..bd14847bc6996 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Permission.java @@ -0,0 +1,134 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.PartitionKey; +import com.azure.data.cosmos.PermissionMode; +import com.azure.data.cosmos.Resource; +import com.fasterxml.jackson.databind.node.ArrayNode; +import org.apache.commons.lang3.StringUtils; + +/** + * Represents a per-User Permission to access a specific resource e.g. Document or Collection in the Azure Cosmos DB database service. + */ +public class Permission extends Resource { + /** + * Initialize a permission object. + */ + public Permission() { + super(); + } + + /** + * Initialize a permission object from json string. + * + * @param jsonString the json string that represents the permission. + */ + public Permission(String jsonString) { + super(jsonString); + } + + /** + * Sets the id + * @param id the name of the resource. + * @return the current instance of permission + */ + public Permission id(String id){ + super.id(id); + return this; + } + + /** + * Gets the self-link of resource to which the permission applies. + * + * @return the resource link. + */ + public String getResourceLink() { + return super.getString(Constants.Properties.RESOURCE_LINK); + } + + /** + * Sets the self-link of resource to which the permission applies. + * + * @param resourceLink the resource link. + */ + public void setResourceLink(String resourceLink) { + BridgeInternal.setProperty(this, Constants.Properties.RESOURCE_LINK, resourceLink); + } + + /** + * Gets the permission mode. + * + * @return the permission mode. + */ + public PermissionMode getPermissionMode() { + String value = super.getString(Constants.Properties.PERMISSION_MODE); + return PermissionMode.valueOf(StringUtils.upperCase(value)); + } + + /** + * Sets the permission mode. + * + * @param permissionMode the permission mode. + */ + public void setPermissionMode(PermissionMode permissionMode) { + BridgeInternal.setProperty(this, Constants.Properties.PERMISSION_MODE, + permissionMode.toString().toLowerCase()); + } + + /** + * Gets the access token granting the defined permission. + * + * @return the access token. + */ + public String getToken() { + return super.getString(Constants.Properties.TOKEN); + } + + /** + * Gets the resource partition key associated with this permission object. + * + * @return the partition key. + */ + public PartitionKey getResourcePartitionKey() { + PartitionKey key = null; + Object value = super.get(Constants.Properties.RESOURCE_PARTITION_KEY); + if (value != null) { + ArrayNode arrayValue = (ArrayNode) value; + key = new PartitionKey(BridgeInternal.getValue(arrayValue.get(0))); + } + + return key; + } + + /** + * Sets the resource partition key associated with this permission object. + * + * @param partitionkey the partition key. 
+ */ + public void setResourcePartitionKey(PartitionKey partitionkey) { + BridgeInternal.setProperty(this, Constants.Properties.RESOURCE_PARTITION_KEY, partitionkey.getInternalPartitionKey().toJson()); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Quadruple.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Quadruple.java new file mode 100644 index 0000000000000..612ab51ca2172 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Quadruple.java @@ -0,0 +1,64 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +/** + * Represents class with four different generic objects. + */ +public class Quadruple { + + private final A val0; + private final B val1; + private final C val2; + private final D val3; + + public static Quadruple with(final A value0, final B value1, final C value2, + final D value3) { + return new Quadruple(value0, value1, value2, value3); + } + + public Quadruple(final A value0, final B value1, final C value2, final D value3) { + this.val0 = value0; + this.val1 = value1; + this.val2 = value2; + this.val3 = value3; + } + + public A getValue0() { + return this.val0; + } + + public B getValue1() { + return this.val1; + } + + public C getValue2() { + return this.val2; + } + + public D getValue3() { + return this.val3; + } + +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/QueryCompatibilityMode.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/QueryCompatibilityMode.java new file mode 100644 index 0000000000000..191f205bbec30 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/QueryCompatibilityMode.java @@ -0,0 +1,34 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +/** + * A client query compatibility mode when making query request in the Azure Cosmos DB database service. Can be used + * to force a specific query request format. + */ +public enum QueryCompatibilityMode { + Default, + Query, + SqlQuery +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/QueryMetrics.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/QueryMetrics.java new file mode 100644 index 0000000000000..da298ab34d9f4 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/QueryMetrics.java @@ -0,0 +1,315 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.internal.query.metrics.ClientSideMetrics; +import com.azure.data.cosmos.internal.query.metrics.FetchExecutionRange; +import com.azure.data.cosmos.internal.query.metrics.QueryMetricsTextWriter; +import com.azure.data.cosmos.internal.query.metrics.SchedulingTimeSpan; +import org.apache.commons.lang3.tuple.ImmutablePair; + +import java.time.Duration; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; + +/** + * Query metrics in the Azure Cosmos database service. + * This metric represents a moving average for a set of queries whose metrics have been aggregated together. 
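+ * <p>
+ * Hypothetical usage sketch (only the {@code QueryMetrics} members shown are real; the list contents are illustrative):
+ * <pre>{@code
+ * List<QueryMetrics> perPageMetrics = new ArrayList<>(); // e.g. one entry per query feed page
+ * perPageMetrics.add(QueryMetrics.ZERO);
+ * QueryMetrics combined = QueryMetrics.createFromCollection(perPageMetrics);
+ * String report = combined.toString(); // rendered through QueryMetricsTextWriter
+ * }</pre>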
+ */ +public final class QueryMetrics { + public static QueryMetrics ZERO = new QueryMetrics( + new ArrayList<>(), /* */ + 0, /* retrievedDocumentCount */ + 0, /* retrievedDocumentSize */ + 0, /* outputDocumentCount */ + 0, /* outputDocumentSize */ + 0, /* indexHitCount */ + Duration.ZERO, + QueryPreparationTimes.ZERO, + Duration.ZERO, + Duration.ZERO, + Duration.ZERO, + RuntimeExecutionTimes.ZERO, + Duration.ZERO, + ClientSideMetrics.ZERO); + private final long retrievedDocumentCount; + private final long retrievedDocumentSize; + private final long outputDocumentCount; + private final long outputDocumentSize; + private final long indexHitDocumentCount; + private final Duration totalQueryExecutionTime; + private final QueryPreparationTimes queryPreparationTimes; + private final Duration indexLookupTime; + private final Duration documentLoadTime; + private final Duration vmExecutionTime; + private final RuntimeExecutionTimes runtimeExecutionTimes; + private final Duration documentWriteTime; + private final ClientSideMetrics clientSideMetrics; + private final List activityIds; + + public QueryMetrics(List activities, long retrievedDocumentCount, long retrievedDocumentSize, long outputDocumentCount, + long outputDocumentSize, long indexHitCount, Duration totalQueryExecutionTime, + QueryPreparationTimes queryPreparationTimes, Duration indexLookupTime, Duration documentLoadTime, + Duration vmExecutionTime, RuntimeExecutionTimes runtimeExecutionTimes, Duration documentWriteTime, + ClientSideMetrics clientSideMetrics) { + this.retrievedDocumentCount = retrievedDocumentCount; + this.retrievedDocumentSize = retrievedDocumentSize; + this.outputDocumentCount = outputDocumentCount; + this.outputDocumentSize = outputDocumentSize; + this.indexHitDocumentCount = indexHitCount; + this.totalQueryExecutionTime = totalQueryExecutionTime; + this.queryPreparationTimes = queryPreparationTimes; + this.indexLookupTime = indexLookupTime; + this.documentLoadTime = documentLoadTime; + this.vmExecutionTime = vmExecutionTime; + this.runtimeExecutionTimes = runtimeExecutionTimes; + this.documentWriteTime = documentWriteTime; + this.clientSideMetrics = clientSideMetrics; + this.activityIds = activities; + } + + /** + * @return the retrievedDocumentCount + */ + public long getRetrievedDocumentCount() { + return retrievedDocumentCount; + } + + /** + * @return the retrievedDocumentSize + */ + public long getRetrievedDocumentSize() { + return retrievedDocumentSize; + } + + /** + * @return the outputDocumentCount + */ + public long getOutputDocumentCount() { + return outputDocumentCount; + } + + /** + * @return the outputDocumentSize + */ + public long getOutputDocumentSize() { + return outputDocumentSize; + } + + /** + * @return the indexHitDocumentCount + */ + public long getIndexHitDocumentCount() { + return indexHitDocumentCount; + } + + /** + * Gets the index hit ratio by query in the Azure Cosmos database service. + * + * @return the IndexHitRatio + */ + public double getIndexHitRatio() { + return this.retrievedDocumentCount == 0 ? 
1 : (double) this.indexHitDocumentCount / this.retrievedDocumentCount; + } + + /** + * @return the totalQueryExecutionTime + */ + public Duration getTotalQueryExecutionTime() { + return totalQueryExecutionTime; + } + + /** + * @return the queryPreparationTimes + */ + public QueryPreparationTimes getQueryPreparationTimes() { + return queryPreparationTimes; + } + + /** + * @return the indexLookupTime + */ + public Duration getIndexLookupTime() { + return indexLookupTime; + } + + /** + * @return the documentLoadTime + */ + public Duration getDocumentLoadTime() { + return documentLoadTime; + } + + /** + * @return the vmExecutionTime + */ + public Duration getVMExecutionTime() { + return vmExecutionTime; + } + + /** + * @return the runtimeExecutionTimes + */ + public RuntimeExecutionTimes getRuntimeExecutionTimes() { + return runtimeExecutionTimes; + } + + /** + * @return the documentWriteTime + */ + public Duration getDocumentWriteTime() { + return documentWriteTime; + } + + /** + * @return the clientSideMetrics + */ + public ClientSideMetrics getClientSideMetrics() { + return clientSideMetrics; + } + + /** + * @return number of reties in the Azure Cosmos database service. + */ + public long getRetries() { + return this.clientSideMetrics.getRetries(); + } + + public QueryMetrics add(QueryMetrics... queryMetricsArgs) { + ArrayList queryMetricsList = new ArrayList(); + for (QueryMetrics queryMetrics : queryMetricsArgs) { + queryMetricsList.add(queryMetrics); + } + + queryMetricsList.add(this); + + return QueryMetrics.createFromCollection(queryMetricsList); + } + + private String toTextString() { + return toTextString(0); + } + + private String toTextString(int indentLevel) { + StringBuilder stringBuilder = new StringBuilder(); + QueryMetricsTextWriter queryMetricsTextWriter = new QueryMetricsTextWriter(stringBuilder); + queryMetricsTextWriter.writeQueryMetrics(this); + return stringBuilder.toString(); + } + + public static QueryMetrics createFromCollection(Collection queryMetricsCollection) { + long retrievedDocumentCount = 0; + long retrievedDocumentSize = 0; + long outputDocumentCount = 0; + long outputDocumentSize = 0; + long indexHitDocumentCount = 0; + Duration totalQueryExecutionTime = Duration.ZERO; + Collection queryPreparationTimesCollection = new ArrayList(); + Duration indexLookupTime = Duration.ZERO; + Duration documentLoadTime = Duration.ZERO; + Duration vmExecutionTime = Duration.ZERO; + Collection runtimeExecutionTimesCollection = new ArrayList(); + Duration documentWriteTime = Duration.ZERO; + Collection clientSideMetricsCollection = new ArrayList(); + List activityIds = new ArrayList<>(); + + for (QueryMetrics queryMetrics : queryMetricsCollection) { + if (queryMetrics == null) { + throw new NullPointerException("queryMetricsList can not have null elements"); + } + activityIds.addAll(queryMetrics.activityIds); + retrievedDocumentCount += queryMetrics.retrievedDocumentCount; + retrievedDocumentSize += queryMetrics.retrievedDocumentSize; + outputDocumentCount += queryMetrics.outputDocumentCount; + outputDocumentSize += queryMetrics.outputDocumentSize; + indexHitDocumentCount += queryMetrics.indexHitDocumentCount; + totalQueryExecutionTime = totalQueryExecutionTime.plus(queryMetrics.totalQueryExecutionTime); + queryPreparationTimesCollection.add(queryMetrics.queryPreparationTimes); + indexLookupTime = indexLookupTime.plus(queryMetrics.indexLookupTime); + documentLoadTime = documentLoadTime.plus(queryMetrics.documentLoadTime); + vmExecutionTime = 
vmExecutionTime.plus(queryMetrics.vmExecutionTime); + runtimeExecutionTimesCollection.add(queryMetrics.runtimeExecutionTimes); + documentWriteTime = documentWriteTime.plus(queryMetrics.documentWriteTime); + clientSideMetricsCollection.add(queryMetrics.clientSideMetrics); + } + + return new QueryMetrics(activityIds, retrievedDocumentCount, retrievedDocumentSize, outputDocumentCount, + outputDocumentSize, + indexHitDocumentCount, totalQueryExecutionTime, + QueryPreparationTimes.createFromCollection(queryPreparationTimesCollection), indexLookupTime, documentLoadTime, + vmExecutionTime, RuntimeExecutionTimes.createFromCollection(runtimeExecutionTimesCollection), + documentWriteTime, ClientSideMetrics.createFromCollection(clientSideMetricsCollection)); + } + + private static double getOrDefault(HashMap metrics, String key) { + Double doubleReference = metrics.get(key); + return doubleReference == null ? 0 : doubleReference; + } + + public static QueryMetrics createFromDelimitedString(String delimitedString) { + HashMap metrics = QueryMetricsUtils.parseDelimitedString(delimitedString); + return QueryMetrics.createFromDelimitedStringAndClientSideMetrics(delimitedString, + new ClientSideMetrics(0, 0, new ArrayList(), + new ArrayList>()), ""); + } + + public static QueryMetrics createFromDelimitedStringAndClientSideMetrics(String delimitedString, ClientSideMetrics clientSideMetrics, + String activityId) { + HashMap metrics = QueryMetricsUtils.parseDelimitedString(delimitedString); + double indexHitRatio; + double retrievedDocumentCount; + indexHitRatio = metrics.get(QueryMetricsConstants.IndexHitRatio); + retrievedDocumentCount = metrics.get(QueryMetricsConstants.RetrievedDocumentCount); + long indexHitCount = (long) (indexHitRatio * retrievedDocumentCount); + double outputDocumentCount = metrics.get(QueryMetricsConstants.OutputDocumentCount); + double outputDocumentSize = metrics.get(QueryMetricsConstants.OutputDocumentSize); + double retrievedDocumentSize = metrics.get(QueryMetricsConstants.RetrievedDocumentSize); + Duration totalQueryExecutionTime = QueryMetricsUtils.getDurationFromMetrics(metrics, QueryMetricsConstants.TotalQueryExecutionTimeInMs); + + List activities = new ArrayList<>(); + activities.add(activityId); + + return new QueryMetrics( + activities, + (long) retrievedDocumentCount, + (long) retrievedDocumentSize, + (long) outputDocumentCount, + (long) outputDocumentSize, + indexHitCount, + totalQueryExecutionTime, + QueryPreparationTimes.createFromDelimitedString(delimitedString), + QueryMetricsUtils.getDurationFromMetrics(metrics, QueryMetricsConstants.IndexLookupTimeInMs), + QueryMetricsUtils.getDurationFromMetrics(metrics, QueryMetricsConstants.DocumentLoadTimeInMs), + QueryMetricsUtils.getDurationFromMetrics(metrics, QueryMetricsConstants.VMExecutionTimeInMs), + RuntimeExecutionTimes.createFromDelimitedString(delimitedString), + QueryMetricsUtils.getDurationFromMetrics(metrics, QueryMetricsConstants.DocumentWriteTimeInMs), + clientSideMetrics); + } + + @Override + public String toString() { + return toTextString(0); + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/QueryMetricsConstants.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/QueryMetricsConstants.java new file mode 100644 index 0000000000000..2fc5ebf913211 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/QueryMetricsConstants.java @@ -0,0 +1,91 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft 
Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal; + +public final class QueryMetricsConstants { + // QueryMetrics + public static final String RetrievedDocumentCount = "retrievedDocumentCount"; + public static final String RetrievedDocumentSize = "retrievedDocumentSize"; + public static final String OutputDocumentCount = "outputDocumentCount"; + public static final String OutputDocumentSize = "outputDocumentSize"; + public static final String IndexHitRatio = "indexUtilizationRatio"; + public static final String IndexHitDocumentCount = "indexHitDocumentCount"; + public static final String TotalQueryExecutionTimeInMs = "totalExecutionTimeInMs"; + + // QueryPreparationTimes + public static final String QueryCompileTimeInMs = "queryCompileTimeInMs"; + public static final String LogicalPlanBuildTimeInMs = "queryLogicalPlanBuildTimeInMs"; + public static final String PhysicalPlanBuildTimeInMs = "queryPhysicalPlanBuildTimeInMs"; + public static final String QueryOptimizationTimeInMs = "queryOptimizationTimeInMs"; + + // QueryTimes + public static final String IndexLookupTimeInMs = "indexLookupTimeInMs"; + public static final String DocumentLoadTimeInMs = "documentLoadTimeInMs"; + public static final String VMExecutionTimeInMs = "VMExecutionTimeInMs"; + public static final String DocumentWriteTimeInMs = "writeOutputTimeInMs"; + + // RuntimeExecutionTimes + public static final String QueryEngineTimes = "queryEngineTimes"; + public static final String SystemFunctionExecuteTimeInMs = "systemFunctionExecuteTimeInMs"; + public static final String UserDefinedFunctionExecutionTimeInMs = "userFunctionExecuteTimeInMs"; + + // ClientSideMetrics + public static final String Retries = "retries"; + public static final String RequestCharge = "requestCharge"; + + // QueryMetrics Text + public static final String ActivityIds = "Activity Ids"; + public static final String RetrievedDocumentCountText = "Retrieved Document Count"; + public static final String RetrievedDocumentSizeText = "Retrieved Document Size"; + public static final String OutputDocumentCountText = "Output Document Count"; + public static final String OutputDocumentSizeText = "Output Document Size"; + public static final String IndexUtilizationText = "Index Utilization"; + public static final String TotalQueryExecutionTimeText = "Total Query Execution Time"; + + // QueryPreparationTimes Text + public static final String QueryPreparationTimesText = "Query Preparation 
Times"; + public static final String QueryCompileTimeText = "Query Compilation Time"; + public static final String LogicalPlanBuildTimeText = "Logical Plan Build Time"; + public static final String PhysicalPlanBuildTimeText = "Physical Plan Build Time"; + public static final String QueryOptimizationTimeText = "Query Optimization Time"; + + // QueryTimes Text + public static final String QueryEngineTimesText = "Query Engine Times"; + public static final String IndexLookupTimeText = "Index Lookup Time"; + public static final String DocumentLoadTimeText = "Document Load Time"; + public static final String WriteOutputTimeText = "Document Write Time"; + + // RuntimeExecutionTimes Text + public static final String RuntimeExecutionTimesText = "Runtime Execution Times"; + public static final String TotalExecutionTimeText = "Query Engine Execution Time"; + public static final String SystemFunctionExecuteTimeText = "System Function Execution Time"; + public static final String UserDefinedFunctionExecutionTimeText = "User-defined Function Execution Time"; + + // ClientSideQueryMetrics Text + public static final String ClientSideQueryMetricsText = "Client Side Metrics"; + public static final String RetriesText = "Retry Count"; + public static final String RequestChargeText = "Request Charge"; + public static final String FetchExecutionRangesText = "Partition Execution Timeline"; + public static final String SchedulingMetricsText = "Scheduling Metrics"; +} + diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/QueryMetricsUtils.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/QueryMetricsUtils.java new file mode 100644 index 0000000000000..dc57f3d0ce223 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/QueryMetricsUtils.java @@ -0,0 +1,191 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos.internal; + +import org.apache.commons.lang3.StringUtils; + +import java.time.Duration; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; + +public class QueryMetricsUtils { + static final String Indent = StringUtils.SPACE; + private static final int NANOS_TO_MILLIS = 1000000; + private static final String BytesUnitString = "bytes"; + + static HashMap parseDelimitedString(String delimitedString) { + if (delimitedString == null) { + throw new NullPointerException("delimitedString"); + } + + HashMap metrics = new HashMap<>(); + + final int key = 0; + final int value = 1; + String[] headerAttributes = StringUtils.split(delimitedString, ";"); + + for (String attribute : headerAttributes) { + String[] attributeKeyValue = StringUtils.split(attribute, "="); + + if (attributeKeyValue.length != 2) { + throw new NullPointerException("recieved a malformed delimited STRING"); + } + + String attributeKey = attributeKeyValue[key]; + double attributeValue = Double.parseDouble(attributeKeyValue[value]); + metrics.put(attributeKey, attributeValue); + } + + return metrics; + } + + static Duration durationFromMetrics(HashMap metrics, String key) { + // Just attempt to get the metrics + Double durationInMilliseconds = metrics.get(key); + if (durationInMilliseconds == null) { + return Duration.ZERO; + } + + long seconds = (long) (durationInMilliseconds / 1e3); + long nanoseconds = (long) ((durationInMilliseconds - (seconds * 1e3)) * 1e6); + + return Duration.ofSeconds(seconds, nanoseconds); + } + + static Duration getDurationFromMetrics(HashMap metrics, String key) { + double timeSpanInMilliseconds; + Duration timeSpanFromMetrics; + timeSpanInMilliseconds = metrics.get(key); + timeSpanFromMetrics = QueryMetricsUtils.doubleMillisecondsToDuration(timeSpanInMilliseconds); + return timeSpanFromMetrics; + } + + private static Duration doubleMillisecondsToDuration(double timeSpanInMilliseconds) { + long timeInNanoSeconds = (long) (timeSpanInMilliseconds * NANOS_TO_MILLIS); + return Duration.ofNanos(timeInNanoSeconds); + } + + private static void appendToStringBuilder(StringBuilder stringBuilder, String property, String value, + String units, int indentLevel) { + final String FormatString = "%-40s : %15s %-12s %s"; + + stringBuilder.append(String.format( + Locale.ROOT, + FormatString, + StringUtils.repeat(Indent, indentLevel) + property, + value, + units, + System.lineSeparator())); + } + + static void appendBytesToStringBuilder(StringBuilder stringBuilder, String property, long bytes, int indentLevel) { + final String BytesFormatString = "%d"; + + QueryMetricsUtils.appendToStringBuilder( + stringBuilder, + property, + String.format(BytesFormatString, bytes), + BytesUnitString, + indentLevel); + } + + static void appendMillisecondsToStringBuilder(StringBuilder stringBuilder, String property, double milliseconds, + int indentLevel) { + final String MillisecondsFormatString = "%f"; + final String MillisecondsUnitString = "milliseconds"; + + QueryMetricsUtils.appendToStringBuilder(stringBuilder, property, String.format(MillisecondsFormatString, + milliseconds), MillisecondsUnitString, indentLevel); + } + + static void appendNanosecondsToStringBuilder(StringBuilder stringBuilder, String property, double nanoSeconds, + int indentLevel) { + final String MillisecondsFormatString = "%.2f"; + final String MillisecondsUnitString = "milliseconds"; + QueryMetricsUtils.appendToStringBuilder(stringBuilder, property, String.format(MillisecondsFormatString, + 
nanosToMilliSeconds(nanoSeconds)), MillisecondsUnitString, indentLevel); + } + + static double nanosToMilliSeconds(double nanos) { + return nanos / NANOS_TO_MILLIS; + } + + static void appendHeaderToStringBuilder(StringBuilder stringBuilder, String headerTitle, int indentLevel) { + final String FormatString = "%s %s"; + stringBuilder.append(String.format( + Locale.ROOT, + FormatString, + String.join(StringUtils.repeat(Indent, indentLevel)) + headerTitle, + System.lineSeparator())); + } + + static void appendRUToStringBuilder(StringBuilder stringBuilder, String property, double requestCharge, + int indentLevel) { + final String RequestChargeFormatString = "%s"; + final String RequestChargeUnitString = "RUs"; + + QueryMetricsUtils.appendToStringBuilder( + stringBuilder, + property, + String.format(Locale.ROOT, RequestChargeFormatString, requestCharge), + RequestChargeUnitString, + indentLevel); + } + + static void appendActivityIdsToStringBuilder(StringBuilder stringBuilder, String activityIdsLabel, + List activityIds, int indentLevel) { + stringBuilder.append(activityIdsLabel); + stringBuilder.append(System.lineSeparator()); + for (String activityId : activityIds) { + stringBuilder.append(Indent); + stringBuilder.append(activityId); + stringBuilder.append(System.lineSeparator()); + } + } + + static void appendPercentageToStringBuilder(StringBuilder stringBuilder, String property, double percentage, + int indentLevel) { + final String PercentageFormatString = "%.2f"; + final String PercentageUnitString = "%"; + + QueryMetricsUtils.appendToStringBuilder(stringBuilder, property, String.format(PercentageFormatString, + percentage * 100), PercentageUnitString, indentLevel); + } + + static void appendCountToStringBuilder(StringBuilder stringBuilder, String property, long count, int indentLevel) { + final String CountFormatString = "%s"; + final String CountUnitString = ""; + + QueryMetricsUtils.appendToStringBuilder( + stringBuilder, + property, + String.format(CountFormatString, count), + CountUnitString, + indentLevel); + } + + static void appendNewlineToStringBuilder(StringBuilder stringBuilder) { + QueryMetricsUtils.appendHeaderToStringBuilder(stringBuilder, StringUtils.EMPTY, 0); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/QueryPreparationTimes.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/QueryPreparationTimes.java new file mode 100644 index 0000000000000..e50c3fbd91fdc --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/QueryPreparationTimes.java @@ -0,0 +1,178 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal; + +import java.time.Duration; +import java.util.Collection; +import java.util.HashMap; + +public final class QueryPreparationTimes { + + static final QueryPreparationTimes ZERO = new QueryPreparationTimes(Duration.ZERO, Duration.ZERO, Duration.ZERO, Duration.ZERO); + + private final Duration queryCompilationTime; + private final Duration logicalPlanBuildTime; + private final Duration physicalPlanBuildTime; + private final Duration queryOptimizationTime; + + /** + * @param queryCompilationTime + * @param logicalPlanBuildTime + * @param physicalPlanBuildTime + * @param queryOptimizationTime + */ + QueryPreparationTimes(Duration queryCompilationTime, Duration logicalPlanBuildTime, Duration physicalPlanBuildTime, + Duration queryOptimizationTime) { + super(); + + if (queryCompilationTime == null) { + throw new NullPointerException("queryCompilationTime"); + } + + if (logicalPlanBuildTime == null) { + throw new NullPointerException("logicalPlanBuildTime"); + } + + if (physicalPlanBuildTime == null) { + throw new NullPointerException("physicalPlanBuildTime"); + } + + if (queryOptimizationTime == null) { + throw new NullPointerException("queryOptimizationTime"); + } + + this.queryCompilationTime = queryCompilationTime; + this.logicalPlanBuildTime = logicalPlanBuildTime; + this.physicalPlanBuildTime = physicalPlanBuildTime; + this.queryOptimizationTime = queryOptimizationTime; + } + + /** + * @return the queryCompilationTime + */ + public Duration getQueryCompilationTime() { + return queryCompilationTime; + } + + /** + * @return the logicalPlanBuildTime + */ + public Duration getLogicalPlanBuildTime() { + return logicalPlanBuildTime; + } + + /** + * @return the physicalPlanBuildTime + */ + public Duration getPhysicalPlanBuildTime() { + return physicalPlanBuildTime; + } + + /** + * @return the queryOptimizationTime + */ + public Duration getQueryOptimizationTime() { + return queryOptimizationTime; + } + + static QueryPreparationTimes createFromCollection( + Collection queryPreparationTimesCollection) { + if (queryPreparationTimesCollection == null) { + throw new NullPointerException("queryPreparationTimesCollection"); + } + + Duration queryCompilationTime = Duration.ZERO; + Duration logicalPlanBuildTime = Duration.ZERO; + Duration physicalPlanBuildTime = Duration.ZERO; + Duration queryOptimizationTime = Duration.ZERO; + + for (QueryPreparationTimes queryPreparationTimes : queryPreparationTimesCollection) { + if (queryPreparationTimes == null) { + throw new NullPointerException("queryPreparationTimesList can not have a null element"); + } + + queryCompilationTime = queryCompilationTime.plus(queryPreparationTimes.queryCompilationTime); + logicalPlanBuildTime = logicalPlanBuildTime.plus(queryPreparationTimes.logicalPlanBuildTime); + physicalPlanBuildTime = physicalPlanBuildTime.plus(queryPreparationTimes.physicalPlanBuildTime); + queryOptimizationTime = queryOptimizationTime.plus(queryPreparationTimes.queryOptimizationTime); + } + + return new QueryPreparationTimes( + queryCompilationTime, + logicalPlanBuildTime, + physicalPlanBuildTime, + queryOptimizationTime); + } + + static QueryPreparationTimes createFromDelimitedString(String delimitedString) { + HashMap metrics = 
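+ // parseDelimitedString turns the backend's "name1=value1;name2=value2" metrics header into a
+ // name -> value map; the *InMs entries looked up below are then converted back into Durations.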
QueryMetricsUtils.parseDelimitedString(delimitedString); + + return new QueryPreparationTimes( + QueryMetricsUtils.durationFromMetrics(metrics, QueryMetricsConstants.QueryCompileTimeInMs), + QueryMetricsUtils.durationFromMetrics(metrics, QueryMetricsConstants.LogicalPlanBuildTimeInMs), + QueryMetricsUtils.durationFromMetrics(metrics, QueryMetricsConstants.PhysicalPlanBuildTimeInMs), + QueryMetricsUtils.durationFromMetrics(metrics, QueryMetricsConstants.QueryOptimizationTimeInMs)); + } + + String toDelimitedString() { + String formatString = "%s=%.2f;%s=%.2f;%s=%.2f;%s=%.2f"; + return String.format( + formatString, + QueryMetricsConstants.QueryCompileTimeInMs, + this.queryCompilationTime.toMillis(), + QueryMetricsConstants.LogicalPlanBuildTimeInMs, + this.logicalPlanBuildTime.toMillis(), + QueryMetricsConstants.PhysicalPlanBuildTimeInMs, + this.physicalPlanBuildTime.toMillis(), + QueryMetricsConstants.QueryOptimizationTimeInMs, + this.queryOptimizationTime.toMillis()); + } + + String toTextString(int indentLevel) { + if (indentLevel == Integer.MAX_VALUE) { + throw new NumberFormatException("indentLevel input must be less than Integer.MaxValue"); + } + + StringBuilder stringBuilder = new StringBuilder(); + + QueryMetricsUtils.appendHeaderToStringBuilder(stringBuilder, QueryMetricsConstants.QueryPreparationTimesText, + indentLevel); + + QueryMetricsUtils.appendNanosecondsToStringBuilder(stringBuilder, QueryMetricsConstants.QueryCompileTimeText + , this.queryCompilationTime.toNanos(), indentLevel + 1); + + QueryMetricsUtils.appendNanosecondsToStringBuilder(stringBuilder, + QueryMetricsConstants.LogicalPlanBuildTimeText, this.logicalPlanBuildTime.toNanos(), + indentLevel + 1); + + QueryMetricsUtils.appendNanosecondsToStringBuilder(stringBuilder, + QueryMetricsConstants.PhysicalPlanBuildTimeText, this.physicalPlanBuildTime.toNanos(), + indentLevel + 1); + + QueryMetricsUtils.appendNanosecondsToStringBuilder(stringBuilder, + QueryMetricsConstants.QueryOptimizationTimeText, this.queryOptimizationTime.toNanos(), + indentLevel + 1); + return stringBuilder.toString(); + } +} + diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/RMResources.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/RMResources.java new file mode 100644 index 0000000000000..d8c58e928e9e2 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/RMResources.java @@ -0,0 +1,72 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal; + +public class RMResources { + + public static final String UnknownResourceType = "Resource type %s is unknown"; + public static final String InvalidDocumentCollection = "The specified document collection is invalid."; + public static final String StringArgumentNullOrEmpty = "String argument %s is null or empty"; + public static final String PartitionKeyAndParitionKeyRangeIdBothSpecified = "Both Partition Key and Partition Key range are specified in %s"; + public static final String PartitionKeyRangeIdOrPartitionKeyMustBeSpecified = "One of the partition key range id or partition key must be specified"; + public static final String TooFewPartitionKeyComponents = "PartitionKey has fewer components than defined in the collection resource."; + public static final String TooManyPartitionKeyComponents = "PartitionKey has more components than defined in the collection resource."; + public static final String UnableToDeserializePartitionKeyValue = "Cannot deserialize PartitionKey value '%s'"; + public static final String Gone = "The requested resource is no longer available at the server."; + public static final String ExceptionMessageAddIpAddress = "%s, Local IP: %s"; + public static final String ExceptionMessage = "Message: %s"; + public static final String ServiceUnavailable = "Service is currently unavailable, please retry after a while. If this problem persists, please contact support."; + public static final String InternalServerError = "An unknown server error occurred when processing this request."; + public static final String InvalidBackendResponse = "The backend response was not in the correct format."; + public static final String PartitionKeyRangeNotFound = "PartitionKeyRange with id %s in collection %s doesn't exist"; + public static final String InvalidTarget = "Target for the request is invalid"; + public static final String InvalidPartitionKey = "Partition key %s is invalid."; + public static final String PartitionKeyMismatch = "Partition key provided either doesn't correspond to definition in the collection or doesn't match partition key field values specified in the document."; + public static final String MissingPartitionKeyValue = "PartitionKey value must be supplied for this operation."; + public static final String InvalidConflictResolutionMode = "Invalid mode '%s' for setting '%s'. Expected mode is '%s'."; + public static final String InvalidRegionsInSessionToken = "Compared session tokens '%s' and '%s' have unexpected regions."; + public static final String InvalidSessionToken = "The session token provided '%s' is invalid."; + public static final String ResourceTokenNotFound = "Resource token not found."; + public static final String Unauthorized = "Unable to authenticate the request. The request requires valid user authentication."; + public static final String Forbidden = "Unable to proceed with the request. 
Please check the authorization claims to ensure you have the required permissions to process the request."; + public static final String NotFound = "Entity with the specified id does not exist in the system."; + public static final String BadRequest = "One of the input values is invalid."; + public static final String MethodNotAllowed = "The requested verb is not supported."; + public static final String EntityAlreadyExists = "Entity with the specified id already exists in the system."; + public static final String PreconditionFailed = "Operation cannot be performed because one of the specified preconditions is not met."; + public static final String RequestEntityTooLarge = "The size of the response exceeded the maximum allowed size; limit the response size by specifying a smaller value for the '%s' header."; + public static final String Locked = ""; + public static final String RetryWith = "Retry the request."; + public static final String TooManyRequests = "The request rate is too large. Please retry after some time."; + public static final String UnexpectedResourceType = "ResourceType %s is unexpected."; + public static final String InvalidHeaderValue = "Value '%s' specified for the header '%s' is invalid."; + public static final String RequestTimeout = "Request timed out."; + public static final String GlobalStrongWriteBarrierNotMet = "Global strong write barrier has not been met for the request."; + public static final String InvalidRequestHeaderValue = "Invalid value for request header %s: %s"; + public static final String InvalidResourceAddress = "Invalid address for resource %s: %s"; + public static final String ReadQuorumNotMet = "Read quorum size of %d is not met for the request."; + public static final String ReadSessionNotAvailable = "The read session is not available for the input session token."; + public static final String InvalidUrl = "InvalidUrl"; + public static final String InvalidResourceUrlQuery = "The value %s specified for query %s is invalid."; + public static final String PartitionKeyRangeIdAbsentInContext = "PartitionKeyRangeId is absent in the context."; +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ReadFeedKeyType.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ReadFeedKeyType.java new file mode 100644 index 0000000000000..7d4ae90623ec2 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ReadFeedKeyType.java @@ -0,0 +1,40 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +package com.azure.data.cosmos.internal; + +/** + * Type of Start and End key for ReadFeedKey + */ +public enum ReadFeedKeyType { + /** + * Use resource name + */ + ResourceId, + + /** + * Use effective partition key + */ + EffectivePartitionKey +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/RemoteStorageType.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/RemoteStorageType.java new file mode 100644 index 0000000000000..b36fecad18f33 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/RemoteStorageType.java @@ -0,0 +1,42 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +package com.azure.data.cosmos.internal; + +public enum RemoteStorageType { + /** + * Use standard storage + */ + NotSpecified, + + /** + * Use standard storage + */ + Standard, + + /** + * Use premium storage + */ + Premium +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/RenameCollectionAwareClientRetryPolicy.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/RenameCollectionAwareClientRetryPolicy.java new file mode 100644 index 0000000000000..86570eb1c4b72 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/RenameCollectionAwareClientRetryPolicy.java @@ -0,0 +1,109 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.internal.caches.RxClientCollectionCache; +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Mono; + +import java.time.Duration; + +public class RenameCollectionAwareClientRetryPolicy implements IDocumentClientRetryPolicy { + + private final static Logger logger = LoggerFactory.getLogger(RenameCollectionAwareClientRetryPolicy.class); + + private final IDocumentClientRetryPolicy retryPolicy; + private final ISessionContainer sessionContainer; + private final RxClientCollectionCache collectionCache; + private RxDocumentServiceRequest request; + private boolean hasTriggered = false; + + public RenameCollectionAwareClientRetryPolicy(ISessionContainer sessionContainer, RxClientCollectionCache collectionCache, IDocumentClientRetryPolicy retryPolicy) { + this.retryPolicy = retryPolicy; + this.sessionContainer = sessionContainer; + this.collectionCache = collectionCache; + this.request = null; + } + + @Override + public void onBeforeSendRequest(RxDocumentServiceRequest request) { + this.request = request; + this.retryPolicy.onBeforeSendRequest(request); + } + + @Override + public Mono shouldRetry(Exception e) { + return this.retryPolicy.shouldRetry(e).flatMap(shouldRetryResult -> { + if (!shouldRetryResult.shouldRetry && !this.hasTriggered) { + CosmosClientException clientException = Utils.as(e, CosmosClientException.class); + + if (this.request == null) { + // someone didn't call OnBeforeSendRequest - nothing we can do + logger.error("onBeforeSendRequest is not invoked, encountered failure due to request being null", e); + return Mono.just(ShouldRetryResult.error(e)); + } + + if (clientException != null && this.request.getIsNameBased() && + Exceptions.isStatusCode(clientException, HttpConstants.StatusCodes.NOTFOUND) && + Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)) { + // Clear the session token, because the collection name might be reused. 
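+ // Recovery path, guarded by hasTriggered so it runs at most once per request:
+ // 1. drop the session token cached under this collection's name (the name may now map to a new collection),
+ // 2. force a name-cache refresh and clear the previously resolved collection RID,
+ // 3. if the collection resolves again with a RID present, retry the operation immediately with zero back-off.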
+ logger.warn("Clear the token for named base request {}", request.getResourceAddress()); + + this.sessionContainer.clearTokenByCollectionFullName(request.getResourceAddress()); + + this.hasTriggered = true; + + String oldCollectionRid = request.requestContext.resolvedCollectionRid; + + request.forceNameCacheRefresh = true; + request.requestContext.resolvedCollectionRid = null; + + Mono collectionObs = this.collectionCache.resolveCollectionAsync(request); + + return collectionObs.flatMap(collectionInfo -> { + if (!StringUtils.isEmpty(oldCollectionRid) && !StringUtils.isEmpty(collectionInfo.resourceId())) { + return Mono.just(ShouldRetryResult.retryAfter(Duration.ZERO)); + } + return Mono.just(shouldRetryResult); + }).switchIfEmpty(Mono.defer(() -> { + logger.warn("Can't recover from session unavailable exception because resolving collection name {} returned null", request.getResourceAddress()); + return Mono.just(shouldRetryResult); + })).onErrorResume(throwable -> { + // When resolveCollectionAsync throws an exception ignore it because it's an attempt to recover an existing + // error. When the recovery fails we return ShouldRetryResult.noRetry and propagate the original exception to the client + + logger.warn("Can't recover from session unavailable exception because resolving collection name {} failed with {}", request.getResourceAddress(), throwable.getMessage()); + if (throwable instanceof Exception) { + return Mono.just(ShouldRetryResult.error((Exception) throwable)); + } + return Mono.error(throwable); + }); + } + } + return Mono.just(shouldRetryResult); + }); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ReplicatedResourceClientUtils.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ReplicatedResourceClientUtils.java new file mode 100644 index 0000000000000..a75c5c83fab7b --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ReplicatedResourceClientUtils.java @@ -0,0 +1,63 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal; + +/** + * While this class is public, but it is not part of our published public APIs. + * This is meant to be internally used only by our sdk. 
+ */ +public class ReplicatedResourceClientUtils { + + public static boolean isReadingFromMaster(ResourceType resourceType, OperationType operationType) { + if (resourceType == ResourceType.Offer || + resourceType == ResourceType.Database || + resourceType == ResourceType.User || + resourceType == ResourceType.UserDefinedType || + resourceType == ResourceType.Permission || + resourceType == ResourceType.Topology || + resourceType == ResourceType.DatabaseAccount || + (resourceType == ResourceType.PartitionKeyRange && operationType != OperationType.GetSplitPoint && operationType != OperationType.AbortSplit) || + (resourceType == ResourceType.DocumentCollection && (operationType == OperationType.ReadFeed || operationType == OperationType.Query || operationType == OperationType.SqlQuery))) + { + return true; + } + + return false; + } + + public static boolean isMasterResource(ResourceType resourceType) { + if (resourceType == ResourceType.Offer || + resourceType == ResourceType.Database || + resourceType == ResourceType.User || + resourceType == ResourceType.UserDefinedType || + resourceType == ResourceType.Permission || + resourceType == ResourceType.Topology || + resourceType == ResourceType.DatabaseAccount || + resourceType == ResourceType.PartitionKeyRange || + resourceType == ResourceType.DocumentCollection) { + return true; + } + + return false; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ReplicationPolicy.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ReplicationPolicy.java new file mode 100644 index 0000000000000..0036edba15523 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ReplicationPolicy.java @@ -0,0 +1,70 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.JsonSerializable; + +/** + * Encapsulates the replication policy in the Azure Cosmos DB database service. + */ +public class ReplicationPolicy extends JsonSerializable { + private static final int DEFAULT_MAX_REPLICA_SET_SIZE = 4; + private static final int DEFAULT_MIN_REPLICA_SET_SIZE = 3; + + public ReplicationPolicy() { + } + + /** + * Constructor. + * + * @param jsonString the json string that represents the replication policy. 
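+ * <p>Illustrative sketch: when the JSON does not carry the replica set size properties,
+ * the getters fall back to the defaults declared above.
+ * <pre>{@code
+ * ReplicationPolicy policy = new ReplicationPolicy("{}");
+ * int max = policy.getMaxReplicaSetSize(); // DEFAULT_MAX_REPLICA_SET_SIZE = 4
+ * int min = policy.getMinReplicaSetSize(); // DEFAULT_MIN_REPLICA_SET_SIZE = 3
+ * }</pre>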
+ */ + public ReplicationPolicy(String jsonString) { + super(jsonString); + } + + public int getMaxReplicaSetSize() { + Integer maxReplicaSetSize = super.getInt(Constants.Properties.MAX_REPLICA_SET_SIZE); + if (maxReplicaSetSize == null) { + return DEFAULT_MAX_REPLICA_SET_SIZE; + } + + return maxReplicaSetSize; + } + + public void setMaxReplicaSetSize(int value) { + BridgeInternal.setProperty(this, Constants.Properties.MAX_REPLICA_SET_SIZE, value); + } + + public int getMinReplicaSetSize() { + Integer minReplicaSetSize = super.getInt(Constants.Properties.MIN_REPLICA_SET_SIZE); + if (minReplicaSetSize == null) { + return DEFAULT_MIN_REPLICA_SET_SIZE; + } + + return minReplicaSetSize; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/RequestChargeTracker.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/RequestChargeTracker.java new file mode 100644 index 0000000000000..3621fca0ff29b --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/RequestChargeTracker.java @@ -0,0 +1,46 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +import java.util.concurrent.atomic.AtomicLong; + +/** + * Tracks request charges in the Azure Cosmos DB database service. 
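+ * <p>Charges are accumulated as fixed-point values (RU * 1000) in an {@link AtomicLong}, so
+ * concurrent callers can add charges lock-free while keeping roughly three decimal places of
+ * precision. Minimal illustration:
+ * <pre>{@code
+ * RequestChargeTracker tracker = new RequestChargeTracker();
+ * tracker.addCharge(2.5);
+ * tracker.addCharge(1.25);
+ * double total = tracker.getTotalRequestCharge(); // 3.75
+ * }</pre>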
+ */ +public final class RequestChargeTracker { + private final static int NUMBER_OF_DECIMAL_POINT_TO_RESERVE_FACTOR = 1000; + private final AtomicLong totalRUs = new AtomicLong(); + + public double getTotalRequestCharge() { + return ((double) this.totalRUs.get()) / NUMBER_OF_DECIMAL_POINT_TO_RESERVE_FACTOR; + } + + public void addCharge(double ruUsage) { + this.totalRUs.addAndGet((long) (ruUsage * NUMBER_OF_DECIMAL_POINT_TO_RESERVE_FACTOR)); + } + + public double getAndResetCharge() { + return (double) this.totalRUs.getAndSet(0) / NUMBER_OF_DECIMAL_POINT_TO_RESERVE_FACTOR; + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/RequestOptions.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/RequestOptions.java new file mode 100644 index 0000000000000..556491686f7df --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/RequestOptions.java @@ -0,0 +1,334 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.AccessCondition; +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.IndexingDirective; +import com.azure.data.cosmos.PartitionKey; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * Encapsulates options that can be specified for a request issued to the Azure Cosmos DB database service. + */ +public class RequestOptions { + private Map customOptions; + private List preTriggerInclude; + private List postTriggerInclude; + private AccessCondition accessCondition; + private IndexingDirective indexingDirective; + private ConsistencyLevel consistencyLevel; + private String sessionToken; + private Integer resourceTokenExpirySeconds; + private String offerType; + private Integer offerThroughput; + private PartitionKey partitionkey; + private String partitionKeyRangeId; + private boolean scriptLoggingEnabled; + private boolean populateQuotaInfo; + private Map properties; + + /** + * Gets the triggers to be invoked before the operation. + * + * @return the triggers to be invoked before the operation. + */ + public List getPreTriggerInclude() { + return this.preTriggerInclude; + } + + /** + * Sets the triggers to be invoked before the operation. + * + * @param preTriggerInclude the triggers to be invoked before the operation. 
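+ * <p>For example (the trigger id below is hypothetical):
+ * <pre>{@code
+ * RequestOptions options = new RequestOptions();
+ * options.setPreTriggerInclude(java.util.Collections.singletonList("validateToDoItem"));
+ * }</pre>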
+ */ + public void setPreTriggerInclude(List preTriggerInclude) { + this.preTriggerInclude = preTriggerInclude; + } + + /** + * Gets the triggers to be invoked after the operation. + * + * @return the triggers to be invoked after the operation. + */ + public List getPostTriggerInclude() { + return this.postTriggerInclude; + } + + /** + * Sets the triggers to be invoked after the operation. + * + * @param postTriggerInclude the triggers to be invoked after the operation. + */ + public void setPostTriggerInclude(List postTriggerInclude) { + this.postTriggerInclude = postTriggerInclude; + } + + /** + * Gets the conditions associated with the request. + * + * @return the access condition. + */ + public AccessCondition getAccessCondition() { + return this.accessCondition; + } + + /** + * Sets the conditions associated with the request. + * + * @param accessCondition the access condition. + */ + public void setAccessCondition(AccessCondition accessCondition) { + this.accessCondition = accessCondition; + } + + /** + * Gets the indexing directive (index, do not index etc). + * + * @return the indexing directive. + */ + public IndexingDirective getIndexingDirective() { + return this.indexingDirective; + } + + /** + * Sets the indexing directive (index, do not index etc). + * + * @param indexingDirective the indexing directive. + */ + public void setIndexingDirective(IndexingDirective indexingDirective) { + this.indexingDirective = indexingDirective; + } + + /** + * Gets the consistency level required for the request. + * + * @return the consistency level. + */ + public ConsistencyLevel getConsistencyLevel() { + return this.consistencyLevel; + } + + /** + * Sets the consistency level required for the request. + * + * @param consistencyLevel the consistency level. + */ + public void setConsistencyLevel(ConsistencyLevel consistencyLevel) { + this.consistencyLevel = consistencyLevel; + } + + /** + * Gets the token for use with session consistency. + * + * @return the session token. + */ + public String getSessionToken() { + return this.sessionToken; + } + + /** + * Sets the token for use with session consistency. + * + * @param sessionToken the session token. + */ + public void setSessionToken(String sessionToken) { + this.sessionToken = sessionToken; + } + + /** + * Gets the expiry time for resource token. Used when creating, updating, reading permission. + * + * @return the resource token expiry seconds. + */ + public Integer getResourceTokenExpirySeconds() { + return this.resourceTokenExpirySeconds; + } + + /** + * Sets the expiry time for resource token. Used when creating, updating, reading permission. + * + * @param resourceTokenExpirySeconds the resource token expiry seconds. + */ + public void setResourceTokenExpirySeconds(Integer resourceTokenExpirySeconds) { + this.resourceTokenExpirySeconds = resourceTokenExpirySeconds; + } + + /** + * Gets the offer type when creating a document collection. + * + * @return the offer type. + */ + public String getOfferType() { + return this.offerType; + } + + /** + * Sets the offer type when creating a document collection. + * + * @param offerType the offer type. + */ + public void setOfferType(String offerType) { + this.offerType = offerType; + } + + /** + * Gets the throughput in the form of Request Units per second when creating a document collection. + * + * @return the throughput value. 
+ */ + public Integer getOfferThroughput() { + return this.offerThroughput; + } + + /** + * Sets the throughput in the form of Request Units per second when creating a document collection. + * + * @param offerThroughput the throughput value. + */ + public void setOfferThroughput(Integer offerThroughput) { + this.offerThroughput = offerThroughput; + } + + /** + * Gets the partition key used to identify the current request's target partition. + * + * @return the partition key value. + */ + public PartitionKey getPartitionKey() { + return this.partitionkey; + } + + /** + * Sets the partition key used to identify the current request's target partition. + * + * @param partitionkey the partition key value. + */ + public void setPartitionKey(PartitionKey partitionkey) { + this.partitionkey = partitionkey; + } + + /** + * Internal usage only: Gets the partition key range id used to identify the current request's target partition. + * + * @return the partition key range id value. + */ + String getPartitionKeyRangeId() { + return this.partitionKeyRangeId; + } + + /** + * Internal usage only: Sets the partition key range id used to identify the current request's target partition. + * + * @param partitionKeyRangeId the partition key range id value. + */ + protected void setPartitionKeyRengeId(String partitionKeyRangeId) { + this.partitionKeyRangeId = partitionKeyRangeId; + } + + /** + * Gets whether Javascript stored procedure logging is enabled for the current request in the Azure Cosmos DB database + * service or not. + * + * @return true if Javascript stored procedure logging is enabled + */ + public boolean isScriptLoggingEnabled() { + return scriptLoggingEnabled; + } + + /** + * Sets whether Javascript stored procedure logging is enabled for the current request in the Azure Cosmos DB database + * service or not. + * + * @param scriptLoggingEnabled true if stored procedure Javascript logging is enabled + */ + public void setScriptLoggingEnabled(boolean scriptLoggingEnabled) { + this.scriptLoggingEnabled = scriptLoggingEnabled; + } + + /** + * Gets the PopulateQuotaInfo setting for document collection read requests in the Azure Cosmos DB database service. + * PopulateQuotaInfo is used to enable/disable getting document collection quota related stats for document + * collection read requests. + * + * @return true if PopulateQuotaInfo is enabled + */ + public boolean isPopulateQuotaInfo() { + return populateQuotaInfo; + } + + /** + * Sets the PopulateQuotaInfo setting for document collection read requests in the Azure Cosmos DB database service. + * PopulateQuotaInfo is used to enable/disable getting document collection quota related stats for document + * collection read requests. 
+ * + * @param populateQuotaInfo a boolean value indicating whether PopulateQuotaInfo is enabled or not + */ + public void setPopulateQuotaInfo(boolean populateQuotaInfo) { + this.populateQuotaInfo = populateQuotaInfo; + } + + /** + * Sets the custom request option value by key + * + * @param name a string representing the custom option's name + * @param value a STRING representing the custom option's value + */ + public void setHeader(String name, String value) { + if (this.customOptions == null) { + this.customOptions = new HashMap<>(); + } + this.customOptions.put(name, value); + } + + /** + * Gets the custom request options + * + * @return Map of custom request options + */ + public Map getHeaders() { + return this.customOptions; + } + /** + * Gets the properties + * + * @return Map of request options properties + */ + public Map getProperties() { + return properties; + } + + /** + * Sets the properties used to identify the request token. + * + * @param properties the properties. + */ + public void setProperties(Map properties) { + this.properties = properties; + } + +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ResetSessionTokenRetryPolicyFactory.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ResetSessionTokenRetryPolicyFactory.java new file mode 100644 index 0000000000000..8a76dde7f420b --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ResetSessionTokenRetryPolicyFactory.java @@ -0,0 +1,43 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.internal.caches.RxClientCollectionCache; + +public class ResetSessionTokenRetryPolicyFactory implements IRetryPolicyFactory { + + private final IRetryPolicyFactory retryPolicy; + private final ISessionContainer sessionContainer; + private final RxClientCollectionCache collectionCache; + + public ResetSessionTokenRetryPolicyFactory(ISessionContainer sessionContainer, RxClientCollectionCache collectionCache, IRetryPolicyFactory retryPolicy) { + this.retryPolicy = retryPolicy; + this.sessionContainer = sessionContainer; + this.collectionCache = collectionCache; + } + + @Override + public IDocumentClientRetryPolicy getRequestPolicy() { + return new RenameCollectionAwareClientRetryPolicy(this.sessionContainer, this.collectionCache, retryPolicy.getRequestPolicy()); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ResourceId.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ResourceId.java new file mode 100644 index 0000000000000..deeba0aec6352 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ResourceId.java @@ -0,0 +1,540 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.tuple.Pair; + +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; + +/** + * Used internally to represents a Resource ID in the Azure Cosmos DB database service. 
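+ * <p>A rid is a short Base64-style token ('/' swapped for '-') over a packed byte layout:
+ * 4 bytes of database id, optionally followed by 4 bytes of collection or user id, optionally
+ * followed by 8 bytes identifying a child resource (document, stored procedure, trigger, UDF,
+ * conflict, permission or partition key range), optionally followed by 4 bytes of attachment id;
+ * offer ids use a separate 3-byte form. Illustrative usage (someRidString stands for any rid
+ * string handed out by the service):
+ * <pre>{@code
+ * ResourceId rid = ResourceId.parse(someRidString);
+ * long collectionKey = rid.getUniqueDocumentCollectionId(); // (databaseId << 32) | collectionId
+ * }</pre>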
+ */ +public class ResourceId { + static final short Length = 20; + static final short OFFER_ID_LENGTH = 3; + static final short MAX_PATH_FRAGMENT = 8; + + private int database; + private int documentCollection; + private long storedProcedure; + private long trigger; + private long userDefinedFunction; + private long conflict; + private long document; + private long partitionKeyRange; + private int user; + private long permission; + private int attachment; + private long offer; + + private ResourceId() { + this.offer = 0; + this.database = 0; + this.documentCollection = 0; + this.storedProcedure = 0; + this.trigger = 0; + this.userDefinedFunction = 0; + this.document = 0; + this.partitionKeyRange = 0; + this.user = 0; + this.conflict = 0; + this.permission = 0; + this.attachment = 0; + } + + public static ResourceId parse(String id) throws IllegalArgumentException { + Pair pair = ResourceId.tryParse(id); + + if (!pair.getKey()) { + throw new IllegalArgumentException(String.format( + "INVALID resource id %s", id)); + } + return pair.getValue(); + } + + public static byte[] parse(ResourceType type, String id) { + if (ResourceId.hasNonHierarchicalResourceId(type)) { + return id.getBytes(StandardCharsets.UTF_8); + } + return ResourceId.parse(id).getValue(); + } + + private static boolean hasNonHierarchicalResourceId(ResourceType type) { + switch (type) { + case MasterPartition: + case ServerPartition: + case RidRange: + return true; + default: + return false; + } + } + + public static ResourceId newDatabaseId(int dbid) { + ResourceId resourceId = new ResourceId(); + resourceId.database = dbid; + return resourceId; + } + + public static ResourceId newDocumentCollectionId(String databaseId, int collectionId) { + ResourceId dbId = ResourceId.parse(databaseId); + + return newDocumentCollectionId(dbId.database, collectionId); + } + + static ResourceId newDocumentCollectionId(int dbId, int collectionId) { + ResourceId collectionResourceId = new ResourceId(); + collectionResourceId.database = dbId; + collectionResourceId.documentCollection = collectionId; + + return collectionResourceId; + } + + public static ResourceId newUserId(String databaseId, int userId) { + ResourceId dbId = ResourceId.parse(databaseId); + + ResourceId userResourceId = new ResourceId(); + userResourceId.database = dbId.database; + userResourceId.user = userId; + + return userResourceId; + } + + public static ResourceId newPermissionId(String userId, long permissionId) { + ResourceId usrId = ResourceId.parse(userId); + + ResourceId permissionResourceId = new ResourceId(); + permissionResourceId.database = usrId.database; + permissionResourceId.user = usrId.user; + permissionResourceId.permission = permissionId; + return permissionResourceId; + } + + public static ResourceId newAttachmentId(String documentId, int attachmentId) { + ResourceId docId = ResourceId.parse(documentId); + + ResourceId attachmentResourceId = new ResourceId(); + attachmentResourceId.database = docId.database; + attachmentResourceId.documentCollection = docId.documentCollection; + attachmentResourceId.document = docId.document; + attachmentResourceId.attachment = attachmentId; + + return attachmentResourceId; + } + + public static Pair tryParse(String id) { + ResourceId rid = null; + + try { + if (StringUtils.isEmpty(id)) + return Pair.of(false, null); + + if (id.length() % 4 != 0) { + // our ResourceId string is always padded + return Pair.of(false, null); + } + + byte[] buffer = null; + + Pair pair = ResourceId.verify(id); + + if (!pair.getKey()) + 
return Pair.of(false, null); + + buffer = pair.getValue(); + + if (buffer.length % 4 != 0 && buffer.length != ResourceId.OFFER_ID_LENGTH) { + return Pair.of(false, null); + } + + rid = new ResourceId(); + + if (buffer.length == ResourceId.OFFER_ID_LENGTH) { + rid.offer = 0; + for (int index = 0; index < ResourceId.OFFER_ID_LENGTH; index++) + { + rid.offer |= (long)(buffer[index] << (index * 8)); + } + return Pair.of(true, rid); + } + + if (buffer.length >= 4) + rid.database = ByteBuffer.wrap(buffer).getInt(); + + if (buffer.length >= 8) { + byte[] temp = new byte[4]; + ResourceId.blockCopy(buffer, 4, temp, 0, 4); + + boolean isCollection = (temp[0] & (128)) > 0; + + if (isCollection) { + rid.documentCollection = ByteBuffer.wrap(temp).getInt(); + + if (buffer.length >= 16) { + byte[] subCollRes = new byte[8]; + ResourceId.blockCopy(buffer, 8, subCollRes, 0, 8); + + long subCollectionResource = ByteBuffer.wrap(buffer, 8, 8).getLong(); + if ((subCollRes[7] >> 4) == (byte) CollectionChildResourceType.Document) { + rid.document = subCollectionResource; + + if (buffer.length == 20) { + rid.attachment = ByteBuffer.wrap(buffer, 16, 4).getInt(); + } + } else if (Math.abs(subCollRes[7] >> 4) == (byte) CollectionChildResourceType.StoredProcedure) { + rid.storedProcedure = subCollectionResource; + } else if ((subCollRes[7] >> 4) == (byte) CollectionChildResourceType.Trigger) { + rid.trigger = subCollectionResource; + } else if ((subCollRes[7] >> 4) == (byte) CollectionChildResourceType.UserDefinedFunction) { + rid.userDefinedFunction = subCollectionResource; + } else if ((subCollRes[7] >> 4) == (byte) CollectionChildResourceType.Conflict) { + rid.conflict = subCollectionResource; + } else if ((subCollRes[7] >> 4) == (byte) CollectionChildResourceType.PartitionKeyRange) { + rid.partitionKeyRange = subCollectionResource; + } else { + return Pair.of(false, rid); + } + } else if (buffer.length != 8) { + return Pair.of(false, rid); + } + } else { + rid.user = ByteBuffer.wrap(temp).getInt(); + + if (buffer.length == 16) { + rid.permission = ByteBuffer.wrap(buffer, 8, 8).getLong(); + } else if (buffer.length != 8) { + return Pair.of(false, rid); + } + } + } + + return Pair.of(true, rid); + } catch (Exception e) { + return Pair.of(false, null); + } + } + + public static Pair verify(String id) { + if (StringUtils.isEmpty(id)) + throw new IllegalArgumentException("id"); + + byte[] buffer = null; + + try { + buffer = ResourceId.fromBase64String(id); + } catch (Exception e) { + } + + if (buffer == null || buffer.length > ResourceId.Length) { + buffer = null; + return Pair.of(false, buffer); + } + + return Pair.of(true, buffer); + } + + public static boolean verifyBool(String id) { + return verify(id).getKey(); + } + + static byte[] fromBase64String(String s) { + return Utils.Base64Decoder.decode(s.replace('-', '/')); + } + + static String toBase64String(byte[] buffer) { + return ResourceId.toBase64String(buffer, 0, buffer.length); + } + + static String toBase64String(byte[] buffer, int offset, int length) { + byte[] subBuffer = Arrays.copyOfRange(buffer, offset, length); + + return Utils.encodeBase64String(subBuffer).replace('/', '-'); + } + + // Copy the bytes provided with a for loop, faster when there are only a few + // bytes to copy + static void blockCopy(byte[] src, int srcOffset, byte[] dst, int dstOffset, int count) { + int stop = srcOffset + count; + for (int i = srcOffset; i < stop; i++) + dst[dstOffset++] = src[i]; + } + + private static byte[] convertToBytesUsingByteBuffer(int value) { + ByteOrder 
order = ByteOrder.BIG_ENDIAN; + ByteBuffer buffer = ByteBuffer.allocate(4); + buffer.order(order); + return buffer.putInt(value).array(); + } + + private static byte[] convertToBytesUsingByteBuffer(long value) { + ByteOrder order = ByteOrder.BIG_ENDIAN; + ByteBuffer buffer = ByteBuffer.allocate(8); + buffer.order(order); + return buffer.putLong(value).array(); + } + + public boolean isDatabaseId() { + return this.getDatabase() != 0 && (this.getDocumentCollection() == 0 && this.getUser() == 0); + } + + public int getDatabase() { + return this.database; + } + + public ResourceId getDatabaseId() { + ResourceId rid = new ResourceId(); + rid.database = this.database; + return rid; + } + + public int getDocumentCollection() { + return this.documentCollection; + } + + public ResourceId getDocumentCollectionId() { + ResourceId rid = new ResourceId(); + rid.database = this.database; + rid.documentCollection = this.documentCollection; + return rid; + } + + /** + * Unique (across all databases) Id for the DocumentCollection. + * First 4 bytes are DatabaseId and next 4 bytes are CollectionId. + * + * @return the unique collectionId + */ + public long getUniqueDocumentCollectionId() { + return (long) this.database << 32 | this.documentCollection; + } + + public long getStoredProcedure() { + return this.storedProcedure; + } + + public ResourceId getStoredProcedureId() { + ResourceId rid = new ResourceId(); + rid.database = this.database; + rid.documentCollection = this.documentCollection; + rid.storedProcedure = this.storedProcedure; + return rid; + } + + public long getTrigger() { + return this.trigger; + } + + public ResourceId getTriggerId() { + ResourceId rid = new ResourceId(); + rid.database = this.database; + rid.documentCollection = this.documentCollection; + rid.trigger = this.trigger; + return rid; + } + + public long getUserDefinedFunction() { + return this.userDefinedFunction; + } + + public ResourceId getUserDefinedFunctionId() { + ResourceId rid = new ResourceId(); + rid.database = this.database; + rid.documentCollection = this.documentCollection; + rid.userDefinedFunction = this.userDefinedFunction; + return rid; + } + + public long getConflict() { + return this.conflict; + } + + public ResourceId getConflictId() { + ResourceId rid = new ResourceId(); + rid.database = this.database; + rid.documentCollection = this.documentCollection; + rid.conflict = this.conflict; + return rid; + } + + public long getDocument() { + return this.document; + } + + public ResourceId getDocumentId() { + ResourceId rid = new ResourceId(); + rid.database = this.database; + rid.documentCollection = this.documentCollection; + rid.document = this.document; + return rid; + } + + public long getPartitionKeyRange() { + return this.partitionKeyRange; + } + + public ResourceId getPartitionKeyRangeId() { + ResourceId rid = new ResourceId(); + rid.database = this.database; + rid.documentCollection = this.documentCollection; + rid.partitionKeyRange = this.partitionKeyRange; + return rid; + } + + public int getUser() { + return this.user; + } + + public ResourceId getUserId() { + ResourceId rid = new ResourceId(); + rid.database = this.database; + rid.user = this.user; + return rid; + } + + public long getPermission() { + return this.permission; + } + + public ResourceId getPermissionId() { + ResourceId rid = new ResourceId(); + rid.database = this.database; + rid.user = this.user; + rid.permission = this.permission; + return rid; + } + + public int getAttachment() { + return this.attachment; + } + + public ResourceId 
getAttachmentId() { + ResourceId rid = new ResourceId(); + rid.database = this.database; + rid.documentCollection = this.documentCollection; + rid.document = this.document; + rid.attachment = this.attachment; + return rid; + } + + public long getOffer() { return this.offer; } + + public ResourceId getOfferId() { + ResourceId rid = new ResourceId(); + rid.offer = this.offer; + return rid; + } + + public byte[] getValue() { + int len = 0; + if (this.offer != 0) + len += ResourceId.OFFER_ID_LENGTH; + else if (this.database != 0) + len += 4; + if (this.documentCollection != 0 || this.user != 0) + len += 4; + if (this.document != 0 || this.permission != 0 + || this.storedProcedure != 0 || this.trigger != 0 + || this.userDefinedFunction != 0 || this.conflict != 0 + || this.partitionKeyRange != 0) + len += 8; + if (this.attachment != 0) + len += 4; + + byte[] val = new byte[len]; + + if (this.offer != 0) + ResourceId.blockCopy(convertToBytesUsingByteBuffer(this.offer), + 0, val, 0, ResourceId.OFFER_ID_LENGTH); + else if (this.database != 0) + ResourceId.blockCopy(convertToBytesUsingByteBuffer(this.database), + 0, val, 0, 4); + + if (this.documentCollection != 0) + ResourceId.blockCopy( + convertToBytesUsingByteBuffer(this.documentCollection), + 0, val, 4, 4); + else if (this.user != 0) + ResourceId.blockCopy(convertToBytesUsingByteBuffer(this.user), + 0, val, 4, 4); + + if (this.storedProcedure != 0) + ResourceId.blockCopy( + convertToBytesUsingByteBuffer(this.storedProcedure), + 0, val, 8, 8); + else if (this.trigger != 0) + ResourceId.blockCopy(convertToBytesUsingByteBuffer(this.trigger), + 0, val, 8, 8); + else if (this.userDefinedFunction != 0) + ResourceId.blockCopy( + convertToBytesUsingByteBuffer(this.userDefinedFunction), + 0, val, 8, 8); + else if (this.conflict != 0) + ResourceId.blockCopy(convertToBytesUsingByteBuffer(this.conflict), + 0, val, 8, 8); + else if (this.document != 0) + ResourceId.blockCopy(convertToBytesUsingByteBuffer(this.document), + 0, val, 8, 8); + else if (this.permission != 0) + ResourceId.blockCopy( + convertToBytesUsingByteBuffer(this.permission), + 0, val, 8, 8); + else if (this.partitionKeyRange != 0) + ResourceId.blockCopy( + convertToBytesUsingByteBuffer(this.partitionKeyRange), + 0, val, 8, 8); + + if (this.attachment != 0) + ResourceId.blockCopy( + convertToBytesUsingByteBuffer(this.attachment), + 0, val, 16, 4); + + return val; + } + + public String toString() { + return ResourceId.toBase64String(this.getValue()); + } + + public boolean equals(ResourceId other) { + if (other == null) { + return false; + } + + return Arrays.equals(this.getValue(), other.getValue()); + } + + // Using a byte however, we only need nibble here. 
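
The ResourceId class above packs the Cosmos resource hierarchy into a padded Base64 string with '/' swapped for '-': the first 4 bytes are the database id, the next 4 the collection (or user) id, followed by an optional 8-byte child such as a document or stored procedure and a 4-byte attachment. As a minimal illustration of the intended round-trip, the sketch below composes a collection rid from a database rid and parses it back. It is illustrative only: ResourceId is an internal type, the numeric ids are made up, and the collection id deliberately has its high bit set because tryParse() only treats bytes 4-7 as a collection id when that bit is set.

import com.azure.data.cosmos.internal.ResourceId;

public class ResourceIdRoundTripSketch {
    public static void main(String[] args) {
        // A database rid is just the first 4 bytes of the encoded value.
        ResourceId dbRid = ResourceId.newDatabaseId(42);
        System.out.println(ResourceId.parse(dbRid.toString()).getDatabase());   // 42

        // Use a collection id with the high bit set so the parser's isCollection
        // check (temp[0] & 128) recognizes it as a collection rather than a user.
        ResourceId collRid = ResourceId.newDocumentCollectionId(dbRid.toString(), 0x88000007);
        ResourceId parsed = ResourceId.parse(collRid.toString());
        System.out.println(parsed.getDatabase());                               // 42
        System.out.println(parsed.getDocumentCollection() == 0x88000007);       // true
    }
}
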
+ private static class CollectionChildResourceType { + public static final byte Document = 0x0; + public static final byte StoredProcedure = 0x08; + public static final byte Trigger = 0x07; + public static final byte UserDefinedFunction = 0x06; + public static final byte Conflict = 0x04; + public static final byte PartitionKeyRange = 0x05; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ResourceResponse.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ResourceResponse.java new file mode 100644 index 0000000000000..2c9f2b1684094 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ResourceResponse.java @@ -0,0 +1,445 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.CosmosResponseDiagnostics; +import com.azure.data.cosmos.Resource; +import org.apache.commons.lang3.StringUtils; + +import java.time.Duration; +import java.util.HashMap; +import java.util.Map; + +/** + * Represents the service response to a request made from DocumentClient in the Azure Cosmos DB database service. + * Contains both the resource and the response headers. + * + * @param the resource type of the resource response. + */ +public final class ResourceResponse { + private Class cls; + private T resource; + private RxDocumentServiceResponse response; + private Map usageHeaders; + private Map quotaHeaders; + + public ResourceResponse(RxDocumentServiceResponse response, Class cls) { + this.response = response; + this.usageHeaders = new HashMap(); + this.quotaHeaders = new HashMap(); + this.cls = cls; + this.resource = this.response.getResource(this.cls); + } + + /** + * Max Quota. + * + * @return the database quota. + */ + public long getDatabaseQuota() { + return this.getMaxQuotaHeader(Constants.Quota.DATABASE); + } + + /** + * Current Usage. + * + * @return the current database usage. + */ + public long getDatabaseUsage() { + return this.getCurrentQuotaHeader(Constants.Quota.DATABASE); + } + + /** + * Max Quota. + * + * @return the collection quota. + */ + public long getCollectionQuota() { + return this.getMaxQuotaHeader(Constants.Quota.COLLECTION); + } + + /** + * Current Usage. + * + * @return the current collection usage. + */ + public long getCollectionUsage() { + return this.getCurrentQuotaHeader(Constants.Quota.COLLECTION); + } + + /** + * Max Quota. 
+ * + * @return the user quota. + */ + public long getUserQuota() { + return this.getMaxQuotaHeader(Constants.Quota.USER); + } + + /** + * Current Usage. + * + * @return the current user usage. + */ + public long getUserUsage() { + return this.getCurrentQuotaHeader(Constants.Quota.USER); + } + + /** + * Max Quota. + * + * @return the permission quota. + */ + public long getPermissionQuota() { + return this.getMaxQuotaHeader(Constants.Quota.PERMISSION); + } + + /** + * Current Usage. + * + * @return the current permission usage. + */ + public long getPermissionUsage() { + return this.getCurrentQuotaHeader(Constants.Quota.PERMISSION); + } + + /** + * Max Quota. + * + * @return the collection size quota. + */ + public long getCollectionSizeQuota() { + return this.getMaxQuotaHeader(Constants.Quota.COLLECTION_SIZE); + } + + /** + * Current Usage. + * + * @return the collection size usage. + */ + public long getCollectionSizeUsage() { + return this.getCurrentQuotaHeader(Constants.Quota.COLLECTION_SIZE); + } + + /** + * Max Quota. + * + * @return the document quota. + */ + public long getDocumentQuota() { + return this.getMaxQuotaHeader(Constants.Quota.DOCUMENTS_SIZE); + } + + /** + * Current Usage. + * + * @return the document usage. + */ + public long getDocumentUsage() { + return this.getCurrentQuotaHeader(Constants.Quota.DOCUMENTS_SIZE); + } + + /** + * Max document count quota. + * + * @return the document count quota. + */ + public long getDocumentCountQuota() { + return this.getMaxQuotaHeader(Constants.Quota.DOCUMENTS_COUNT); + } + + /** + * Current document count usage. + * + * @return the document count usage. + */ + public long getDocumentCountUsage() { + return this.getCurrentQuotaHeader(Constants.Quota.DOCUMENTS_COUNT); + } + + /** + * Max Quota. + * + * @return the stored procedures quota. + */ + public long getStoredProceduresQuota() { + return this.getMaxQuotaHeader(Constants.Quota.STORED_PROCEDURE); + } + + /** + * Current Usage. + * + * @return the current stored procedures usage. + */ + public long getStoredProceduresUsage() { + return this.getCurrentQuotaHeader(Constants.Quota.STORED_PROCEDURE); + } + + /** + * Max Quota. + * + * @return the triggers quota. + */ + public long getTriggersQuota() { + return this.getMaxQuotaHeader(Constants.Quota.TRIGGER); + } + + /** + * Current Usage. + * + * @return the current triggers usage. + */ + public long getTriggersUsage() { + return this.getCurrentQuotaHeader(Constants.Quota.TRIGGER); + } + + /** + * Max Quota. + * + * @return the user defined functions quota. + */ + public long getUserDefinedFunctionsQuota() { + return this.getMaxQuotaHeader(Constants.Quota.USER_DEFINED_FUNCTION); + } + + /** + * Current Usage. + * + * @return the current user defined functions usage. + */ + public long getUserDefinedFunctionsUsage() { + return this.getCurrentQuotaHeader( + Constants.Quota.USER_DEFINED_FUNCTION); + } + + /** + * Gets the Activity ID for the request. + * + * @return the activity id. + */ + public String getActivityId() { + return this.response.getResponseHeaders().get(HttpConstants.HttpHeaders.ACTIVITY_ID); + } + + /** + * Gets the token used for managing client's consistency requirements. + * + * @return the session token. + */ + public String getSessionToken() { + return this.response.getResponseHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN); + } + + /** + * Gets the HTTP status code associated with the response. + * + * @return the status code. 
+ */ + public int getStatusCode() { + return this.response.getStatusCode(); + } + + /** + * Gets the maximum size limit for this entity (in megabytes (MB) for server resources and in count for master + * resources). + * + * @return the max resource quota. + */ + public String getMaxResourceQuota() { + return this.response.getResponseHeaders().get(HttpConstants.HttpHeaders.MAX_RESOURCE_QUOTA); + } + + /** + * Gets the current size of this entity (in megabytes (MB) for server resources and in count for master resources) + * + * @return the current resource quota usage. + */ + public String getCurrentResourceQuotaUsage() { + return this.response.getResponseHeaders().get(HttpConstants.HttpHeaders.CURRENT_RESOURCE_QUOTA_USAGE); + } + + /** + * Gets the resource for the request. + * + * @return the resource. + */ + public T getResource() { + return this.resource; + } + + /** + * Gets the number of index paths (terms) generated by the operation. + * + * @return the request charge. + */ + public double getRequestCharge() { + String value = this.getResponseHeaders().get(HttpConstants.HttpHeaders.REQUEST_CHARGE); + if (StringUtils.isEmpty(value)) { + return 0; + } + return Double.valueOf(value); + } + + /** + * Gets the headers associated with the response. + * + * @return the response headers. + */ + public Map getResponseHeaders() { + return this.response.getResponseHeaders(); + } + + /** + * Gets the progress of an index transformation, if one is underway. + * + * @return the progress of an index transformation. + */ + public long getIndexTransformationProgress() { + String value = this.getResponseHeaders().get(HttpConstants.HttpHeaders.INDEX_TRANSFORMATION_PROGRESS); + if (StringUtils.isEmpty(value)) { + return -1; + } + return Long.parseLong(value); + } + + /** + * Gets the progress of lazy indexing. + * + * @return the progress of lazy indexing. + */ + public long getLazyIndexingProgress() { + String value = this.getResponseHeaders().get(HttpConstants.HttpHeaders.LAZY_INDEXING_PROGRESS); + if (StringUtils.isEmpty(value)) { + return -1; + } + return Long.parseLong(value); + } + + /** + * Gets the request diagnostic statistics for the current request to Azure Cosmos DB service. + * + * @return request diagnostic statistics for the current request to Azure Cosmos DB service. + */ + public CosmosResponseDiagnostics getCosmosResponseDiagnostics() { + return this.response.getCosmosResponseRequestDiagnosticStatistics(); + } + + /** + * Gets the end-to-end request latency for the current request to Azure Cosmos DB service. + * + * @return end-to-end request latency for the current request to Azure Cosmos DB service. + */ + public Duration getRequestLatency() { + CosmosResponseDiagnostics cosmosResponseDiagnostics = this.response.getCosmosResponseRequestDiagnosticStatistics(); + if (cosmosResponseDiagnostics == null) { + return Duration.ZERO; + } + + return cosmosResponseDiagnostics.requestLatency(); + } + + /** + * Gets the diagnostics information for the current request to Azure Cosmos DB service. + * + * @return diagnostics information for the current request to Azure Cosmos DB service. 
+ */ + public String getCosmosResponseDiagnosticString() { + CosmosResponseDiagnostics cosmosResponseRequestDiagnosticStatistics = this.response.getCosmosResponseRequestDiagnosticStatistics(); + if (cosmosResponseRequestDiagnosticStatistics == null) { + return StringUtils.EMPTY; + } + return cosmosResponseRequestDiagnosticStatistics.toString(); + } + + long getCurrentQuotaHeader(String headerName) { + if (this.usageHeaders.size() == 0 && + !StringUtils.isEmpty(this.getMaxResourceQuota()) && + !StringUtils.isEmpty(this.getCurrentResourceQuotaUsage())) { + this.populateQuotaHeader(this.getMaxResourceQuota(), this.getCurrentResourceQuotaUsage()); + } + + if (this.usageHeaders.containsKey(headerName)) { + return this.usageHeaders.get(headerName); + } + + return 0; + } + + long getMaxQuotaHeader(String headerName) { + if (this.quotaHeaders.size() == 0 && + !StringUtils.isEmpty(this.getMaxResourceQuota()) && + !this.getCurrentResourceQuotaUsage().isEmpty()) { + this.populateQuotaHeader(this.getMaxResourceQuota(), this.getCurrentResourceQuotaUsage()); + } + + if (this.quotaHeaders.containsKey(headerName)) { + return this.quotaHeaders.get(headerName); + } + + return 0; + } + + private void populateQuotaHeader(String headerMaxQuota, String headerCurrentUsage) { + String[] headerMaxQuotaWords = headerMaxQuota.split(Constants.Quota.DELIMITER_CHARS, -1); + String[] headerCurrentUsageWords = headerCurrentUsage.split(Constants.Quota.DELIMITER_CHARS, -1); + + assert (headerMaxQuotaWords.length == headerCurrentUsageWords.length); + + for (int i = 0; i < headerMaxQuotaWords.length; ++i) { + if (headerMaxQuotaWords[i].equalsIgnoreCase(Constants.Quota.DATABASE)) { + this.quotaHeaders.put(Constants.Quota.DATABASE, Long.valueOf(headerMaxQuotaWords[i + 1])); + this.usageHeaders.put(Constants.Quota.DATABASE, Long.valueOf(headerCurrentUsageWords[i + 1])); + } else if (headerMaxQuotaWords[i].equalsIgnoreCase(Constants.Quota.COLLECTION)) { + this.quotaHeaders.put(Constants.Quota.COLLECTION, Long.valueOf(headerMaxQuotaWords[i + 1])); + this.usageHeaders.put(Constants.Quota.COLLECTION, Long.valueOf(headerCurrentUsageWords[i + 1])); + } else if (headerMaxQuotaWords[i].equalsIgnoreCase(Constants.Quota.USER)) { + this.quotaHeaders.put(Constants.Quota.USER, Long.valueOf(headerMaxQuotaWords[i + 1])); + this.usageHeaders.put(Constants.Quota.USER, Long.valueOf(headerCurrentUsageWords[i + 1])); + } else if (headerMaxQuotaWords[i].equalsIgnoreCase(Constants.Quota.PERMISSION)) { + this.quotaHeaders.put(Constants.Quota.PERMISSION, Long.valueOf(headerMaxQuotaWords[i + 1])); + this.usageHeaders.put(Constants.Quota.PERMISSION, Long.valueOf(headerCurrentUsageWords[i + 1])); + } else if (headerMaxQuotaWords[i].equalsIgnoreCase(Constants.Quota.COLLECTION_SIZE)) { + this.quotaHeaders.put(Constants.Quota.COLLECTION_SIZE, Long.valueOf(headerMaxQuotaWords[i + 1])); + this.usageHeaders.put(Constants.Quota.COLLECTION_SIZE, Long.valueOf(headerCurrentUsageWords[i + 1])); + } else if (headerMaxQuotaWords[i].equalsIgnoreCase(Constants.Quota.DOCUMENTS_SIZE)) { + this.quotaHeaders.put(Constants.Quota.DOCUMENTS_SIZE, Long.valueOf(headerMaxQuotaWords[i + 1])); + this.usageHeaders.put(Constants.Quota.DOCUMENTS_SIZE, Long.valueOf(headerCurrentUsageWords[i + 1])); + } else if (headerMaxQuotaWords[i].equalsIgnoreCase(Constants.Quota.STORED_PROCEDURE)) { + this.quotaHeaders.put(Constants.Quota.STORED_PROCEDURE, Long.valueOf(headerMaxQuotaWords[i + 1])); + this.usageHeaders.put(Constants.Quota.STORED_PROCEDURE, Long.valueOf(headerCurrentUsageWords[i + 
1])); + } else if (headerMaxQuotaWords[i].equalsIgnoreCase(Constants.Quota.TRIGGER)) { + this.quotaHeaders.put(Constants.Quota.TRIGGER, Long.valueOf(headerMaxQuotaWords[i + 1])); + this.usageHeaders.put(Constants.Quota.TRIGGER, Long.valueOf(headerCurrentUsageWords[i + 1])); + } else if (headerMaxQuotaWords[i].equalsIgnoreCase(Constants.Quota.USER_DEFINED_FUNCTION)) { + this.quotaHeaders.put(Constants.Quota.USER_DEFINED_FUNCTION, Long.valueOf(headerMaxQuotaWords[i + 1])); + this.usageHeaders.put(Constants.Quota.USER_DEFINED_FUNCTION, + Long.valueOf(headerCurrentUsageWords[i + 1])); + } else if (headerMaxQuotaWords[i].equalsIgnoreCase(Constants.Quota.DOCUMENTS_COUNT)) { + this.quotaHeaders.put(Constants.Quota.DOCUMENTS_COUNT, Long.valueOf(headerMaxQuotaWords[i + 1])); + this.usageHeaders.put(Constants.Quota.DOCUMENTS_COUNT, + Long.valueOf(headerCurrentUsageWords[i + 1])); + } + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ResourceThrottleRetryPolicy.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ResourceThrottleRetryPolicy.java new file mode 100644 index 0000000000000..47ae481869fb2 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ResourceThrottleRetryPolicy.java @@ -0,0 +1,133 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.CosmosClientException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Mono; + +import java.time.Duration; + +/** + * While this class is public, but it is not part of our published public APIs. + * This is meant to be internally used only by our sdk. + */ +public class ResourceThrottleRetryPolicy implements IDocumentClientRetryPolicy{ + + private final static Logger logger = LoggerFactory.getLogger(ResourceThrottleRetryPolicy.class); + + private final static int DefaultMaxWaitTimeInSeconds = 60; + private final static int DefaultRetryInSeconds = 5; + private final int backoffDelayFactor; + private final int maxAttemptCount; + private final Duration maxWaitTime; + + // TODO: is this thread safe? + // should we make this atomic int? 
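
ResourceThrottleRetryPolicy, whose fields begin just below, retries only throttled (HTTP 429) responses: it takes the server-supplied retry-after interval, scales it by backoffDelayFactor, and gives up once the attempt count is exhausted or the single or cumulative delay would exceed the configured maximum wait time. The following standalone sketch mirrors that arithmetic under invented names, so it can run without the internal exception and retry-result types; it is a sketch of the rule, not the policy itself.

import java.time.Duration;

// Hypothetical standalone mirror of checkIfRetryNeeded(): the names here are invented,
// only the backoff arithmetic follows the policy defined in this class.
final class ThrottleBackoffSketch {
    static Duration nextDelay(Duration retryAfter, int backoffDelayFactor,
                              Duration cumulativeSoFar, Duration maxWaitTime) {
        Duration delay = backoffDelayFactor > 1
                ? Duration.ofNanos(retryAfter.toNanos() * backoffDelayFactor)
                : retryAfter;
        Duration cumulative = delay.plus(cumulativeSoFar);
        if (delay.toMillis() < maxWaitTime.toMillis()
                && maxWaitTime.toMillis() >= cumulative.toMillis()) {
            // The backend should never send a zero retry-after; fall back to 5 seconds if it does.
            return delay.isZero() ? Duration.ofSeconds(5) : delay;
        }
        return null; // no retry: the caller maps this to ShouldRetryResult.noRetry()
    }

    public static void main(String[] args) {
        // A 429 carrying a 100 ms retry-after, no prior delay, 60 s cap -> PT0.1S
        System.out.println(nextDelay(Duration.ofMillis(100), 1, Duration.ZERO, Duration.ofSeconds(60)));
    }
}
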
+ private int currentAttemptCount; + private Duration cumulativeRetryDelay; + + public ResourceThrottleRetryPolicy(int maxAttemptCount, int maxWaitTimeInSeconds) { + this(maxAttemptCount, maxWaitTimeInSeconds, 1); + } + + public ResourceThrottleRetryPolicy(int maxAttemptCount) { + this(maxAttemptCount, DefaultMaxWaitTimeInSeconds, 1); + } + + public ResourceThrottleRetryPolicy(int maxAttemptCount, int maxWaitTimeInSeconds, int backoffDelayFactor) { + Utils.checkStateOrThrow(maxWaitTimeInSeconds < Integer.MAX_VALUE / 1000, "maxWaitTimeInSeconds", "maxWaitTimeInSeconds must be less than " + Integer.MAX_VALUE / 1000); + + this.maxAttemptCount = maxAttemptCount; + this.backoffDelayFactor = backoffDelayFactor; + this.maxWaitTime = Duration.ofSeconds(maxWaitTimeInSeconds); + this.currentAttemptCount = 0; + this.cumulativeRetryDelay = Duration.ZERO; + } + + @Override + public Mono shouldRetry(Exception exception) { + Duration retryDelay = Duration.ZERO; + + if (this.currentAttemptCount < this.maxAttemptCount && + (retryDelay = checkIfRetryNeeded(exception)) != null) { + this.currentAttemptCount++; + logger.warn( + "Operation will be retried after {} milliseconds. Current attempt {}, Cumulative delay {}", + retryDelay.toMillis(), + this.currentAttemptCount, + this.cumulativeRetryDelay, + exception); + return Mono.just(ShouldRetryResult.retryAfter(retryDelay)); + } else { + logger.debug( + "Operation will NOT be retried. Current attempt {}", + this.currentAttemptCount, + exception); + return Mono.just(ShouldRetryResult.noRetry()); + } + } + + @Override + public void onBeforeSendRequest(RxDocumentServiceRequest request) { + // no op + } + + // if retry not needed reaturns null + /// + /// Returns True if the given exception is retriable + /// + /// Exception to examine + /// retryDelay + /// True if the exception is retriable; False otherwise + private Duration checkIfRetryNeeded(Exception exception) { + Duration retryDelay = Duration.ZERO; + + CosmosClientException dce = Utils.as(exception, CosmosClientException.class); + + if (dce != null){ + + if (Exceptions.isStatusCode(dce, HttpConstants.StatusCodes.TOO_MANY_REQUESTS)) { + retryDelay = Duration.ofMillis(dce.retryAfterInMilliseconds()); + if (this.backoffDelayFactor > 1) { + retryDelay = Duration.ofNanos(retryDelay.toNanos() * this.backoffDelayFactor); + } + + if (retryDelay.toMillis() < this.maxWaitTime.toMillis() && + this.maxWaitTime.toMillis() >= (this.cumulativeRetryDelay = retryDelay.plus(this.cumulativeRetryDelay)).toMillis()) + { + if (retryDelay == Duration.ZERO){ + // we should never reach here as BE should turn non-zero of retryDelay + logger.trace("Received retryDelay of 0 with Http 429", exception); + retryDelay = Duration.ofSeconds(DefaultRetryInSeconds); + } + + return retryDelay; + } + } + } + // if retry not needed returns null + return null; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ResourceTokenAuthorizationHelper.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ResourceTokenAuthorizationHelper.java new file mode 100644 index 0000000000000..4a37d5b7574fa --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ResourceTokenAuthorizationHelper.java @@ -0,0 +1,212 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, 
including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.internal.routing.PartitionKeyAndResourceTokenPair; +import com.azure.data.cosmos.internal.routing.PartitionKeyInternal; +import org.apache.commons.lang3.tuple.Pair; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.List; +import java.util.Map; + +/** + * This class is used internally and act as a helper in authorization of + * resources from permission feed and its supporting method. + * + */ +public class ResourceTokenAuthorizationHelper { + + private static final Logger logger = LoggerFactory.getLogger(ResourceTokenAuthorizationHelper.class); + + /** + * This method help to differentiate between master key and resource token + * + * @param token + * ResourceToken provide + * @return Whether given token is resource token or not + */ + public static boolean isResourceToken(String token) { + int typeSeparatorPosition = token.indexOf('&'); + if (typeSeparatorPosition == -1) { + return false; + } + String authType = token.substring(0, typeSeparatorPosition); + int typeKeyValueSepartorPosition = authType.indexOf('='); + if (typeKeyValueSepartorPosition == -1 || !authType.substring(0, typeKeyValueSepartorPosition) + .equalsIgnoreCase(Constants.Properties.AUTH_SCHEMA_TYPE)) { + return false; + } + + String authTypeValue = authType.substring(typeKeyValueSepartorPosition + 1); + + return authTypeValue.equalsIgnoreCase(Constants.Properties.RESOURCE_TOKEN); + } + + /** + * Private method which will fetch resource token based on partition key and + * resource address . + * + * @param resourceTokensMap + * @param resourceAddress + * @param partitionKey + * @return + */ + private static String getResourceToken(Map> resourceTokensMap, + String resourceAddress, + PartitionKeyInternal partitionKey) { + List partitionKeyAndResourceTokenPairs = resourceTokensMap + .get(resourceAddress); + if (partitionKeyAndResourceTokenPairs != null) { + for (PartitionKeyAndResourceTokenPair pair : partitionKeyAndResourceTokenPairs) { + if (pair.getPartitionKey().contains(partitionKey) || partitionKey.equals(PartitionKeyInternal.Empty)) { + return pair.getResourceToken(); + } + } + } + + return null; + } + + /** + * This method will try to fetch the resource token to access the resource . + * + * @param resourceTokensMap + * It contains the resource link and its partition key and resource + * token list . + * @param headers + * Header information of the request . + * @param resourceAddress + * Resource full name or ID . + * @param requestVerb + * The verb . 
+ */ + public static String getAuthorizationTokenUsingResourceTokens( + Map> resourceTokensMap, + String requestVerb, + String resourceAddress, + Map headers) { + PartitionKeyInternal partitionKey = PartitionKeyInternal.Empty; + String partitionKeyString = headers.get(HttpConstants.HttpHeaders.PARTITION_KEY); + if (partitionKeyString != null) { + partitionKey = PartitionKeyInternal.fromJsonString(partitionKeyString); + } + + if (PathsHelper.isNameBased(resourceAddress)) { + String resourceToken = null; + for (int index = 2; index < ResourceId.MAX_PATH_FRAGMENT; index = index + 2) { + String resourceParent = PathsHelper.getParentByIndex(resourceAddress, index); + if (resourceParent == null) + break; + resourceToken = getResourceToken(resourceTokensMap, resourceParent, partitionKey); + if (resourceToken != null) + break; + } + + // Get or Head for collection can be done with any child token + if (resourceToken == null && PathsHelper.getCollectionPath(resourceAddress).equalsIgnoreCase(resourceAddress) + && HttpConstants.HttpMethods.GET.equalsIgnoreCase(requestVerb) + || HttpConstants.HttpMethods.HEAD.equalsIgnoreCase(requestVerb)) { + String resourceAddressWithSlash = resourceAddress.endsWith(Constants.Properties.PATH_SEPARATOR) + ? resourceAddress + : resourceAddress + Constants.Properties.PATH_SEPARATOR; + for (String key : resourceTokensMap.keySet()) { + if (key.startsWith(resourceAddressWithSlash)) { + if (resourceTokensMap.get(key) != null && resourceTokensMap.get(key).size() > 0) + resourceToken = resourceTokensMap.get(key).get(0).getResourceToken(); + break; + } + } + } + + if (resourceToken == null) { + throw new IllegalArgumentException(RMResources.ResourceTokenNotFound); + } + + logger.debug("returned token for resourceAddress [{}] = [{}]", + resourceAddress, resourceToken); + return resourceToken; + } else { + String resourceToken = null; + ResourceId resourceId = ResourceId.parse(resourceAddress); + if (resourceId.getAttachment() != 0 || resourceId.getPermission() != 0 + || resourceId.getStoredProcedure() != 0 || resourceId.getTrigger() != 0 + || resourceId.getUserDefinedFunction() != 0) { + // Use the leaf ID - attachment/permission/sproc/trigger/udf + resourceToken = getResourceToken(resourceTokensMap, resourceAddress, partitionKey); + } + + if (resourceToken == null && (resourceId.getAttachment() != 0 || resourceId.getDocument() != 0)) { + // Use DocumentID for attachment/document + resourceToken = getResourceToken(resourceTokensMap, resourceId.getDocumentId().toString(), + partitionKey); + } + + if (resourceToken == null && (resourceId.getAttachment() != 0 || resourceId.getDocument() != 0 + || resourceId.getStoredProcedure() != 0 || resourceId.getTrigger() != 0 + || resourceId.getUserDefinedFunction() != 0 || resourceId.getDocumentCollection() != 0)) { + // Use CollectionID for attachment/document/sproc/trigger/udf/collection + resourceToken = getResourceToken(resourceTokensMap, resourceId.getDocumentCollectionId().toString(), + partitionKey); + } + + if (resourceToken == null && (resourceId.getPermission() != 0 || resourceId.getUser() != 0)) { + // Use UserID for permission/user + resourceToken = getResourceToken(resourceTokensMap, resourceId.getUserId().toString(), partitionKey); + } + + if (resourceToken == null) { + // Use DatabaseId if all else fail + resourceToken = getResourceToken(resourceTokensMap, resourceId.getDatabaseId().toString(), + partitionKey); + } + // Get or Head for collection can be done with any child token + if (resourceToken == null && 
resourceId.getDocumentCollection() != 0 + && (HttpConstants.HttpMethods.GET.equalsIgnoreCase(requestVerb) + || HttpConstants.HttpMethods.HEAD.equalsIgnoreCase(requestVerb))) { + for (String key : resourceTokensMap.keySet()) { + ResourceId tokenRid; + Pair pair = ResourceId.tryParse(key); + ResourceId test1= pair.getRight().getDocumentCollectionId(); + boolean test = test1.equals(resourceId); + if (!PathsHelper.isNameBased(key) && pair.getLeft() + && pair.getRight().getDocumentCollectionId().equals(resourceId)) { + if (resourceTokensMap.get(key) != null && resourceTokensMap.get(key).size() > 0) { + resourceToken = resourceTokensMap.get(key).get(0).getResourceToken(); + } + } + } + + } + + if (resourceToken == null) { + throw new IllegalArgumentException(RMResources.ResourceTokenNotFound); + } + + logger.debug("returned token for resourceAddress [{}] = [{}]", + resourceAddress, resourceToken); + return resourceToken; + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ResourceType.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ResourceType.java new file mode 100644 index 0000000000000..fb0a92940e91d --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/ResourceType.java @@ -0,0 +1,124 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +/** + * Resource types in the Azure Cosmos DB database service. 
+ */ +public enum ResourceType { + + // REQUIRED: This enum must be kept in sync with the ResourceType enum in backend native + + Unknown(-1), + Attachment(3), + BatchApply(112), + DocumentCollection(1), + ComputeGatewayCharges(131), + Conflict(107), + Database(0), + DatabaseAccount(118), + Document(2), + Index(104), + IndexBookmark(105), + IndexSize(106), + LargeInvalid(100), + LogStoreLogs(126), + MasterPartition(120), + Module(9), + ModuleCommand(103), + Offer(113), + PartitionKeyRange(125), + PartitionSetInformation(114), + Permission(5), + PreviousImage(128), + Progress(6), + Record(108), + Replica(7), + RestoreMetadata(127), + RidRange(130), + Schema(124), + SchemaContainer(123), + ServerPartition(121), + SmallMaxInvalid(10), + StoredProcedure(109), + Timestamp(117), + Tombstone(8), + Topology(122), + Trigger(110), + User(4), + UserDefinedFunction(111), + UserDefinedType(133), + VectorClock(129), + XPReplicatorAddress(115), + + // These names make it unclear what they map to in ResourceType. + Address(-5), + Key(-2), + Media(-3), + ServiceFabricService(-4); + + final private int value; + + ResourceType(int value) { + this.value = value; + } + + public int value() { + return this.value; + } + + public boolean isCollectionChild() { + return this == ResourceType.Document || + this == ResourceType.Attachment || + this == ResourceType.Conflict || + this == ResourceType.Schema || + this.isScript(); + } + + public boolean isMasterResource() { + return this == ResourceType.Offer || + this == ResourceType.Database || + this == ResourceType.User || + this == ResourceType.Permission || + this == ResourceType.Topology || + this == ResourceType.PartitionKeyRange || + this == ResourceType.DocumentCollection; + } + + /// + /// Resources for which this method returns true, are spread between multiple partitions + /// + public boolean isPartitioned() { + return this == ResourceType.Document || + this == ResourceType.Attachment || + this == ResourceType.Conflict; + } + + public boolean isScript() { + return this == ResourceType.UserDefinedFunction || + this == ResourceType.Trigger || + this == ResourceType.StoredProcedure; + } + +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/RetryPolicy.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/RetryPolicy.java new file mode 100644 index 0000000000000..713e9068eeda9 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/RetryPolicy.java @@ -0,0 +1,53 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.ConnectionPolicy; +import com.azure.data.cosmos.RetryOptions; + +/** + * While this class is public, but it is not part of our published public APIs. + * This is meant to be internally used only by our sdk. + * + * Represents the retry policy configuration associated with a DocumentClient instance. + */ +public class RetryPolicy implements IRetryPolicyFactory { + private final GlobalEndpointManager globalEndpointManager; + private final boolean enableEndpointDiscovery; + private final RetryOptions retryOptions; + + public RetryPolicy(GlobalEndpointManager globalEndpointManager, ConnectionPolicy connectionPolicy) { + this.enableEndpointDiscovery = connectionPolicy.enableEndpointDiscovery(); + this.globalEndpointManager = globalEndpointManager; + this.retryOptions = connectionPolicy.retryOptions(); + } + + @Override + public IDocumentClientRetryPolicy getRequestPolicy() { + ClientRetryPolicy clientRetryPolicy = new ClientRetryPolicy(this.globalEndpointManager, + this.enableEndpointDiscovery, this.retryOptions); + + return clientRetryPolicy; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/RetryUtils.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/RetryUtils.java new file mode 100644 index 0000000000000..4e50ed2a6f5dc --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/RetryUtils.java @@ -0,0 +1,142 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal; + +import org.apache.commons.lang3.time.StopWatch; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.time.Duration; +import java.util.function.Function; + +/** + * While this class is public, but it is not part of our published public APIs. + * This is meant to be internally used only by our sdk. 
+ */ +public class RetryUtils { + private final static Logger logger = LoggerFactory.getLogger(BackoffRetryUtility.class); + + static Function, Flux> toRetryWhenFunc(IRetryPolicy policy) { + return throwableFlux -> throwableFlux.flatMap(t -> { + Exception e = Utils.as(t, Exception.class); + if (e == null) { + return Flux.error(t); + } + Flux shouldRetryResultFlux = policy.shouldRetry(e).flux(); + return shouldRetryResultFlux.flatMap(s -> { + + if (s.backOffTime != null) { + return Mono.delay(Duration.ofMillis(s.backOffTime.toMillis())).flux(); + } else if (s.exception != null) { + return Flux.error(s.exception); + } else { + // NoRetry return original failure + return Flux.error(t); + } + }); + }); + } + + /** + * This method will be called after getting error on callbackMethod , and then keep trying between + * callbackMethod and inBackoffAlternateCallbackMethod until success or as stated in + * retry policy. + * @param callbackMethod The callbackMethod + * @param retryPolicy Retry policy + * @param inBackoffAlternateCallbackMethod The inBackoffAlternateCallbackMethod + * @param minBackoffForInBackoffCallback Minimum backoff for InBackoffCallbackMethod + * @return + */ + + public static Function> toRetryWithAlternateFunc(Function, Mono> callbackMethod, IRetryPolicy retryPolicy, Function, Mono> inBackoffAlternateCallbackMethod, Duration minBackoffForInBackoffCallback) { + return throwable -> { + Exception e = Utils.as(throwable, Exception.class); + if (e == null) { + return Mono.error(throwable); + } + + Flux shouldRetryResultFlux = retryPolicy.shouldRetry(e).flux(); + return shouldRetryResultFlux.flatMap(shouldRetryResult -> { + if (!shouldRetryResult.shouldRetry) { + if(shouldRetryResult.exception == null) { + return Mono.error(e); + } else { + return Mono.error(shouldRetryResult.exception); + } + } + + if (inBackoffAlternateCallbackMethod != null + && shouldRetryResult.backOffTime.compareTo(minBackoffForInBackoffCallback) > 0) { + StopWatch stopwatch = new StopWatch(); + startStopWatch(stopwatch); + return inBackoffAlternateCallbackMethod.apply(shouldRetryResult.policyArg) + .onErrorResume(recurrsiveWithAlternateFunc(callbackMethod, retryPolicy, + inBackoffAlternateCallbackMethod, shouldRetryResult, stopwatch, + minBackoffForInBackoffCallback)); + } else { + return recurrsiveFunc(callbackMethod, retryPolicy, inBackoffAlternateCallbackMethod, + shouldRetryResult, minBackoffForInBackoffCallback) + .delaySubscription(Duration.ofMillis(shouldRetryResult.backOffTime.toMillis())); + } + }).single(); + }; + } + + private static Mono recurrsiveFunc(Function, Mono> callbackMethod, IRetryPolicy retryPolicy, Function, Mono> inBackoffAlternateCallbackMethod, IRetryPolicy.ShouldRetryResult shouldRetryResult, Duration minBackoffForInBackoffCallback) { + return callbackMethod.apply(shouldRetryResult.policyArg).onErrorResume(toRetryWithAlternateFunc( + callbackMethod, retryPolicy, inBackoffAlternateCallbackMethod, minBackoffForInBackoffCallback)); + } + + private static Function> recurrsiveWithAlternateFunc(Function, Mono> callbackMethod, IRetryPolicy retryPolicy, Function, Mono> inBackoffAlternateCallbackMethod, IRetryPolicy.ShouldRetryResult shouldRetryResult, StopWatch stopwatch, Duration minBackoffForInBackoffCallback) { + return throwable -> { + Exception e = Utils.as(throwable, Exception.class); + if (e == null) { + return Mono.error(throwable); + } + + stopStopWatch(stopwatch); + logger.info("Failed inBackoffAlternateCallback with {}, proceeding with retry. 
Time taken: {}ms", + e.toString(), stopwatch.getTime()); + Duration backoffTime = shouldRetryResult.backOffTime.toMillis() > stopwatch.getTime() + ? Duration.ofMillis(shouldRetryResult.backOffTime.toMillis() - stopwatch.getTime()) + : Duration.ZERO; + return recurrsiveFunc(callbackMethod, retryPolicy, inBackoffAlternateCallbackMethod, shouldRetryResult, + minBackoffForInBackoffCallback) + .delaySubscription(Flux.just(0L).delayElements(Duration.ofMillis(backoffTime.toMillis()))); + }; + } + + private static void stopStopWatch(StopWatch stopwatch) { + synchronized (stopwatch) { + stopwatch.stop(); + } + } + + private static void startStopWatch(StopWatch stopwatch) { + synchronized (stopwatch) { + stopwatch.start(); + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/RuntimeConstants.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/RuntimeConstants.java new file mode 100644 index 0000000000000..8d68ec3abec07 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/RuntimeConstants.java @@ -0,0 +1,77 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +/** + * Used internally. Runtime constants in the Azure Cosmos DB database service Java SDK. 
+ */ +public class RuntimeConstants { + public static class MediaTypes { + // http://www.iana.org/assignments/media-types/media-types.xhtml + public static final String ANY = "*/*"; + public static final String IMAGE_JPEG = "image/jpeg"; + public static final String IMAGE_PNG = "image/png"; + public static final String JAVA_SCRIPT = "application/x-javascript"; + public static final String JSON = "application/json"; + public static final String OCTET_STREAM = "application/octet-stream"; + public static final String QUERY_JSON = "application/query+json"; + public static final String SQL = "application/sql"; + public static final String TEXT_HTML = "text/html"; + public static final String TEXT_PLAIN = "text/plain"; + public static final String XML = "application/xml"; + } + + public static class ProtocolScheme { + public static final String HTTPS = "https"; + public static final String TCP = "rntbd"; + } + + static class Separators { + static final char[] Url = new char[] {'/'}; + static final char[] Quote = new char[] {'\''}; + static final char[] DomainId = new char[] {'-'}; + static final char[] Query = new char[] {'?', '&', '='}; + static final char[] Parenthesis = new char[] {'(', ')'}; + static final char[] UserAgentHeader = new char[] {'(', ')', ';', ','}; + + + //Note that the accept header separator here is ideally comma. Semicolon is used for separators within individual + //header for now cloud moe does not recognize such accept header hence we allow both semicolon or comma separated + //accept header + static final char[] Header = new char[] {';', ','}; + static final char[] CookieSeparator = new char[] {';'}; + static final char[] CookieValueSeparator = new char[] {'='}; + static final char[] PPMUserToken = new char[] {':'}; + static final char[] Identifier = new char[] {'-'}; + static final char[] Host = new char[] {'.'}; + static final char[] Version = new char[] {','}; + static final char[] Pair = new char[] {';'}; + static final char[] ETag = new char[] {'#'}; + static final char[] MemberQuery = new char[] {'+'}; + + static final String HeaderEncodingBegin = "=?"; + static final String HeaderEncodingEnd = "?="; + static final String HeaderEncodingSeparator = "?"; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/RuntimeExecutionTimes.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/RuntimeExecutionTimes.java new file mode 100644 index 0000000000000..3d5153cc53772 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/RuntimeExecutionTimes.java @@ -0,0 +1,159 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal; + +import java.time.Duration; +import java.util.Collection; +import java.util.HashMap; + +/** + * Query runtime execution times in the Azure Cosmos DB service. + */ +public final class RuntimeExecutionTimes { + + static final RuntimeExecutionTimes ZERO = new RuntimeExecutionTimes(Duration.ZERO, Duration.ZERO, Duration.ZERO); + + private final Duration queryEngineExecutionTime; + private final Duration systemFunctionExecutionTime; + private final Duration userDefinedFunctionExecutionTime; + + /** + * @param queryEngineExecutionTime + * @param systemFunctionExecutionTime + * @param userDefinedFunctionExecutionTime + */ + RuntimeExecutionTimes(Duration queryEngineExecutionTime, Duration systemFunctionExecutionTime, + Duration userDefinedFunctionExecutionTime) { + super(); + + if (queryEngineExecutionTime == null) { + throw new NullPointerException("queryEngineExecutionTime"); + } + + if (systemFunctionExecutionTime == null) { + throw new NullPointerException("systemFunctionExecutionTime"); + } + + if (userDefinedFunctionExecutionTime == null) { + throw new NullPointerException("userDefinedFunctionExecutionTime"); + } + + this.queryEngineExecutionTime = queryEngineExecutionTime; + this.systemFunctionExecutionTime = systemFunctionExecutionTime; + this.userDefinedFunctionExecutionTime = userDefinedFunctionExecutionTime; + } + + /** + * @return the queryEngineExecutionTime + */ + public Duration getQueryEngineExecutionTime() { + return queryEngineExecutionTime; + } + + /** + * @return the systemFunctionExecutionTime + */ + public Duration getSystemFunctionExecutionTime() { + return systemFunctionExecutionTime; + } + + /** + * @return the userDefinedFunctionExecutionTime + */ + public Duration getUserDefinedFunctionExecutionTime() { + return userDefinedFunctionExecutionTime; + } + + static RuntimeExecutionTimes createFromCollection( + Collection runtimeExecutionTimesCollection) { + if (runtimeExecutionTimesCollection == null) { + throw new NullPointerException("runtimeExecutionTimesCollection"); + } + + Duration queryEngineExecutionTime = Duration.ZERO; + Duration systemFunctionExecutionTime = Duration.ZERO; + Duration userDefinedFunctionExecutionTime = Duration.ZERO; + + for (RuntimeExecutionTimes runtimeExecutionTime : runtimeExecutionTimesCollection) { + queryEngineExecutionTime = queryEngineExecutionTime.plus(runtimeExecutionTime.queryEngineExecutionTime); + systemFunctionExecutionTime = systemFunctionExecutionTime.plus(runtimeExecutionTime.systemFunctionExecutionTime); + userDefinedFunctionExecutionTime = userDefinedFunctionExecutionTime.plus(runtimeExecutionTime.userDefinedFunctionExecutionTime); + } + + return new RuntimeExecutionTimes( + queryEngineExecutionTime, + systemFunctionExecutionTime, + userDefinedFunctionExecutionTime); + } + + static RuntimeExecutionTimes createFromDelimitedString(String delimitedString) { + HashMap metrics = QueryMetricsUtils.parseDelimitedString(delimitedString); + + Duration vmExecutionTime = QueryMetricsUtils.durationFromMetrics(metrics, QueryMetricsConstants.VMExecutionTimeInMs); + Duration indexLookupTime = QueryMetricsUtils.durationFromMetrics(metrics, QueryMetricsConstants.IndexLookupTimeInMs); + Duration documentLoadTime = 
QueryMetricsUtils.durationFromMetrics(metrics, QueryMetricsConstants.DocumentLoadTimeInMs); + Duration documentWriteTime = QueryMetricsUtils.durationFromMetrics(metrics, QueryMetricsConstants.DocumentWriteTimeInMs); + + return new RuntimeExecutionTimes( + vmExecutionTime.minus(indexLookupTime).minus(documentLoadTime).minus(documentWriteTime), + QueryMetricsUtils.durationFromMetrics(metrics, QueryMetricsConstants.SystemFunctionExecuteTimeInMs), + QueryMetricsUtils.durationFromMetrics(metrics, QueryMetricsConstants.UserDefinedFunctionExecutionTimeInMs)); + } + + String toDelimitedString() { + String formatString = "%s=%2f;%s=%2f"; + + // queryEngineExecutionTime is not emitted, since it is calculated as + // vmExecutionTime - indexLookupTime - documentLoadTime - documentWriteTime + return String.format( + formatString, + QueryMetricsConstants.SystemFunctionExecuteTimeInMs, + this.systemFunctionExecutionTime.toMillis(), + QueryMetricsConstants.UserDefinedFunctionExecutionTimeInMs, + this.userDefinedFunctionExecutionTime.toMillis()); + } + + String toTextString(int indentLevel) { + if (indentLevel == Integer.MAX_VALUE) { + throw new NumberFormatException("indentLevel input must be less than Int32.MaxValue"); + } + StringBuilder stringBuilder = new StringBuilder(); + + QueryMetricsUtils.appendHeaderToStringBuilder(stringBuilder, QueryMetricsConstants.RuntimeExecutionTimesText, + indentLevel); + + QueryMetricsUtils.appendNanosecondsToStringBuilder(stringBuilder, + QueryMetricsConstants.TotalExecutionTimeText, this.queryEngineExecutionTime.toNanos(), + indentLevel + 1); + + QueryMetricsUtils.appendNanosecondsToStringBuilder(stringBuilder, + QueryMetricsConstants.SystemFunctionExecuteTimeText, + this.systemFunctionExecutionTime.toNanos(), indentLevel + 1); + + QueryMetricsUtils.appendNanosecondsToStringBuilder(stringBuilder, + QueryMetricsConstants.UserDefinedFunctionExecutionTimeText, + this.userDefinedFunctionExecutionTime.toNanos(), indentLevel + 1); + + return stringBuilder.toString(); + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/RxDocumentClientImpl.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/RxDocumentClientImpl.java new file mode 100644 index 0000000000000..a8445b91fd5bb --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/RxDocumentClientImpl.java @@ -0,0 +1,2778 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.AccessConditionType; +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.ChangeFeedOptions; +import com.azure.data.cosmos.ConnectionMode; +import com.azure.data.cosmos.ConnectionPolicy; +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.CosmosResourceType; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.JsonSerializable; +import com.azure.data.cosmos.PartitionKey; +import com.azure.data.cosmos.PartitionKeyDefinition; +import com.azure.data.cosmos.Resource; +import com.azure.data.cosmos.SqlQuerySpec; +import com.azure.data.cosmos.TokenResolver; +import com.azure.data.cosmos.internal.caches.RxClientCollectionCache; +import com.azure.data.cosmos.internal.caches.RxCollectionCache; +import com.azure.data.cosmos.internal.caches.RxPartitionKeyRangeCache; +import com.azure.data.cosmos.internal.directconnectivity.GatewayServiceConfigurationReader; +import com.azure.data.cosmos.internal.directconnectivity.GlobalAddressResolver; +import com.azure.data.cosmos.internal.directconnectivity.ServerStoreModel; +import com.azure.data.cosmos.internal.directconnectivity.StoreClient; +import com.azure.data.cosmos.internal.directconnectivity.StoreClientFactory; +import com.azure.data.cosmos.internal.http.HttpClient; +import com.azure.data.cosmos.internal.http.HttpClientConfig; +import com.azure.data.cosmos.internal.query.DocumentQueryExecutionContextFactory; +import com.azure.data.cosmos.internal.query.IDocumentQueryClient; +import com.azure.data.cosmos.internal.query.IDocumentQueryExecutionContext; +import com.azure.data.cosmos.internal.query.Paginator; +import com.azure.data.cosmos.internal.routing.PartitionKeyAndResourceTokenPair; +import com.azure.data.cosmos.internal.routing.PartitionKeyInternal; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.node.ObjectNode; +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.io.IOException; +import java.io.UnsupportedEncodingException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URLEncoder; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.function.BiFunction; +import java.util.function.Function; + +import static com.azure.data.cosmos.BridgeInternal.documentFromObject; +import static com.azure.data.cosmos.BridgeInternal.getAltLink; +import static com.azure.data.cosmos.BridgeInternal.toDatabaseAccount; +import static com.azure.data.cosmos.BridgeInternal.toFeedResponsePage; +import static com.azure.data.cosmos.BridgeInternal.toResourceResponse; +import static com.azure.data.cosmos.BridgeInternal.toStoredProcedureResponse; + +/** + * While this class is public, but it is not part of our published public APIs. + * This is meant to be internally used only by our sdk. 
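+ * <p>
+ * In broad terms (see {@code init()} below): requests flow through the gateway store model when
+ * {@link ConnectionMode#GATEWAY} is configured, and otherwise through a {@code ServerStoreModel}
+ * that {@code initializeDirectConnectivity()} builds for direct connectivity.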
+ */ +public class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider { + private final static ObjectMapper mapper = Utils.getSimpleObjectMapper(); + private final Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class); + private final String masterKeyOrResourceToken; + private final URI serviceEndpoint; + private final ConnectionPolicy connectionPolicy; + private final ConsistencyLevel consistencyLevel; + private final BaseAuthorizationTokenProvider authorizationTokenProvider; + private final UserAgentContainer userAgentContainer; + private final boolean hasAuthKeyResourceToken; + private final Configs configs; + private TokenResolver tokenResolver; + private SessionContainer sessionContainer; + private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY; + private RxClientCollectionCache collectionCache; + private RxStoreModel gatewayProxy; + private RxStoreModel storeModel; + private GlobalAddressResolver addressResolver; + private RxPartitionKeyRangeCache partitionKeyRangeCache; + private Map> resourceTokensMap; + + // RetryPolicy retries a request when it encounters session unavailable (see ClientRetryPolicy). + // Once it exhausts all write regions it clears the session container, then it uses RxClientCollectionCache + // to resolves the request's collection name. If it differs from the session container's resource id it + // explains the session unavailable exception: somebody removed and recreated the collection. In this + // case we retry once again (with empty session token) otherwise we return the error to the client + // (see RenameCollectionAwareClientRetryPolicy) + private IRetryPolicyFactory resetSessionTokenRetryPolicy; + /** + * Compatibility mode: Allows to specify compatibility mode used by client when + * making query requests. Should be removed when application/sql is no longer + * supported. + */ + private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default; + private final HttpClient reactorHttpClient; + private final GlobalEndpointManager globalEndpointManager; + private final RetryPolicy retryPolicy; + private volatile boolean useMultipleWriteLocations; + + // creator of TransportClient is responsible for disposing it. 
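+ // The factory is only constructed in initializeDirectConnectivity(), i.e. when direct
+ // connectivity is enabled; in GATEWAY mode init() simply points storeModel at gatewayProxy.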
+ private StoreClientFactory storeClientFactory; + + private GatewayServiceConfigurationReader gatewayConfigurationReader; + + public RxDocumentClientImpl(URI serviceEndpoint, + String masterKeyOrResourceToken, + List permissionFeed, + ConnectionPolicy connectionPolicy, + ConsistencyLevel consistencyLevel, + Configs configs, + TokenResolver tokenResolver) { + this(serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs); + this.tokenResolver = tokenResolver; + } + + public RxDocumentClientImpl(URI serviceEndpoint, + String masterKeyOrResourceToken, + List permissionFeed, + ConnectionPolicy connectionPolicy, + ConsistencyLevel consistencyLevel, + Configs configs) { + this(serviceEndpoint, masterKeyOrResourceToken, connectionPolicy, consistencyLevel, configs); + if (permissionFeed != null && permissionFeed.size() > 0) { + this.resourceTokensMap = new HashMap<>(); + for (Permission permission : permissionFeed) { + String[] segments = StringUtils.split(permission.getResourceLink(), + Constants.Properties.PATH_SEPARATOR.charAt(0)); + + if (segments.length <= 0) { + throw new IllegalArgumentException("resourceLink"); + } + + List partitionKeyAndResourceTokenPairs = null; + PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false); + if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) { + throw new IllegalArgumentException(permission.getResourceLink()); + } + + partitionKeyAndResourceTokenPairs = resourceTokensMap.get(pathInfo.resourceIdOrFullName); + if (partitionKeyAndResourceTokenPairs == null) { + partitionKeyAndResourceTokenPairs = new ArrayList<>(); + this.resourceTokensMap.put(pathInfo.resourceIdOrFullName, partitionKeyAndResourceTokenPairs); + } + + PartitionKey partitionKey = permission.getResourcePartitionKey(); + partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair( + partitionKey != null ? partitionKey.getInternalPartitionKey() : PartitionKeyInternal.Empty, + permission.getToken())); + logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token", + pathInfo.resourceIdOrFullName, partitionKey != null ? 
partitionKey.toString() : null, permission.getToken()); + + } + + if(this.resourceTokensMap.isEmpty()) { + throw new IllegalArgumentException("permissionFeed"); + } + + String firstToken = permissionFeed.get(0).getToken(); + if(ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) { + this.firstResourceTokenFromPermissionFeed = firstToken; + } + } + } + + public RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, ConnectionPolicy connectionPolicy, + ConsistencyLevel consistencyLevel, Configs configs) { + + logger.info( + "Initializing DocumentClient with" + + " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}], directModeProtocol [{}]", + serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol()); + + this.configs = configs; + this.masterKeyOrResourceToken = masterKeyOrResourceToken; + this.serviceEndpoint = serviceEndpoint; + + if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) { + this.authorizationTokenProvider = null; + hasAuthKeyResourceToken = true; + } else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)){ + hasAuthKeyResourceToken = false; + this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.masterKeyOrResourceToken); + } else { + hasAuthKeyResourceToken = false; + this.authorizationTokenProvider = null; + } + + if (connectionPolicy != null) { + this.connectionPolicy = connectionPolicy; + } else { + this.connectionPolicy = new ConnectionPolicy(); + } + + this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost()); + this.consistencyLevel = consistencyLevel; + + this.userAgentContainer = new UserAgentContainer(); + + String userAgentSuffix = this.connectionPolicy.userAgentSuffix(); + if (userAgentSuffix != null && userAgentSuffix.length() > 0) { + userAgentContainer.setSuffix(userAgentSuffix); + } + + this.reactorHttpClient = httpClient(); + this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs); + this.retryPolicy = new RetryPolicy(this.globalEndpointManager, this.connectionPolicy); + this.resetSessionTokenRetryPolicy = retryPolicy; + } + + private void initializeGatewayConfigurationReader() { + String resourceToken; + if(this.tokenResolver != null) { + resourceToken = this.tokenResolver.getAuthorizationToken("GET", "", CosmosResourceType.System, null); + } else if(!this.hasAuthKeyResourceToken && this.authorizationTokenProvider == null) { + resourceToken = this.firstResourceTokenFromPermissionFeed; + } else { + assert this.masterKeyOrResourceToken != null; + resourceToken = this.masterKeyOrResourceToken; + } + + this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.serviceEndpoint, + this.hasAuthKeyResourceToken, + resourceToken, + this.connectionPolicy, + this.authorizationTokenProvider, + this.reactorHttpClient); + + DatabaseAccount databaseAccount = this.gatewayConfigurationReader.initializeReaderAsync().block(); + this.useMultipleWriteLocations = this.connectionPolicy.usingMultipleWriteLocations() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount); + + // TODO: add support for openAsync + // https://msdata.visualstudio.com/CosmosDB/_workitems/edit/332589 + this.globalEndpointManager.refreshLocationAsync(databaseAccount).block(); + } + + public void init() { + + // TODO: add support for openAsync + // 
https://msdata.visualstudio.com/CosmosDB/_workitems/edit/332589 + this.gatewayProxy = createRxGatewayProxy(this.sessionContainer, + this.consistencyLevel, + this.queryCompatibilityMode, + this.userAgentContainer, + this.globalEndpointManager, + this.reactorHttpClient); + this.globalEndpointManager.init(); + this.initializeGatewayConfigurationReader(); + + this.collectionCache = new RxClientCollectionCache(this.sessionContainer, this.gatewayProxy, this, this.retryPolicy); + this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy); + + this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this, + collectionCache); + + if (this.connectionPolicy.connectionMode() == ConnectionMode.GATEWAY) { + this.storeModel = this.gatewayProxy; + } else { + this.initializeDirectConnectivity(); + } + } + + private void initializeDirectConnectivity() { + + this.storeClientFactory = new StoreClientFactory( + this.configs, + this.connectionPolicy.requestTimeoutInMillis() / 1000, + // this.maxConcurrentConnectionOpenRequests, + 0, + this.userAgentContainer + ); + + this.addressResolver = new GlobalAddressResolver( + this.reactorHttpClient, + this.globalEndpointManager, + this.configs.getProtocol(), + this, + this.collectionCache, + this.partitionKeyRangeCache, + userAgentContainer, + // TODO: GATEWAY Configuration Reader + // this.gatewayConfigurationReader, + null, + this.connectionPolicy); + + this.createStoreModel(true); + } + + DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() { + return new DatabaseAccountManagerInternal() { + + @Override + public URI getServiceEndpoint() { + return RxDocumentClientImpl.this.getServiceEndpoint(); + } + + @Override + public Flux getDatabaseAccountFromEndpoint(URI endpoint) { + logger.info("Getting database account endpoint from {}", endpoint); + return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint); + } + + @Override + public ConnectionPolicy getConnectionPolicy() { + return RxDocumentClientImpl.this.getConnectionPolicy(); + } + }; + } + + RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer, + ConsistencyLevel consistencyLevel, + QueryCompatibilityMode queryCompatibilityMode, + UserAgentContainer userAgentContainer, + GlobalEndpointManager globalEndpointManager, + HttpClient httpClient) { + return new RxGatewayStoreModel(sessionContainer, + consistencyLevel, + queryCompatibilityMode, + userAgentContainer, + globalEndpointManager, + httpClient); + } + + private HttpClient httpClient() { + + HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs) + .withMaxIdleConnectionTimeoutInMillis(this.connectionPolicy.idleConnectionTimeoutInMillis()) + .withPoolSize(this.connectionPolicy.maxPoolSize()) + .withHttpProxy(this.connectionPolicy.proxy()) + .withRequestTimeoutInMillis(this.connectionPolicy.requestTimeoutInMillis()); + + return HttpClient.createFixed(httpClientConfig); + } + + private void createStoreModel(boolean subscribeRntbdStatus) { + // EnableReadRequestsFallback, if not explicitly set on the connection policy, + // is false if the account's consistency is bounded staleness, + // and true otherwise. 
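+ // The store client built below combines the address resolver, session container and gateway
+ // configuration reader; wrapping it in a ServerStoreModel yields this.storeModel, presumably
+ // the model that getStoreProxy(...) selects when requests are served over direct connectivity.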
+ + StoreClient storeClient = this.storeClientFactory.createStoreClient( + this.addressResolver, + this.sessionContainer, + this.gatewayConfigurationReader, + this, + false + ); + + this.storeModel = new ServerStoreModel(storeClient); + } + + + @Override + public URI getServiceEndpoint() { + return this.serviceEndpoint; + } + + @Override + public URI getWriteEndpoint() { + return globalEndpointManager.getWriteEndpoints().stream().findFirst().map(loc -> { + try { + return loc.toURI(); + } catch (URISyntaxException e) { + throw new IllegalStateException(e); + } + }).orElse(null); + } + + @Override + public URI getReadEndpoint() { + return globalEndpointManager.getReadEndpoints().stream().findFirst().map(loc -> { + try { + return loc.toURI(); + } catch (URISyntaxException e) { + throw new IllegalStateException(e); + } + }).orElse(null); + } + + @Override + public ConnectionPolicy getConnectionPolicy() { + return this.connectionPolicy; + } + + @Override + public Flux> createDatabase(Database database, RequestOptions options) { + IDocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); + return ObservableHelper.inlineIfPossibleAsObs(() -> createDatabaseInternal(database, options, retryPolicyInstance), retryPolicyInstance); + } + + private Flux> createDatabaseInternal(Database database, RequestOptions options, IDocumentClientRetryPolicy retryPolicyInstance) { + try { + + if (database == null) { + throw new IllegalArgumentException("Database"); + } + + logger.debug("Creating a Database. id: [{}]", database.id()); + validateResource(database); + + Map requestHeaders = this.getRequestHeaders(options); + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Create, + ResourceType.Database, Paths.DATABASES_ROOT, database, requestHeaders, options); + + if (retryPolicyInstance != null) { + retryPolicyInstance.onBeforeSendRequest(request); + } + return this.create(request).map(response -> toResourceResponse(response, Database.class)); + } catch (Exception e) { + logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e); + return Flux.error(e); + } + } + + @Override + public Flux> deleteDatabase(String databaseLink, RequestOptions options) { + IDocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); + return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance); + } + + private Flux> deleteDatabaseInternal(String databaseLink, RequestOptions options, + IDocumentClientRetryPolicy retryPolicyInstance) { + try { + if (StringUtils.isEmpty(databaseLink)) { + throw new IllegalArgumentException("databaseLink"); + } + + logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink); + String path = Utils.joinPath(databaseLink, null); + Map requestHeaders = this.getRequestHeaders(options); + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, + ResourceType.Database, path, requestHeaders, options); + + if (retryPolicyInstance != null) { + retryPolicyInstance.onBeforeSendRequest(request); + } + + return this.delete(request).map(response -> toResourceResponse(response, Database.class)); + } catch (Exception e) { + logger.debug("Failure in deleting a database. 
due to [{}]", e.getMessage(), e); + return Flux.error(e); + } + } + + @Override + public Flux> readDatabase(String databaseLink, RequestOptions options) { + IDocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); + return ObservableHelper.inlineIfPossibleAsObs(() -> readDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance); + } + + private Flux> readDatabaseInternal(String databaseLink, RequestOptions options, IDocumentClientRetryPolicy retryPolicyInstance) { + try { + if (StringUtils.isEmpty(databaseLink)) { + throw new IllegalArgumentException("databaseLink"); + } + + logger.debug("Reading a Database. databaseLink: [{}]", databaseLink); + String path = Utils.joinPath(databaseLink, null); + Map requestHeaders = this.getRequestHeaders(options); + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, + ResourceType.Database, path, requestHeaders, options); + + if (retryPolicyInstance != null) { + retryPolicyInstance.onBeforeSendRequest(request); + } + return this.read(request).map(response -> toResourceResponse(response, Database.class)); + } catch (Exception e) { + logger.debug("Failure in reading a database. due to [{}]", e.getMessage(), e); + return Flux.error(e); + } + } + + @Override + public Flux> readDatabases(FeedOptions options) { + return readFeed(options, ResourceType.Database, Database.class, Paths.DATABASES_ROOT); + } + + private String parentResourceLinkToQueryLink(String parentResouceLink, ResourceType resourceTypeEnum) { + switch (resourceTypeEnum) { + case Database: + return Paths.DATABASES_ROOT; + + case DocumentCollection: + return Utils.joinPath(parentResouceLink, Paths.COLLECTIONS_PATH_SEGMENT); + + case Document: + return Utils.joinPath(parentResouceLink, Paths.DOCUMENTS_PATH_SEGMENT); + + case Offer: + return Paths.OFFERS_ROOT; + + case User: + return Utils.joinPath(parentResouceLink, Paths.USERS_PATH_SEGMENT); + + case Permission: + return Utils.joinPath(parentResouceLink, Paths.PERMISSIONS_PATH_SEGMENT); + + case Attachment: + return Utils.joinPath(parentResouceLink, Paths.ATTACHMENTS_PATH_SEGMENT); + + case StoredProcedure: + return Utils.joinPath(parentResouceLink, Paths.STORED_PROCEDURES_PATH_SEGMENT); + + case Trigger: + return Utils.joinPath(parentResouceLink, Paths.TRIGGERS_PATH_SEGMENT); + + case UserDefinedFunction: + return Utils.joinPath(parentResouceLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT); + + default: + throw new IllegalArgumentException("resource type not supported"); + } + } + + private Flux> createQuery( + String parentResourceLink, + SqlQuerySpec sqlQuery, + FeedOptions options, + Class klass, + ResourceType resourceTypeEnum) { + + String queryResourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum); + + UUID activityId = Utils.randomUUID(); + IDocumentQueryClient queryClient = DocumentQueryClientImpl(RxDocumentClientImpl.this); + Flux> executionContext = + DocumentQueryExecutionContextFactory.createDocumentQueryExecutionContextAsync(queryClient, resourceTypeEnum, klass, sqlQuery , options, queryResourceLink, false, activityId); + return executionContext.flatMap(IDocumentQueryExecutionContext::executeAsync); + } + + + @Override + public Flux> queryDatabases(String query, FeedOptions options) { + return queryDatabases(new SqlQuerySpec(query), options); + } + + + @Override + public Flux> queryDatabases(SqlQuerySpec querySpec, FeedOptions options) { + return createQuery(Paths.DATABASES_ROOT, querySpec, options, 
Database.class, ResourceType.Database); + } + + @Override + public Flux> createCollection(String databaseLink, + DocumentCollection collection, RequestOptions options) { + IDocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); + return ObservableHelper.inlineIfPossibleAsObs(() -> this.createCollectionInternal(databaseLink, collection, options, retryPolicyInstance), retryPolicyInstance); + } + + private Flux> createCollectionInternal(String databaseLink, + DocumentCollection collection, RequestOptions options, IDocumentClientRetryPolicy retryPolicyInstance) { + try { + if (StringUtils.isEmpty(databaseLink)) { + throw new IllegalArgumentException("databaseLink"); + } + if (collection == null) { + throw new IllegalArgumentException("collection"); + } + + logger.debug("Creating a Collection. databaseLink: [{}], Collection id: [{}]", databaseLink, + collection.id()); + validateResource(collection); + + String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT); + Map requestHeaders = this.getRequestHeaders(options); + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Create, + ResourceType.DocumentCollection, path, collection, requestHeaders, options); + + if (retryPolicyInstance != null){ + retryPolicyInstance.onBeforeSendRequest(request); + } + + return this.create(request).map(response -> toResourceResponse(response, DocumentCollection.class)) + .doOnNext(resourceResponse -> { + // set the session token + this.sessionContainer.setSessionToken(resourceResponse.getResource().resourceId(), + getAltLink(resourceResponse.getResource()), + resourceResponse.getResponseHeaders()); + }); + } catch (Exception e) { + logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e); + return Flux.error(e); + } + } + + @Override + public Flux> replaceCollection(DocumentCollection collection, + RequestOptions options) { + IDocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); + return ObservableHelper.inlineIfPossibleAsObs(() -> replaceCollectionInternal(collection, options, retryPolicyInstance), retryPolicyInstance); + } + + private Flux> replaceCollectionInternal(DocumentCollection collection, + RequestOptions options, IDocumentClientRetryPolicy retryPolicyInstance) { + try { + if (collection == null) { + throw new IllegalArgumentException("collection"); + } + + logger.debug("Replacing a Collection. id: [{}]", collection.id()); + validateResource(collection); + + String path = Utils.joinPath(collection.selfLink(), null); + Map requestHeaders = this.getRequestHeaders(options); + + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, + ResourceType.DocumentCollection, path, collection, requestHeaders, options); + + // TODO: .Net has some logic for updating session token which we don't + // have here + if (retryPolicyInstance != null){ + retryPolicyInstance.onBeforeSendRequest(request); + } + + return this.replace(request).map(response -> toResourceResponse(response, DocumentCollection.class)) + .doOnNext(resourceResponse -> { + if (resourceResponse.getResource() != null) { + // set the session token + this.sessionContainer.setSessionToken(resourceResponse.getResource().resourceId(), + getAltLink(resourceResponse.getResource()), + resourceResponse.getResponseHeaders()); + } + }); + + } catch (Exception e) { + logger.debug("Failure in replacing a collection. 
due to [{}]", e.getMessage(), e); + return Flux.error(e); + } + } + + @Override + public Flux> deleteCollection(String collectionLink, + RequestOptions options) { + IDocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); + return ObservableHelper.inlineIfPossibleAsObs(() -> deleteCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance); + } + + private Flux> deleteCollectionInternal(String collectionLink, + RequestOptions options, IDocumentClientRetryPolicy retryPolicyInstance) { + try { + if (StringUtils.isEmpty(collectionLink)) { + throw new IllegalArgumentException("collectionLink"); + } + + logger.debug("Deleting a Collection. collectionLink: [{}]", collectionLink); + String path = Utils.joinPath(collectionLink, null); + Map requestHeaders = this.getRequestHeaders(options); + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, + ResourceType.DocumentCollection, path, requestHeaders, options); + + if (retryPolicyInstance != null){ + retryPolicyInstance.onBeforeSendRequest(request); + } + + return this.delete(request).map(response -> toResourceResponse(response, DocumentCollection.class)); + + } catch (Exception e) { + logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e); + return Flux.error(e); + } + } + + private Flux delete(RxDocumentServiceRequest request) { + populateHeaders(request, HttpConstants.HttpMethods.DELETE); + return getStoreProxy(request).processMessage(request); + } + + private Flux read(RxDocumentServiceRequest request) { + populateHeaders(request, HttpConstants.HttpMethods.GET); + return getStoreProxy(request).processMessage(request); + } + + Flux readFeed(RxDocumentServiceRequest request) { + populateHeaders(request, HttpConstants.HttpMethods.GET); + return gatewayProxy.processMessage(request); + } + + private Flux query(RxDocumentServiceRequest request) { + populateHeaders(request, HttpConstants.HttpMethods.POST); + return this.getStoreProxy(request).processMessage(request) + .map(response -> { + this.captureSessionToken(request, response); + return response; + } + ); + } + + @Override + public Flux> readCollection(String collectionLink, + RequestOptions options) { + IDocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); + return ObservableHelper.inlineIfPossibleAsObs(() -> readCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance); + } + + private Flux> readCollectionInternal(String collectionLink, + RequestOptions options, IDocumentClientRetryPolicy retryPolicyInstance) { + + // we are using an observable factory here + // observable will be created fresh upon subscription + // this is to ensure we capture most up to date information (e.g., + // session) + try { + if (StringUtils.isEmpty(collectionLink)) { + throw new IllegalArgumentException("collectionLink"); + } + + logger.debug("Reading a Collection. 
collectionLink: [{}]", collectionLink); + String path = Utils.joinPath(collectionLink, null); + Map requestHeaders = this.getRequestHeaders(options); + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, + ResourceType.DocumentCollection, path, requestHeaders, options); + + if (retryPolicyInstance != null){ + retryPolicyInstance.onBeforeSendRequest(request); + } + return this.read(request).map(response -> toResourceResponse(response, DocumentCollection.class)); + } catch (Exception e) { + // this is only in trace level to capture what's going on + logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e); + return Flux.error(e); + } + } + + @Override + public Flux> readCollections(String databaseLink, FeedOptions options) { + + if (StringUtils.isEmpty(databaseLink)) { + throw new IllegalArgumentException("databaseLink"); + } + + return readFeed(options, ResourceType.DocumentCollection, DocumentCollection.class, + Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT)); + } + + @Override + public Flux> queryCollections(String databaseLink, String query, + FeedOptions options) { + return createQuery(databaseLink, new SqlQuerySpec(query), options, DocumentCollection.class, ResourceType.DocumentCollection); + } + + @Override + public Flux> queryCollections(String databaseLink, + SqlQuerySpec querySpec, FeedOptions options) { + return createQuery(databaseLink, querySpec, options, DocumentCollection.class, ResourceType.DocumentCollection); + } + + private static String serializeProcedureParams(Object[] objectArray) { + String[] stringArray = new String[objectArray.length]; + + for (int i = 0; i < objectArray.length; ++i) { + Object object = objectArray[i]; + if (object instanceof JsonSerializable) { + stringArray[i] = ((JsonSerializable) object).toJson(); + } else { + + // POJO, ObjectNode, number, STRING or Boolean + try { + stringArray[i] = mapper.writeValueAsString(object); + } catch (IOException e) { + throw new IllegalArgumentException("Can't serialize the object into the json string", e); + } + } + } + + return String.format("[%s]", StringUtils.join(stringArray, ",")); + } + + private static void validateResource(Resource resource) { + if (!StringUtils.isEmpty(resource.id())) { + if (resource.id().indexOf('/') != -1 || resource.id().indexOf('\\') != -1 || + resource.id().indexOf('?') != -1 || resource.id().indexOf('#') != -1) { + throw new IllegalArgumentException("Id contains illegal chars."); + } + + if (resource.id().endsWith(" ")) { + throw new IllegalArgumentException("Id ends with a space."); + } + } + } + + private Map getRequestHeaders(RequestOptions options) { + Map headers = new HashMap<>(); + + if (this.useMultipleWriteLocations) { + headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString()); + } + + if (consistencyLevel != null) { + headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString()); + } + + if (options == null) { + return headers; + } + + Map customOptions = options.getHeaders(); + if (customOptions != null) { + headers.putAll(customOptions); + } + + if (options.getAccessCondition() != null) { + if (options.getAccessCondition().type() == AccessConditionType.IF_MATCH) { + headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getAccessCondition().condition()); + } else { + headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getAccessCondition().condition()); + } + } + + if (options.getConsistencyLevel() != null) { + 
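+ // A request-level consistency level overrides the client-wide default that was written into
+ // the CONSISTENCY_LEVEL header at the top of this method.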
headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString()); + } + + if (options.getIndexingDirective() != null) { + headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString()); + } + + if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) { + String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ","); + headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude); + } + + if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) { + String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ","); + headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude); + } + + if (!Strings.isNullOrEmpty(options.getSessionToken())) { + headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken()); + } + + if (options.getResourceTokenExpirySeconds() != null) { + headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY, + String.valueOf(options.getResourceTokenExpirySeconds())); + } + + if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) { + headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString()); + } else if (options.getOfferType() != null) { + headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType()); + } + + if (options.isPopulateQuotaInfo()) { + headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true)); + } + + if (options.isScriptLoggingEnabled()) { + headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true)); + } + + return headers; + } + + private Mono addPartitionKeyInformation(RxDocumentServiceRequest request, Document document, + RequestOptions options) { + + Mono collectionObs = this.collectionCache.resolveCollectionAsync(request); + return collectionObs + .map(collection -> { + addPartitionKeyInformation(request, document, options, collection); + return request; + }); + } + + private Mono addPartitionKeyInformation(RxDocumentServiceRequest request, Document document, RequestOptions options, + Mono collectionObs) { + + return collectionObs.map(collection -> { + addPartitionKeyInformation(request, document, options, collection); + return request; + }); + } + + private void addPartitionKeyInformation(RxDocumentServiceRequest request, Document document, RequestOptions options, + DocumentCollection collection) { + PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey(); + + PartitionKeyInternal partitionKeyInternal = null; + if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.None)){ + partitionKeyInternal = BridgeInternal.getNonePartitionKey(partitionKeyDefinition); + } else if (options != null && options.getPartitionKey() != null) { + partitionKeyInternal = options.getPartitionKey().getInternalPartitionKey(); + } else if (partitionKeyDefinition == null || partitionKeyDefinition.paths().size() == 0) { + // For backward compatibility, if collection doesn't have partition key defined, we assume all documents + // have empty value for it and user doesn't need to specify it explicitly. 
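+ // Resolution order in this method: an explicit PartitionKey.None maps to the collection's
+ // "none" key, an explicit options partition key is used as-is, a collection without a
+ // partition key definition falls back to the empty key (this branch), otherwise the value is
+ // extracted from the document; if none of these apply, an UnsupportedOperationException is thrown.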
+ partitionKeyInternal = PartitionKeyInternal.getEmpty(); + } else if (document != null) { + partitionKeyInternal = extractPartitionKeyValueFromDocument(document, partitionKeyDefinition); + } else { + throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation."); + } + + request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, escapeNonAscii(partitionKeyInternal.toJson())); + } + + private static String escapeNonAscii(String partitionKeyJson) { + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < partitionKeyJson.length(); i++) { + int val = partitionKeyJson.charAt(i); + if (val > 127) { + sb.append("\\u").append(String.format("%04X", val)); + } else { + sb.append(partitionKeyJson.charAt(i)); + } + } + return sb.toString(); + } + + private static PartitionKeyInternal extractPartitionKeyValueFromDocument( + Document document, + PartitionKeyDefinition partitionKeyDefinition) { + if (partitionKeyDefinition != null) { + String path = partitionKeyDefinition.paths().iterator().next(); + List parts = PathParser.getPathParts(path); + if (parts.size() >= 1) { + Object value = document.getObjectByPath(parts); + if (value == null || value.getClass() == ObjectNode.class) { + value = BridgeInternal.getNonePartitionKey(partitionKeyDefinition); + } + + if (value instanceof PartitionKeyInternal) { + return (PartitionKeyInternal) value; + } else { + return PartitionKeyInternal.fromObjectArray(Collections.singletonList(value), false); + } + } + } + + return null; + } + + private Mono getCreateDocumentRequest(String documentCollectionLink, Object document, + RequestOptions options, boolean disableAutomaticIdGeneration, OperationType operationType) { + + if (StringUtils.isEmpty(documentCollectionLink)) { + throw new IllegalArgumentException("documentCollectionLink"); + } + if (document == null) { + throw new IllegalArgumentException("document"); + } + + Document typedDocument = documentFromObject(document, mapper); + + RxDocumentClientImpl.validateResource(typedDocument); + + if (typedDocument.id() == null && !disableAutomaticIdGeneration) { + // We are supposed to use GUID. Basically UUID is the same as GUID + // when represented as a string. 
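+ // The generated id is the canonical 36-character UUID string, e.g. something like
+ // "f81d4fae-7dec-11d0-a765-00a0c91e6bf6" (illustrative value only).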
+ typedDocument.id(UUID.randomUUID().toString()); + } + String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT); + Map requestHeaders = this.getRequestHeaders(options); + + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, ResourceType.Document, path, + typedDocument, requestHeaders, options); + + Mono collectionObs = this.collectionCache.resolveCollectionAsync(request); + return addPartitionKeyInformation(request, typedDocument, options, collectionObs); + } + + private void populateHeaders(RxDocumentServiceRequest request, String httpMethod) { + if (this.masterKeyOrResourceToken != null) { + request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123()); + } + + if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null || this.tokenResolver != null) { + String resourceName = request.getResourceAddress(); + + String authorization = this.getUserAuthorizationToken( + resourceName, request.getResourceType(), httpMethod, request.getHeaders(), + AuthorizationTokenType.PrimaryMasterKey, request.properties); + try { + authorization = URLEncoder.encode(authorization, "UTF-8"); + } catch (UnsupportedEncodingException e) { + throw new IllegalStateException("Failed to encode authtoken.", e); + } + request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization); + } + + if ((HttpConstants.HttpMethods.POST.equals(httpMethod) || HttpConstants.HttpMethods.PUT.equals(httpMethod)) + && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) { + request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON); + } + + if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) { + request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON); + } + } + + @Override + public String getUserAuthorizationToken(String resourceName, + ResourceType resourceType, + String requestVerb, + Map headers, + AuthorizationTokenType tokenType, + Map properties) { + + if (this.tokenResolver != null) { + return this.tokenResolver.getAuthorizationToken(requestVerb, resourceName, this.resolveCosmosResourceType(resourceType), + properties != null ? 
Collections.unmodifiableMap(properties) : null); + } else if (masterKeyOrResourceToken != null && !hasAuthKeyResourceToken) { + return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName, + resourceType, headers); + } else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) { + return masterKeyOrResourceToken; + } else { + assert resourceTokensMap != null; + if(resourceType.equals(ResourceType.DatabaseAccount)) { + return this.firstResourceTokenFromPermissionFeed; + } + return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers); + } + } + + private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) { + try { + return CosmosResourceType.valueOf(resourceType.toString()); + } catch (IllegalArgumentException e) { + return CosmosResourceType.System; + } + } + + void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) { + this.sessionContainer.setSessionToken(request, response.getResponseHeaders()); + } + + private Flux create(RxDocumentServiceRequest request) { + populateHeaders(request, HttpConstants.HttpMethods.POST); + RxStoreModel storeProxy = this.getStoreProxy(request); + return storeProxy.processMessage(request); + } + + private Flux upsert(RxDocumentServiceRequest request) { + + populateHeaders(request, HttpConstants.HttpMethods.POST); + Map headers = request.getHeaders(); + // headers can never be null, since it will be initialized even when no + // request options are specified, + // hence using assertion here instead of exception, being in the private + // method + assert (headers != null); + headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true"); + + return getStoreProxy(request).processMessage(request) + .map(response -> { + this.captureSessionToken(request, response); + return response; + } + ); + } + + private Flux replace(RxDocumentServiceRequest request) { + populateHeaders(request, HttpConstants.HttpMethods.PUT); + return getStoreProxy(request).processMessage(request); + } + + @Override + public Flux> createDocument(String collectionLink, Object document, + RequestOptions options, boolean disableAutomaticIdGeneration) { + IDocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); + if (options == null || options.getPartitionKey() == null) { + requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); + } + + IDocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy; + return ObservableHelper.inlineIfPossibleAsObs(() -> createDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, finalRetryPolicyInstance), requestRetryPolicy); + } + + private Flux> createDocumentInternal(String collectionLink, Object document, + RequestOptions options, final boolean disableAutomaticIdGeneration, IDocumentClientRetryPolicy requestRetryPolicy) { + + try { + logger.debug("Creating a Document. 
collectionLink: [{}]", collectionLink); + + Mono requestObs = getCreateDocumentRequest(collectionLink, document, + options, disableAutomaticIdGeneration, OperationType.Create); + + Flux responseObservable = requestObs + .flux() + .flatMap(req -> { + if (requestRetryPolicy != null) { + requestRetryPolicy.onBeforeSendRequest(req); + } + + return create(req); + }); + + return responseObservable + .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); + + } catch (Exception e) { + logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e); + return Flux.error(e); + } + } + + @Override + public Flux> upsertDocument(String collectionLink, Object document, + RequestOptions options, boolean disableAutomaticIdGeneration) { + + IDocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); + if (options == null || options.getPartitionKey() == null) { + requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); + } + IDocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy; + return ObservableHelper.inlineIfPossibleAsObs(() -> upsertDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, finalRetryPolicyInstance), requestRetryPolicy); + } + + private Flux> upsertDocumentInternal(String collectionLink, Object document, + RequestOptions options, boolean disableAutomaticIdGeneration, IDocumentClientRetryPolicy retryPolicyInstance) { + try { + logger.debug("Upserting a Document. collectionLink: [{}]", collectionLink); + + Flux reqObs = getCreateDocumentRequest(collectionLink, document, + options, disableAutomaticIdGeneration, OperationType.Upsert).flux(); + + Flux responseObservable = reqObs.flatMap(req -> { + if (retryPolicyInstance != null) { + retryPolicyInstance.onBeforeSendRequest(req); + } + + return upsert(req);}); + return responseObservable + .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); + + } catch (Exception e) { + logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e); + return Flux.error(e); + } + } + + @Override + public Flux> replaceDocument(String documentLink, Object document, + RequestOptions options) { + + IDocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); + if (options == null || options.getPartitionKey() == null) { + String collectionLink = Utils.getCollectionName(documentLink); + requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); + } + IDocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy; + return ObservableHelper.inlineIfPossibleAsObs(() -> replaceDocumentInternal(documentLink, document, options, finalRequestRetryPolicy), requestRetryPolicy); + } + + private Flux> replaceDocumentInternal(String documentLink, Object document, + RequestOptions options, IDocumentClientRetryPolicy retryPolicyInstance) { + try { + if (StringUtils.isEmpty(documentLink)) { + throw new IllegalArgumentException("documentLink"); + } + + if (document == null) { + throw new IllegalArgumentException("document"); + } + + Document typedDocument = documentFromObject(document, mapper); + + return this.replaceDocumentInternal(documentLink, typedDocument, options, retryPolicyInstance); + + } catch (Exception e) { + logger.debug("Failure in replacing a document due to [{}]", e.getMessage()); + return Flux.error(e); + } + } + + @Override + public 
Flux> replaceDocument(Document document, RequestOptions options) { + IDocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); + if (options == null || options.getPartitionKey() == null) { + String collectionLink = document.selfLink(); + requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); + } + IDocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy; + return ObservableHelper.inlineIfPossibleAsObs(() -> replaceDocumentInternal(document, options, finalRequestRetryPolicy), requestRetryPolicy); + } + + private Flux> replaceDocumentInternal(Document document, RequestOptions options, IDocumentClientRetryPolicy retryPolicyInstance) { + + try { + if (document == null) { + throw new IllegalArgumentException("document"); + } + + return this.replaceDocumentInternal(document.selfLink(), document, options, retryPolicyInstance); + + } catch (Exception e) { + logger.debug("Failure in replacing a database due to [{}]", e.getMessage()); + return Flux.error(e); + } + } + + private Flux> replaceDocumentInternal(String documentLink, Document document, + RequestOptions options, IDocumentClientRetryPolicy retryPolicyInstance) { + + if (document == null) { + throw new IllegalArgumentException("document"); + } + + logger.debug("Replacing a Document. documentLink: [{}]", documentLink); + final String path = Utils.joinPath(documentLink, null); + final Map requestHeaders = getRequestHeaders(options); + final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, + ResourceType.Document, path, document, requestHeaders, options); + + validateResource(document); + + Mono collectionObs = collectionCache.resolveCollectionAsync(request); + Mono requestObs = addPartitionKeyInformation(request, document, options, collectionObs); + + return requestObs.flux().flatMap(req -> { + if (retryPolicyInstance != null) { + retryPolicyInstance.onBeforeSendRequest(request); + } + return replace(request) + .map(resp -> toResourceResponse(resp, Document.class));} ); + } + + @Override + public Flux> deleteDocument(String documentLink, RequestOptions options) { + IDocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); + return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDocumentInternal(documentLink, options, requestRetryPolicy), requestRetryPolicy); + } + + private Flux> deleteDocumentInternal(String documentLink, RequestOptions options, + IDocumentClientRetryPolicy retryPolicyInstance) { + try { + if (StringUtils.isEmpty(documentLink)) { + throw new IllegalArgumentException("documentLink"); + } + + logger.debug("Deleting a Document. 
documentLink: [{}]", documentLink); + String path = Utils.joinPath(documentLink, null); + Map requestHeaders = this.getRequestHeaders(options); + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, + ResourceType.Document, path, requestHeaders, options); + + Mono collectionObs = collectionCache.resolveCollectionAsync(request); + + Mono requestObs = addPartitionKeyInformation(request, null, options, collectionObs); + + return requestObs.flux().flatMap(req -> { + if (retryPolicyInstance != null) { + retryPolicyInstance.onBeforeSendRequest(req); + } + return this.delete(req) + .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));}); + + } catch (Exception e) { + logger.debug("Failure in deleting a document due to [{}]", e.getMessage()); + return Flux.error(e); + } + } + + @Override + public Flux> readDocument(String documentLink, RequestOptions options) { + IDocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); + return ObservableHelper.inlineIfPossibleAsObs(() -> readDocumentInternal(documentLink, options, retryPolicyInstance), retryPolicyInstance); + } + + private Flux> readDocumentInternal(String documentLink, RequestOptions options, + IDocumentClientRetryPolicy retryPolicyInstance) { + try { + if (StringUtils.isEmpty(documentLink)) { + throw new IllegalArgumentException("documentLink"); + } + + logger.debug("Reading a Document. documentLink: [{}]", documentLink); + String path = Utils.joinPath(documentLink, null); + Map requestHeaders = this.getRequestHeaders(options); + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, + ResourceType.Document, path, requestHeaders, options); + + Mono collectionObs = this.collectionCache.resolveCollectionAsync(request); + + Mono requestObs = addPartitionKeyInformation(request, null, options, collectionObs); + + return requestObs.flux().flatMap(req -> { + if (retryPolicyInstance != null) { + retryPolicyInstance.onBeforeSendRequest(request); + } + return this.read(request).map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); + }); + + } catch (Exception e) { + logger.debug("Failure in reading a document due to [{}]", e.getMessage()); + return Flux.error(e); + } + } + + @Override + public Flux> readDocuments(String collectionLink, FeedOptions options) { + + if (StringUtils.isEmpty(collectionLink)) { + throw new IllegalArgumentException("collectionLink"); + } + + return queryDocuments(collectionLink, "SELECT * FROM r", options); + } + + @Override + public Flux> queryDocuments(String collectionLink, String query, + FeedOptions options) { + return queryDocuments(collectionLink, new SqlQuerySpec(query), options); + } + + private IDocumentQueryClient DocumentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl) { + + return new IDocumentQueryClient () { + + @Override + public RxCollectionCache getCollectionCache() { + return RxDocumentClientImpl.this.collectionCache; + } + + @Override + public RxPartitionKeyRangeCache getPartitionKeyRangeCache() { + return RxDocumentClientImpl.this.partitionKeyRangeCache; + } + + @Override + public IRetryPolicyFactory getResetSessionTokenRetryPolicy() { + return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy; + } + + @Override + public ConsistencyLevel getDefaultConsistencyLevelAsync() { + return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel(); + } + + @Override + public ConsistencyLevel 
getDesiredConsistencyLevelAsync() { + // TODO Auto-generated method stub + return RxDocumentClientImpl.this.consistencyLevel; + } + + @Override + public Mono executeQueryAsync(RxDocumentServiceRequest request) { + return RxDocumentClientImpl.this.query(request).single(); + } + + @Override + public QueryCompatibilityMode getQueryCompatibilityMode() { + // TODO Auto-generated method stub + return QueryCompatibilityMode.Default; + } + + @Override + public Mono readFeedAsync(RxDocumentServiceRequest request) { + // TODO Auto-generated method stub + return null; + } + }; + } + + @Override + public Flux> queryDocuments(String collectionLink, SqlQuerySpec querySpec, + FeedOptions options) { + return createQuery(collectionLink, querySpec, options, Document.class, ResourceType.Document); + } + + @Override + public Flux> queryDocumentChangeFeed(final String collectionLink, + final ChangeFeedOptions changeFeedOptions) { + + if (StringUtils.isEmpty(collectionLink)) { + throw new IllegalArgumentException("collectionLink"); + } + + ChangeFeedQueryImpl changeFeedQueryImpl = new ChangeFeedQueryImpl(this, ResourceType.Document, + Document.class, collectionLink, changeFeedOptions); + + return changeFeedQueryImpl.executeAsync(); + } + + @Override + public Flux> readPartitionKeyRanges(final String collectionLink, + FeedOptions options) { + + if (StringUtils.isEmpty(collectionLink)) { + throw new IllegalArgumentException("collectionLink"); + } + + return readFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class, + Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT)); + } + + private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure, + RequestOptions options, OperationType operationType) { + if (StringUtils.isEmpty(collectionLink)) { + throw new IllegalArgumentException("collectionLink"); + } + if (storedProcedure == null) { + throw new IllegalArgumentException("storedProcedure"); + } + + validateResource(storedProcedure); + + String path = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT); + Map requestHeaders = this.getRequestHeaders(options); + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, ResourceType.StoredProcedure, + path, storedProcedure, requestHeaders, options); + + return request; + } + + private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf, + RequestOptions options, OperationType operationType) { + if (StringUtils.isEmpty(collectionLink)) { + throw new IllegalArgumentException("collectionLink"); + } + if (udf == null) { + throw new IllegalArgumentException("udf"); + } + + validateResource(udf); + + String path = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT); + Map requestHeaders = this.getRequestHeaders(options); + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, + ResourceType.UserDefinedFunction, path, udf, requestHeaders, options); + + return request; + } + + @Override + public Flux> createStoredProcedure(String collectionLink, + StoredProcedure storedProcedure, RequestOptions options) { + IDocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); + return ObservableHelper.inlineIfPossibleAsObs(() -> createStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy), requestRetryPolicy); + } + + private Flux> createStoredProcedureInternal(String collectionLink, + 
StoredProcedure storedProcedure, RequestOptions options, IDocumentClientRetryPolicy retryPolicyInstance) { + // we are using an observable factory here + // observable will be created fresh upon subscription + // this is to ensure we capture most up to date information (e.g., + // session) + try { + + logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]", + collectionLink, storedProcedure.id()); + RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options, + OperationType.Create); + if (retryPolicyInstance != null) { + retryPolicyInstance.onBeforeSendRequest(request); + } + + return this.create(request).map(response -> toResourceResponse(response, StoredProcedure.class)); + + } catch (Exception e) { + // this is only in trace level to capture what's going on + logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e); + return Flux.error(e); + } + } + + @Override + public Flux> upsertStoredProcedure(String collectionLink, + StoredProcedure storedProcedure, RequestOptions options) { + IDocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); + return ObservableHelper.inlineIfPossibleAsObs(() -> upsertStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy), requestRetryPolicy); + } + + private Flux> upsertStoredProcedureInternal(String collectionLink, + StoredProcedure storedProcedure, RequestOptions options, IDocumentClientRetryPolicy retryPolicyInstance) { + // we are using an observable factory here + // observable will be created fresh upon subscription + // this is to ensure we capture most up to date information (e.g., + // session) + try { + + logger.debug("Upserting a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]", + collectionLink, storedProcedure.id()); + RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options, + OperationType.Upsert); + if (retryPolicyInstance != null) { + retryPolicyInstance.onBeforeSendRequest(request); + } + + return this.upsert(request).map(response -> toResourceResponse(response, StoredProcedure.class)); + + } catch (Exception e) { + // this is only in trace level to capture what's going on + logger.debug("Failure in upserting a StoredProcedure due to [{}]", e.getMessage(), e); + return Flux.error(e); + } + } + + @Override + public Flux> replaceStoredProcedure(StoredProcedure storedProcedure, + RequestOptions options) { + IDocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); + return ObservableHelper.inlineIfPossibleAsObs(() -> replaceStoredProcedureInternal(storedProcedure, options, requestRetryPolicy), requestRetryPolicy); + } + + private Flux> replaceStoredProcedureInternal(StoredProcedure storedProcedure, + RequestOptions options, IDocumentClientRetryPolicy retryPolicyInstance) { + try { + + if (storedProcedure == null) { + throw new IllegalArgumentException("storedProcedure"); + } + logger.debug("Replacing a StoredProcedure. 
storedProcedure id [{}]", storedProcedure.id()); + + RxDocumentClientImpl.validateResource(storedProcedure); + + String path = Utils.joinPath(storedProcedure.selfLink(), null); + Map requestHeaders = getRequestHeaders(options); + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, + ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options); + + if (retryPolicyInstance != null) { + retryPolicyInstance.onBeforeSendRequest(request); + } + + return this.replace(request).map(response -> toResourceResponse(response, StoredProcedure.class)); + + } catch (Exception e) { + logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e); + return Flux.error(e); + } + } + + @Override + public Flux> deleteStoredProcedure(String storedProcedureLink, + RequestOptions options) { + IDocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); + return ObservableHelper.inlineIfPossibleAsObs(() -> deleteStoredProcedureInternal(storedProcedureLink, options, requestRetryPolicy), requestRetryPolicy); + } + + private Flux> deleteStoredProcedureInternal(String storedProcedureLink, + RequestOptions options, IDocumentClientRetryPolicy retryPolicyInstance) { + // we are using an observable factory here + // observable will be created fresh upon subscription + // this is to ensure we capture most up to date information (e.g., + // session) + try { + + if (StringUtils.isEmpty(storedProcedureLink)) { + throw new IllegalArgumentException("storedProcedureLink"); + } + + logger.debug("Deleting a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink); + String path = Utils.joinPath(storedProcedureLink, null); + Map requestHeaders = this.getRequestHeaders(options); + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, + ResourceType.StoredProcedure, path, requestHeaders, options); + + if (retryPolicyInstance != null) { + retryPolicyInstance.onBeforeSendRequest(request); + } + + return this.delete(request).map(response -> toResourceResponse(response, StoredProcedure.class)); + + } catch (Exception e) { + // this is only in trace level to capture what's going on + logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e); + return Flux.error(e); + } + } + + @Override + public Flux> readStoredProcedure(String storedProcedureLink, + RequestOptions options) { + IDocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); + return ObservableHelper.inlineIfPossibleAsObs(() -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicyInstance), retryPolicyInstance); + } + + private Flux> readStoredProcedureInternal(String storedProcedureLink, + RequestOptions options, IDocumentClientRetryPolicy retryPolicyInstance) { + + // we are using an observable factory here + // observable will be created fresh upon subscription + // this is to ensure we capture most up to date information (e.g., + // session) + try { + + if (StringUtils.isEmpty(storedProcedureLink)) { + throw new IllegalArgumentException("storedProcedureLink"); + } + + logger.debug("Reading a StoredProcedure. 
storedProcedureLink [{}]", storedProcedureLink); + String path = Utils.joinPath(storedProcedureLink, null); + Map requestHeaders = this.getRequestHeaders(options); + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, + ResourceType.StoredProcedure, path, requestHeaders, options); + + if (retryPolicyInstance != null){ + retryPolicyInstance.onBeforeSendRequest(request); + } + + return this.read(request).map(response -> toResourceResponse(response, StoredProcedure.class)); + + } catch (Exception e) { + // this is only in trace level to capture what's going on + logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e); + return Flux.error(e); + } + } + + @Override + public Flux> readStoredProcedures(String collectionLink, + FeedOptions options) { + + if (StringUtils.isEmpty(collectionLink)) { + throw new IllegalArgumentException("collectionLink"); + } + + return readFeed(options, ResourceType.StoredProcedure, StoredProcedure.class, + Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT)); + } + + @Override + public Flux> queryStoredProcedures(String collectionLink, String query, + FeedOptions options) { + return queryStoredProcedures(collectionLink, new SqlQuerySpec(query), options); + } + + @Override + public Flux> queryStoredProcedures(String collectionLink, + SqlQuerySpec querySpec, FeedOptions options) { + return createQuery(collectionLink, querySpec, options, StoredProcedure.class, ResourceType.StoredProcedure); + } + + @Override + public Flux executeStoredProcedure(String storedProcedureLink, + Object[] procedureParams) { + return this.executeStoredProcedure(storedProcedureLink, null, procedureParams); + } + + @Override + public Flux executeStoredProcedure(String storedProcedureLink, + RequestOptions options, Object[] procedureParams) { + return ObservableHelper.inlineIfPossibleAsObs(() -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams), this.resetSessionTokenRetryPolicy.getRequestPolicy()); + } + + private Flux executeStoredProcedureInternal(String storedProcedureLink, + RequestOptions options, Object[] procedureParams) { + + try { + logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink); + String path = Utils.joinPath(storedProcedureLink, null); + + Map requestHeaders = getRequestHeaders(options); + requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON); + + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.ExecuteJavaScript, + ResourceType.StoredProcedure, path, + procedureParams != null ? 
RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "", + requestHeaders, options); + + Flux reqObs = addPartitionKeyInformation(request, null, options).flux(); + return reqObs.flatMap(req -> create(request) + .map(response -> { + this.captureSessionToken(request, response); + return toStoredProcedureResponse(response); + })); + + } catch (Exception e) { + logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e); + return Flux.error(e); + } + } + + @Override + public Flux> createTrigger(String collectionLink, Trigger trigger, + RequestOptions options) { + IDocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); + return ObservableHelper.inlineIfPossibleAsObs(() -> createTriggerInternal(collectionLink, trigger, options, retryPolicyInstance), retryPolicyInstance); + } + + private Flux> createTriggerInternal(String collectionLink, Trigger trigger, + RequestOptions options, IDocumentClientRetryPolicy retryPolicyInstance) { + try { + + logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]", collectionLink, + trigger.id()); + RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options, + OperationType.Create); + if (retryPolicyInstance != null){ + retryPolicyInstance.onBeforeSendRequest(request); + } + + return this.create(request).map(response -> toResourceResponse(response, Trigger.class)); + + } catch (Exception e) { + logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e); + return Flux.error(e); + } + } + + @Override + public Flux> upsertTrigger(String collectionLink, Trigger trigger, + RequestOptions options) { + IDocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); + return ObservableHelper.inlineIfPossibleAsObs(() -> upsertTriggerInternal(collectionLink, trigger, options, retryPolicyInstance), retryPolicyInstance); + } + + private Flux> upsertTriggerInternal(String collectionLink, Trigger trigger, + RequestOptions options, IDocumentClientRetryPolicy retryPolicyInstance) { + try { + + logger.debug("Upserting a Trigger. 
collectionLink [{}], trigger id [{}]", collectionLink, + trigger.id()); + RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options, + OperationType.Upsert); + if (retryPolicyInstance != null){ + retryPolicyInstance.onBeforeSendRequest(request); + } + + return this.upsert(request).map(response -> toResourceResponse(response, Trigger.class)); + + } catch (Exception e) { + logger.debug("Failure in upserting a Trigger due to [{}]", e.getMessage(), e); + return Flux.error(e); + } + } + + private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options, + OperationType operationType) { + if (StringUtils.isEmpty(collectionLink)) { + throw new IllegalArgumentException("collectionLink"); + } + if (trigger == null) { + throw new IllegalArgumentException("trigger"); + } + + RxDocumentClientImpl.validateResource(trigger); + + String path = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT); + Map requestHeaders = getRequestHeaders(options); + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, ResourceType.Trigger, path, + trigger, requestHeaders, options); + + return request; + } + + @Override + public Flux> replaceTrigger(Trigger trigger, RequestOptions options) { + IDocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); + return ObservableHelper.inlineIfPossibleAsObs(() -> replaceTriggerInternal(trigger, options, retryPolicyInstance), retryPolicyInstance); + } + + private Flux> replaceTriggerInternal(Trigger trigger, RequestOptions options, + IDocumentClientRetryPolicy retryPolicyInstance) { + + try { + if (trigger == null) { + throw new IllegalArgumentException("trigger"); + } + + logger.debug("Replacing a Trigger. trigger id [{}]", trigger.id()); + RxDocumentClientImpl.validateResource(trigger); + + String path = Utils.joinPath(trigger.selfLink(), null); + Map requestHeaders = getRequestHeaders(options); + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, + ResourceType.Trigger, path, trigger, requestHeaders, options); + + if (retryPolicyInstance != null){ + retryPolicyInstance.onBeforeSendRequest(request); + } + + return this.replace(request).map(response -> toResourceResponse(response, Trigger.class)); + + } catch (Exception e) { + logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e); + return Flux.error(e); + } + } + + @Override + public Flux> deleteTrigger(String triggerLink, RequestOptions options) { + IDocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); + return ObservableHelper.inlineIfPossibleAsObs(() -> deleteTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance); + } + + private Flux> deleteTriggerInternal(String triggerLink, RequestOptions options, IDocumentClientRetryPolicy retryPolicyInstance) { + try { + if (StringUtils.isEmpty(triggerLink)) { + throw new IllegalArgumentException("triggerLink"); + } + + logger.debug("Deleting a Trigger. 
triggerLink [{}]", triggerLink); + String path = Utils.joinPath(triggerLink, null); + Map requestHeaders = getRequestHeaders(options); + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, + ResourceType.Trigger, path, requestHeaders, options); + + if (retryPolicyInstance != null){ + retryPolicyInstance.onBeforeSendRequest(request); + } + + return this.delete(request).map(response -> toResourceResponse(response, Trigger.class)); + + } catch (Exception e) { + logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e); + return Flux.error(e); + } + } + + @Override + public Flux> readTrigger(String triggerLink, RequestOptions options) { + IDocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); + return ObservableHelper.inlineIfPossibleAsObs(() -> readTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance); + } + + private Flux> readTriggerInternal(String triggerLink, RequestOptions options, + IDocumentClientRetryPolicy retryPolicyInstance) { + try { + if (StringUtils.isEmpty(triggerLink)) { + throw new IllegalArgumentException("triggerLink"); + } + + logger.debug("Reading a Trigger. triggerLink [{}]", triggerLink); + String path = Utils.joinPath(triggerLink, null); + Map requestHeaders = getRequestHeaders(options); + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, + ResourceType.Trigger, path, requestHeaders, options); + + if (retryPolicyInstance != null){ + retryPolicyInstance.onBeforeSendRequest(request); + } + + return this.read(request).map(response -> toResourceResponse(response, Trigger.class)); + + } catch (Exception e) { + logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e); + return Flux.error(e); + } + } + + @Override + public Flux> readTriggers(String collectionLink, FeedOptions options) { + + if (StringUtils.isEmpty(collectionLink)) { + throw new IllegalArgumentException("collectionLink"); + } + + return readFeed(options, ResourceType.Trigger, Trigger.class, + Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT)); + } + + @Override + public Flux> queryTriggers(String collectionLink, String query, + FeedOptions options) { + return queryTriggers(collectionLink, new SqlQuerySpec(query), options); + } + + @Override + public Flux> queryTriggers(String collectionLink, SqlQuerySpec querySpec, + FeedOptions options) { + return createQuery(collectionLink, querySpec, options, Trigger.class, ResourceType.Trigger); + } + + @Override + public Flux> createUserDefinedFunction(String collectionLink, + UserDefinedFunction udf, RequestOptions options) { + IDocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); + return ObservableHelper.inlineIfPossibleAsObs(() -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance), retryPolicyInstance); + } + + private Flux> createUserDefinedFunctionInternal(String collectionLink, + UserDefinedFunction udf, RequestOptions options, IDocumentClientRetryPolicy retryPolicyInstance) { + // we are using an observable factory here + // observable will be created fresh upon subscription + // this is to ensure we capture most up to date information (e.g., + // session) + try { + logger.debug("Creating a UserDefinedFunction. 
collectionLink [{}], udf id [{}]", collectionLink, + udf.id()); + RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options, + OperationType.Create); + if (retryPolicyInstance != null){ + retryPolicyInstance.onBeforeSendRequest(request); + } + + return this.create(request).map(response -> toResourceResponse(response, UserDefinedFunction.class)); + + } catch (Exception e) { + // this is only in trace level to capture what's going on + logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e); + return Flux.error(e); + } + } + + @Override + public Flux> upsertUserDefinedFunction(String collectionLink, + UserDefinedFunction udf, RequestOptions options) { + IDocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); + return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance), retryPolicyInstance); + } + + private Flux> upsertUserDefinedFunctionInternal(String collectionLink, + UserDefinedFunction udf, RequestOptions options, IDocumentClientRetryPolicy retryPolicyInstance) { + // we are using an observable factory here + // observable will be created fresh upon subscription + // this is to ensure we capture most up to date information (e.g., + // session) + try { + logger.debug("Upserting a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink, + udf.id()); + RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options, + OperationType.Upsert); + if (retryPolicyInstance != null){ + retryPolicyInstance.onBeforeSendRequest(request); + } + + return this.upsert(request).map(response -> toResourceResponse(response, UserDefinedFunction.class)); + + } catch (Exception e) { + // this is only in trace level to capture what's going on + logger.debug("Failure in upserting a UserDefinedFunction due to [{}]", e.getMessage(), e); + return Flux.error(e); + } + } + + @Override + public Flux> replaceUserDefinedFunction(UserDefinedFunction udf, + RequestOptions options) { + IDocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); + return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserDefinedFunctionInternal(udf, options, retryPolicyInstance), retryPolicyInstance); + } + + private Flux> replaceUserDefinedFunctionInternal(UserDefinedFunction udf, + RequestOptions options, IDocumentClientRetryPolicy retryPolicyInstance) { + // we are using an observable factory here + // observable will be created fresh upon subscription + // this is to ensure we capture most up to date information (e.g., + // session) + try { + if (udf == null) { + throw new IllegalArgumentException("udf"); + } + + logger.debug("Replacing a UserDefinedFunction. 
udf id [{}]", udf.id()); + validateResource(udf); + + String path = Utils.joinPath(udf.selfLink(), null); + Map requestHeaders = this.getRequestHeaders(options); + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, + ResourceType.UserDefinedFunction, path, udf, requestHeaders, options); + + if (retryPolicyInstance != null){ + retryPolicyInstance.onBeforeSendRequest(request); + } + + return this.replace(request).map(response -> toResourceResponse(response, UserDefinedFunction.class)); + + } catch (Exception e) { + // this is only in trace level to capture what's going on + logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e); + return Flux.error(e); + } + } + + @Override + public Flux> deleteUserDefinedFunction(String udfLink, + RequestOptions options) { + IDocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); + return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance); + } + + private Flux> deleteUserDefinedFunctionInternal(String udfLink, + RequestOptions options, IDocumentClientRetryPolicy retryPolicyInstance) { + // we are using an observable factory here + // observable will be created fresh upon subscription + // this is to ensure we capture most up to date information (e.g., + // session) + try { + if (StringUtils.isEmpty(udfLink)) { + throw new IllegalArgumentException("udfLink"); + } + + logger.debug("Deleting a UserDefinedFunction. udfLink [{}]", udfLink); + String path = Utils.joinPath(udfLink, null); + Map requestHeaders = this.getRequestHeaders(options); + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, + ResourceType.UserDefinedFunction, path, requestHeaders, options); + + if (retryPolicyInstance != null){ + retryPolicyInstance.onBeforeSendRequest(request); + } + + return this.delete(request).map(response -> toResourceResponse(response, UserDefinedFunction.class)); + + } catch (Exception e) { + // this is only in trace level to capture what's going on + logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e); + return Flux.error(e); + } + } + + @Override + public Flux> readUserDefinedFunction(String udfLink, + RequestOptions options) { + IDocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); + return ObservableHelper.inlineIfPossibleAsObs(() -> readUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance); + } + + private Flux> readUserDefinedFunctionInternal(String udfLink, + RequestOptions options, IDocumentClientRetryPolicy retryPolicyInstance) { + // we are using an observable factory here + // observable will be created fresh upon subscription + // this is to ensure we capture most up to date information (e.g., + // session) + try { + if (StringUtils.isEmpty(udfLink)) { + throw new IllegalArgumentException("udfLink"); + } + + logger.debug("Reading a UserDefinedFunction. 
udfLink [{}]", udfLink); + String path = Utils.joinPath(udfLink, null); + Map requestHeaders = this.getRequestHeaders(options); + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, + ResourceType.UserDefinedFunction, path, requestHeaders, options); + + if (retryPolicyInstance != null) { + retryPolicyInstance.onBeforeSendRequest(request); + } + + return this.read(request).map(response -> toResourceResponse(response, UserDefinedFunction.class)); + + } catch (Exception e) { + // this is only in trace level to capture what's going on + logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e); + return Flux.error(e); + } + } + + @Override + public Flux> readUserDefinedFunctions(String collectionLink, + FeedOptions options) { + + if (StringUtils.isEmpty(collectionLink)) { + throw new IllegalArgumentException("collectionLink"); + } + + return readFeed(options, ResourceType.UserDefinedFunction, UserDefinedFunction.class, + Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT)); + } + + @Override + public Flux> queryUserDefinedFunctions(String collectionLink, + String query, FeedOptions options) { + return queryUserDefinedFunctions(collectionLink, new SqlQuerySpec(query), options); + } + + @Override + public Flux> queryUserDefinedFunctions(String collectionLink, + SqlQuerySpec querySpec, FeedOptions options) { + return createQuery(collectionLink, querySpec, options, UserDefinedFunction.class, ResourceType.UserDefinedFunction); + } + + @Override + public Flux> readConflict(String conflictLink, RequestOptions options) { + IDocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); + return ObservableHelper.inlineIfPossibleAsObs(() -> readConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance); + } + + private Flux> readConflictInternal(String conflictLink, RequestOptions options, IDocumentClientRetryPolicy retryPolicyInstance) { + + try { + if (StringUtils.isEmpty(conflictLink)) { + throw new IllegalArgumentException("conflictLink"); + } + + logger.debug("Reading a Conflict. 
conflictLink [{}]", conflictLink); + String path = Utils.joinPath(conflictLink, null); + Map requestHeaders = getRequestHeaders(options); + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, + ResourceType.Conflict, path, requestHeaders, options); + + Flux reqObs = addPartitionKeyInformation(request, null, options).flux(); + + return reqObs.flatMap(req -> { + if (retryPolicyInstance != null) { + retryPolicyInstance.onBeforeSendRequest(request); + } + return this.read(request).map(response -> toResourceResponse(response, Conflict.class)); + }); + + } catch (Exception e) { + logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e); + return Flux.error(e); + } + } + + @Override + public Flux> readConflicts(String collectionLink, FeedOptions options) { + + if (StringUtils.isEmpty(collectionLink)) { + throw new IllegalArgumentException("collectionLink"); + } + + return readFeed(options, ResourceType.Conflict, Conflict.class, + Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT)); + } + + @Override + public Flux> queryConflicts(String collectionLink, String query, + FeedOptions options) { + return queryConflicts(collectionLink, new SqlQuerySpec(query), options); + } + + @Override + public Flux> queryConflicts(String collectionLink, SqlQuerySpec querySpec, + FeedOptions options) { + return createQuery(collectionLink, querySpec, options, Conflict.class, ResourceType.Conflict); + } + + @Override + public Flux> deleteConflict(String conflictLink, RequestOptions options) { + IDocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); + return ObservableHelper.inlineIfPossibleAsObs(() -> deleteConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance); + } + + private Flux> deleteConflictInternal(String conflictLink, RequestOptions options, + IDocumentClientRetryPolicy retryPolicyInstance) { + + try { + if (StringUtils.isEmpty(conflictLink)) { + throw new IllegalArgumentException("conflictLink"); + } + + logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink); + String path = Utils.joinPath(conflictLink, null); + Map requestHeaders = getRequestHeaders(options); + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, + ResourceType.Conflict, path, requestHeaders, options); + + Flux reqObs = addPartitionKeyInformation(request, null, options).flux(); + return reqObs.flatMap(req -> { + if (retryPolicyInstance != null) { + retryPolicyInstance.onBeforeSendRequest(request); + } + + return this.delete(request).map(response -> toResourceResponse(response, Conflict.class)); + }); + + } catch (Exception e) { + logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e); + return Flux.error(e); + } + } + + @Override + public Flux> createUser(String databaseLink, User user, RequestOptions options) { + return ObservableHelper.inlineIfPossibleAsObs(() -> createUserInternal(databaseLink, user, options), this.resetSessionTokenRetryPolicy.getRequestPolicy()); + } + + private Flux> createUserInternal(String databaseLink, User user, RequestOptions options) { + try { + logger.debug("Creating a User. 
databaseLink [{}], user id [{}]", databaseLink, user.id()); + RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create); + return this.create(request).map(response -> toResourceResponse(response, User.class)); + + } catch (Exception e) { + logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e); + return Flux.error(e); + } + } + + @Override + public Flux> upsertUser(String databaseLink, User user, RequestOptions options) { + IDocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); + return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserInternal(databaseLink, user, options, retryPolicyInstance), retryPolicyInstance); + } + + private Flux> upsertUserInternal(String databaseLink, User user, RequestOptions options, + IDocumentClientRetryPolicy retryPolicyInstance) { + try { + logger.debug("Upserting a User. databaseLink [{}], user id [{}]", databaseLink, user.id()); + RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert); + if (retryPolicyInstance != null) { + retryPolicyInstance.onBeforeSendRequest(request); + } + + return this.upsert(request).map(response -> toResourceResponse(response, User.class)); + + } catch (Exception e) { + logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e); + return Flux.error(e); + } + } + + private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options, + OperationType operationType) { + if (StringUtils.isEmpty(databaseLink)) { + throw new IllegalArgumentException("databaseLink"); + } + if (user == null) { + throw new IllegalArgumentException("user"); + } + + RxDocumentClientImpl.validateResource(user); + + String path = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT); + Map requestHeaders = getRequestHeaders(options); + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, ResourceType.User, path, user, + requestHeaders, options); + + return request; + } + + @Override + public Flux> replaceUser(User user, RequestOptions options) { + IDocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); + return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserInternal(user, options, retryPolicyInstance), retryPolicyInstance); + } + + private Flux> replaceUserInternal(User user, RequestOptions options, IDocumentClientRetryPolicy retryPolicyInstance) { + try { + if (user == null) { + throw new IllegalArgumentException("user"); + } + logger.debug("Replacing a User. 
user id [{}]", user.id()); + RxDocumentClientImpl.validateResource(user); + + String path = Utils.joinPath(user.selfLink(), null); + Map requestHeaders = getRequestHeaders(options); + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, + ResourceType.User, path, user, requestHeaders, options); + if (retryPolicyInstance != null) { + retryPolicyInstance.onBeforeSendRequest(request); + } + + return this.replace(request).map(response -> toResourceResponse(response, User.class)); + + } catch (Exception e) { + logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e); + return Flux.error(e); + } + } + + + public Flux> deleteUser(String userLink, RequestOptions options) { + IDocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); + return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance); + } + + private Flux> deleteUserInternal(String userLink, RequestOptions options, + IDocumentClientRetryPolicy retryPolicyInstance) { + + try { + if (StringUtils.isEmpty(userLink)) { + throw new IllegalArgumentException("userLink"); + } + logger.debug("Deleting a User. userLink [{}]", userLink); + String path = Utils.joinPath(userLink, null); + Map requestHeaders = getRequestHeaders(options); + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, + ResourceType.User, path, requestHeaders, options); + + if (retryPolicyInstance != null) { + retryPolicyInstance.onBeforeSendRequest(request); + } + + return this.delete(request).map(response -> toResourceResponse(response, User.class)); + + } catch (Exception e) { + logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e); + return Flux.error(e); + } + } + @Override + public Flux> readUser(String userLink, RequestOptions options) { + IDocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); + return ObservableHelper.inlineIfPossibleAsObs(() -> readUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance); + } + + private Flux> readUserInternal(String userLink, RequestOptions options, IDocumentClientRetryPolicy retryPolicyInstance) { + try { + if (StringUtils.isEmpty(userLink)) { + throw new IllegalArgumentException("userLink"); + } + logger.debug("Reading a User. 
userLink [{}]", userLink); + String path = Utils.joinPath(userLink, null); + Map requestHeaders = getRequestHeaders(options); + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, + ResourceType.User, path, requestHeaders, options); + + if (retryPolicyInstance != null) { + retryPolicyInstance.onBeforeSendRequest(request); + } + return this.read(request).map(response -> toResourceResponse(response, User.class)); + + } catch (Exception e) { + logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e); + return Flux.error(e); + } + } + + @Override + public Flux> readUsers(String databaseLink, FeedOptions options) { + + if (StringUtils.isEmpty(databaseLink)) { + throw new IllegalArgumentException("databaseLink"); + } + + return readFeed(options, ResourceType.User, User.class, + Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT)); + } + + @Override + public Flux> queryUsers(String databaseLink, String query, FeedOptions options) { + return queryUsers(databaseLink, new SqlQuerySpec(query), options); + } + + @Override + public Flux> queryUsers(String databaseLink, SqlQuerySpec querySpec, + FeedOptions options) { + return createQuery(databaseLink, querySpec, options, User.class, ResourceType.User); + } + + @Override + public Flux> createPermission(String userLink, Permission permission, + RequestOptions options) { + return ObservableHelper.inlineIfPossibleAsObs(() -> createPermissionInternal(userLink, permission, options), this.resetSessionTokenRetryPolicy.getRequestPolicy()); + } + + private Flux> createPermissionInternal(String userLink, Permission permission, + RequestOptions options) { + + try { + logger.debug("Creating a Permission. userLink [{}], permission id [{}]", userLink, permission.id()); + RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options, + OperationType.Create); + return this.create(request).map(response -> toResourceResponse(response, Permission.class)); + + } catch (Exception e) { + logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e); + return Flux.error(e); + } + } + + @Override + public Flux> upsertPermission(String userLink, Permission permission, + RequestOptions options) { + IDocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); + return ObservableHelper.inlineIfPossibleAsObs(() -> upsertPermissionInternal(userLink, permission, options, retryPolicyInstance), retryPolicyInstance); + } + + private Flux> upsertPermissionInternal(String userLink, Permission permission, + RequestOptions options, IDocumentClientRetryPolicy retryPolicyInstance) { + + try { + logger.debug("Upserting a Permission. 
userLink [{}], permission id [{}]", userLink, permission.id()); + RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options, + OperationType.Upsert); + if (retryPolicyInstance != null) { + retryPolicyInstance.onBeforeSendRequest(request); + } + + return this.upsert(request).map(response -> toResourceResponse(response, Permission.class)); + + } catch (Exception e) { + logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e); + return Flux.error(e); + } + } + + private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission, + RequestOptions options, OperationType operationType) { + if (StringUtils.isEmpty(userLink)) { + throw new IllegalArgumentException("userLink"); + } + if (permission == null) { + throw new IllegalArgumentException("permission"); + } + + RxDocumentClientImpl.validateResource(permission); + + String path = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT); + Map requestHeaders = getRequestHeaders(options); + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, ResourceType.Permission, path, + permission, requestHeaders, options); + + return request; + } + + @Override + public Flux> replacePermission(Permission permission, RequestOptions options) { + IDocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); + return ObservableHelper.inlineIfPossibleAsObs(() -> replacePermissionInternal(permission, options, retryPolicyInstance), retryPolicyInstance); + } + + private Flux> replacePermissionInternal(Permission permission, RequestOptions options, IDocumentClientRetryPolicy retryPolicyInstance) { + try { + if (permission == null) { + throw new IllegalArgumentException("permission"); + } + logger.debug("Replacing a Permission. permission id [{}]", permission.id()); + RxDocumentClientImpl.validateResource(permission); + + String path = Utils.joinPath(permission.selfLink(), null); + Map requestHeaders = getRequestHeaders(options); + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, + ResourceType.Permission, path, permission, requestHeaders, options); + + if (retryPolicyInstance != null) { + retryPolicyInstance.onBeforeSendRequest(request); + } + + return this.replace(request).map(response -> toResourceResponse(response, Permission.class)); + + } catch (Exception e) { + logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e); + return Flux.error(e); + } + } + + @Override + public Flux> deletePermission(String permissionLink, RequestOptions options) { + IDocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); + return ObservableHelper.inlineIfPossibleAsObs(() -> deletePermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance); + } + + private Flux> deletePermissionInternal(String permissionLink, RequestOptions options, + IDocumentClientRetryPolicy retryPolicyInstance) { + + try { + if (StringUtils.isEmpty(permissionLink)) { + throw new IllegalArgumentException("permissionLink"); + } + logger.debug("Deleting a Permission. 
permissionLink [{}]", permissionLink); + String path = Utils.joinPath(permissionLink, null); + Map requestHeaders = getRequestHeaders(options); + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, + ResourceType.Permission, path, requestHeaders, options); + + if (retryPolicyInstance != null) { + retryPolicyInstance.onBeforeSendRequest(request); + } + + return this.delete(request).map(response -> toResourceResponse(response, Permission.class)); + + } catch (Exception e) { + logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e); + return Flux.error(e); + } + } + + @Override + public Flux> readPermission(String permissionLink, RequestOptions options) { + IDocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); + return ObservableHelper.inlineIfPossibleAsObs(() -> readPermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance); + } + + private Flux> readPermissionInternal(String permissionLink, RequestOptions options, IDocumentClientRetryPolicy retryPolicyInstance ) { + try { + if (StringUtils.isEmpty(permissionLink)) { + throw new IllegalArgumentException("permissionLink"); + } + logger.debug("Reading a Permission. permissionLink [{}]", permissionLink); + String path = Utils.joinPath(permissionLink, null); + Map requestHeaders = getRequestHeaders(options); + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, + ResourceType.Permission, path, requestHeaders, options); + + if (retryPolicyInstance != null) { + retryPolicyInstance.onBeforeSendRequest(request); + } + return this.read(request).map(response -> toResourceResponse(response, Permission.class)); + + } catch (Exception e) { + logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e); + return Flux.error(e); + } + } + + @Override + public Flux> readPermissions(String userLink, FeedOptions options) { + + if (StringUtils.isEmpty(userLink)) { + throw new IllegalArgumentException("userLink"); + } + + return readFeed(options, ResourceType.Permission, Permission.class, + Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT)); + } + + @Override + public Flux> queryPermissions(String userLink, String query, + FeedOptions options) { + return queryPermissions(userLink, new SqlQuerySpec(query), options); + } + + @Override + public Flux> queryPermissions(String userLink, SqlQuerySpec querySpec, + FeedOptions options) { + return createQuery(userLink, querySpec, options, Permission.class, ResourceType.Permission); + } + + @Override + public Flux> replaceOffer(Offer offer) { + return ObservableHelper.inlineIfPossibleAsObs(() -> replaceOfferInternal(offer), this.resetSessionTokenRetryPolicy.getRequestPolicy()); + } + + private Flux> replaceOfferInternal(Offer offer) { + try { + if (offer == null) { + throw new IllegalArgumentException("offer"); + } + logger.debug("Replacing an Offer. 
offer id [{}]", offer.id()); + RxDocumentClientImpl.validateResource(offer); + + String path = Utils.joinPath(offer.selfLink(), null); + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, + ResourceType.Offer, path, offer, null, null); + return this.replace(request).map(response -> toResourceResponse(response, Offer.class)); + + } catch (Exception e) { + logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e); + return Flux.error(e); + } + } + + @Override + public Flux> readOffer(String offerLink) { + IDocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); + return ObservableHelper.inlineIfPossibleAsObs(() -> readOfferInternal(offerLink, retryPolicyInstance), retryPolicyInstance); + } + + private Flux> readOfferInternal(String offerLink, IDocumentClientRetryPolicy retryPolicyInstance) { + try { + if (StringUtils.isEmpty(offerLink)) { + throw new IllegalArgumentException("offerLink"); + } + logger.debug("Reading an Offer. offerLink [{}]", offerLink); + String path = Utils.joinPath(offerLink, null); + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, + ResourceType.Offer, path, (HashMap)null, null); + + if (retryPolicyInstance != null) { + retryPolicyInstance.onBeforeSendRequest(request); + } + + return this.read(request).map(response -> toResourceResponse(response, Offer.class)); + + } catch (Exception e) { + logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e); + return Flux.error(e); + } + } + + @Override + public Flux> readOffers(FeedOptions options) { + return readFeed(options, ResourceType.Offer, Offer.class, + Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null)); + } + + private Flux> readFeedCollectionChild(FeedOptions options, ResourceType resourceType, + Class klass, String resourceLink) { + if (options == null) { + options = new FeedOptions(); + } + + int maxPageSize = options.maxItemCount() != null ? options.maxItemCount() : -1; + + final FeedOptions finalFeedOptions = options; + RequestOptions requestOptions = new RequestOptions(); + requestOptions.setPartitionKey(options.partitionKey()); + BiFunction createRequestFunc = (continuationToken, pageSize) -> { + Map requestHeaders = new HashMap<>(); + if (continuationToken != null) { + requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken); + } + requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize)); + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.ReadFeed, + resourceType, resourceLink, requestHeaders, finalFeedOptions); + return request; + }; + + Function>> executeFunc = request -> { + return ObservableHelper.inlineIfPossibleAsObs(() -> { + Mono collectionObs = this.collectionCache.resolveCollectionAsync(request); + Mono requestObs = this.addPartitionKeyInformation(request, null, requestOptions, collectionObs); + + return requestObs.flux().flatMap(req -> this.readFeed(req) + .map(response -> toFeedResponsePage(response, klass))); + }, this.resetSessionTokenRetryPolicy.getRequestPolicy()); + }; + + return Paginator.getPaginatedQueryResultAsObservable(options, createRequestFunc, executeFunc, klass, maxPageSize); + } + + private Flux> readFeed(FeedOptions options, ResourceType resourceType, Class klass, String resourceLink) { + if (options == null) { + options = new FeedOptions(); + } + + int maxPageSize = options.maxItemCount() != null ? 
options.maxItemCount() : -1; + final FeedOptions finalFeedOptions = options; + BiFunction createRequestFunc = (continuationToken, pageSize) -> { + Map requestHeaders = new HashMap<>(); + if (continuationToken != null) { + requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken); + } + requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize)); + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.ReadFeed, + resourceType, resourceLink, requestHeaders, finalFeedOptions); + return request; + }; + + Function>> executeFunc = request -> { + return ObservableHelper.inlineIfPossibleAsObs(() -> readFeed(request).map(response -> toFeedResponsePage(response, klass)), + this.resetSessionTokenRetryPolicy.getRequestPolicy()); + }; + + return Paginator.getPaginatedQueryResultAsObservable(options, createRequestFunc, executeFunc, klass, maxPageSize); + } + + @Override + public Flux> queryOffers(String query, FeedOptions options) { + return queryOffers(new SqlQuerySpec(query), options); + } + + @Override + public Flux> queryOffers(SqlQuerySpec querySpec, FeedOptions options) { + return createQuery(null, querySpec, options, Offer.class, ResourceType.Offer); + } + + @Override + public Flux getDatabaseAccount() { + return ObservableHelper.inlineIfPossibleAsObs(() -> getDatabaseAccountInternal(), this.resetSessionTokenRetryPolicy.getRequestPolicy()); + } + + private Flux getDatabaseAccountInternal() { + try { + logger.debug("Getting Database Account"); + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, + ResourceType.DatabaseAccount, "", // path + (HashMap) null, + null); + return this.read(request).map(response -> toDatabaseAccount(response)); + + } catch (Exception e) { + logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e); + return Flux.error(e); + } + } + + public Object getSession() { + return this.sessionContainer; + } + + public void setSession(Object sessionContainer) { + this.sessionContainer = (SessionContainer) sessionContainer; + } + + public RxPartitionKeyRangeCache getPartitionKeyRangeCache() { + return partitionKeyRangeCache; + } + + public Flux getDatabaseAccountFromEndpoint(URI endpoint) { + return Flux.defer(() -> { + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, + ResourceType.DatabaseAccount, "", null, (Object) null); + this.populateHeaders(request, HttpConstants.HttpMethods.GET); + + request.setEndpointOverride(endpoint); + return this.gatewayProxy.processMessage(request).doOnError(e -> { + String message = String.format("Failed to retrieve database account information. %s", + e.getCause() != null + ? e.getCause().toString() + : e.toString()); + logger.warn(message); + }).map(rsp -> rsp.getResource(DatabaseAccount.class)) + .doOnNext(databaseAccount -> { + this.useMultipleWriteLocations = this.connectionPolicy.usingMultipleWriteLocations() + && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount); + }); + }); + } + + /** + * Certain requests must be routed through gateway even when the client connectivity mode is direct. 
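+ * A descriptive note on the routing rules implemented below: requests flagged UseGatewayMode, Offer and PartitionKeyRange requests, script requests other than ExecuteJavaScript, creates/upserts of databases, users, collections and permissions, deletes of databases, users and collections, reads and replaces of collections, and queries on collection children not yet scoped to a partition key range all go to the gateway proxy; every other request is served by the configured store model (direct transport when the client is configured for it).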
+ * + * @param request + * @return RxStoreModel + */ + private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) { + // If a request is configured to always use GATEWAY mode(in some cases when targeting .NET Core) + // we return the GATEWAY store model + if (request.UseGatewayMode) { + return this.gatewayProxy; + } + + ResourceType resourceType = request.getResourceType(); + OperationType operationType = request.getOperationType(); + + if (resourceType == ResourceType.Offer || + resourceType.isScript() && operationType != OperationType.ExecuteJavaScript || + resourceType == ResourceType.PartitionKeyRange) { + return this.gatewayProxy; + } + + if (operationType == OperationType.Create + || operationType == OperationType.Upsert) { + if (resourceType == ResourceType.Database || + resourceType == ResourceType.User || + resourceType == ResourceType.DocumentCollection || + resourceType == ResourceType.Permission) { + return this.gatewayProxy; + } else { + return this.storeModel; + } + } else if (operationType == OperationType.Delete) { + if (resourceType == ResourceType.Database || + resourceType == ResourceType.User || + resourceType == ResourceType.DocumentCollection) { + return this.gatewayProxy; + } else { + return this.storeModel; + } + } else if (operationType == OperationType.Replace) { + if (resourceType == ResourceType.DocumentCollection) { + return this.gatewayProxy; + } else { + return this.storeModel; + } + } else if (operationType == OperationType.Read) { + if (resourceType == ResourceType.DocumentCollection) { + return this.gatewayProxy; + } else { + return this.storeModel; + } + } else { + if ((request.getOperationType() == OperationType.Query || request.getOperationType() == OperationType.SqlQuery) && + Utils.isCollectionChild(request.getResourceType())) { + if (request.getPartitionKeyRangeIdentity() == null) { + return this.gatewayProxy; + } + } + + return this.storeModel; + } + } + + @Override + public void close() { + logger.info("Shutting down ..."); + LifeCycleUtils.closeQuietly(this.globalEndpointManager); + LifeCycleUtils.closeQuietly(this.storeClientFactory); + + try { + this.reactorHttpClient.shutdown(); + } catch (Exception e) { + logger.warn("Failure in shutting down reactorHttpClient", e); + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/RxDocumentServiceRequest.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/RxDocumentServiceRequest.java new file mode 100644 index 0000000000000..89577ae39c051 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/RxDocumentServiceRequest.java @@ -0,0 +1,1049 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.ChangeFeedOptions; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.Resource; +import com.azure.data.cosmos.SqlQuerySpec; +import com.azure.data.cosmos.internal.directconnectivity.WFConstants; +import com.azure.data.cosmos.internal.routing.PartitionKeyRangeIdentity; +import org.apache.commons.io.IOUtils; +import org.apache.commons.lang3.StringUtils; +import reactor.core.publisher.Flux; + +import java.io.IOException; +import java.io.InputStream; +import java.net.URI; +import java.nio.charset.StandardCharsets; +import java.util.HashMap; +import java.util.Map; + +/** + * This is core Transport/Connection agnostic request to the Azure Cosmos DB database service. + */ +public class RxDocumentServiceRequest { + private static final char PREFER_HEADER_SEPERATOR = ';'; + private static final String PREFER_HEADER_VALUE_FORMAT = "%s=%s"; + + public volatile boolean forcePartitionKeyRangeRefresh; + public volatile boolean forceCollectionRoutingMapRefresh; + private String resourceId; + private final ResourceType resourceType; + private final Map headers; + private volatile String continuation; + private boolean isMedia = false; + private final boolean isNameBased; + private final OperationType operationType; + private final String resourceAddress; + public volatile boolean forceNameCacheRefresh; + private volatile URI endpointOverride = null; + private final String activityId; + private volatile String resourceFullName; + + private volatile String originalSessionToken; + private volatile PartitionKeyRangeIdentity partitionKeyRangeIdentity; + private volatile Integer defaultReplicaIndex; + + public DocumentServiceRequestContext requestContext; + + private Flux contentObservable; + private byte[] byteContent; + + // NOTE: TODO: these fields are copied from .Net SDK + // some of these fields are missing from the main java sdk service request + // so it means most likely the corresponding features are also missing from the main sdk + // we need to wire this up. + public boolean UseGatewayMode; + + private volatile boolean isDisposed = false; + public volatile String entityId; + public volatile String queryString; + public volatile boolean isFeed; + public volatile AuthorizationTokenType authorizationTokenType; + public volatile Map properties; + + public boolean isReadOnlyRequest() { + return this.operationType == OperationType.Read + || this.operationType == OperationType.ReadFeed + || this.operationType == OperationType.Head + || this.operationType == OperationType.HeadFeed + || this.operationType == OperationType.Query + || this.operationType == OperationType.SqlQuery; + } + + public boolean isReadOnlyScript() { + String isReadOnlyScript = this.headers.get(HttpConstants.HttpHeaders.IS_READ_ONLY_SCRIPT); + if(StringUtils.isEmpty(isReadOnlyScript)) { + return false; + } else { + return this.operationType.equals(OperationType.ExecuteJavaScript) && isReadOnlyScript.equalsIgnoreCase(Boolean.TRUE.toString()); + } + } + + /** + * @param operationType the operation type. + * @param resourceIdOrFullName the request id or full name. + * @param resourceType the resource type. + * @param byteContent the byte content. 
+ * @param headers the headers. + * @param isNameBased whether request is name based. + * @param authorizationTokenType the request authorizationTokenType. + */ + private RxDocumentServiceRequest(OperationType operationType, + String resourceIdOrFullName, + ResourceType resourceType, + byte[] byteContent, + Map headers, + boolean isNameBased, + AuthorizationTokenType authorizationTokenType) { + this.operationType = operationType; + this.forceNameCacheRefresh = false; + this.resourceType = resourceType; + this.byteContent = byteContent; + this.headers = headers != null ? headers : new HashMap<>(); + this.activityId = Utils.randomUUID().toString(); + this.isFeed = false; + this.isNameBased = isNameBased; + if (!isNameBased) { + this.resourceId = resourceIdOrFullName; + } + this.resourceAddress = resourceIdOrFullName; + this.authorizationTokenType = authorizationTokenType; + this.requestContext = new DocumentServiceRequestContext(); + if (StringUtils.isNotEmpty(this.headers.get(WFConstants.BackendHeaders.PARTITION_KEY_RANGE_ID))) + this.partitionKeyRangeIdentity = PartitionKeyRangeIdentity.fromHeader(this.headers.get(WFConstants.BackendHeaders.PARTITION_KEY_RANGE_ID)); + } + + /** + * Creates a AbstractDocumentServiceRequest + * + * @param operationType the operation type. + * @param resourceIdOrFullName the request id or full name. + * @param resourceType the resource type. + * @param path the path. + * @param headers the headers + */ + private RxDocumentServiceRequest(OperationType operationType, + String resourceIdOrFullName, + ResourceType resourceType, + String path, + Map headers) { + this.requestContext = new DocumentServiceRequestContext(); + this.operationType = operationType; + this.resourceType = resourceType; + this.requestContext.sessionToken = null; + this.headers = headers != null ? 
headers : new HashMap<>(); + this.activityId = Utils.randomUUID().toString(); + this.isFeed = false; + PathInfo pathInfo = new PathInfo(false, null, null, false); + if (StringUtils.isNotEmpty(path)) { + if (PathsHelper.tryParsePathSegments(path, pathInfo, null)) { + this.isNameBased = pathInfo.isNameBased; + this.isFeed = pathInfo.isFeed; + resourceIdOrFullName = pathInfo.resourceIdOrFullName; + if (!this.isNameBased) { + if (resourceType == ResourceType.Media) { + this.resourceId = getAttachmentIdFromMediaId(resourceIdOrFullName); + } else { + this.resourceId = resourceIdOrFullName; + } + + this.resourceAddress = resourceIdOrFullName; + + // throw exception when the address parsing fail + // do not parse address for offer resource + if (StringUtils.isNotEmpty(this.resourceId) && !ResourceId.tryParse(this.resourceId).getLeft() + && !resourceType.equals(ResourceType.Offer) && !resourceType.equals(ResourceType.Media) + && !resourceType.equals(ResourceType.MasterPartition) + && !resourceType.equals(ResourceType.ServerPartition) + && !resourceType.equals(ResourceType.DatabaseAccount) + && !resourceType.equals(ResourceType.RidRange)) { + throw new IllegalArgumentException( + String.format(RMResources.InvalidResourceUrlQuery, path, HttpConstants.QueryStrings.URL)); + } + } else { + this.resourceAddress = resourceIdOrFullName; + this.resourceId = null; + } + } else { + throw new IllegalArgumentException(RMResources.NotFound); + } + } else { + this.isNameBased = false; + this.resourceAddress = resourceIdOrFullName; + } + + if (StringUtils.isNotEmpty(this.headers.get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID))) { + this.partitionKeyRangeIdentity = PartitionKeyRangeIdentity + .fromHeader(this.headers.get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID)); + } + } + + /** + * Creates a DocumentServiceRequest + * + * @param resourceId the resource Id. + * @param resourceType the resource type. + * @param content the byte content observable\ + * @param contentObservable the byte content observable + * @param headers the request headers. + */ + private RxDocumentServiceRequest(OperationType operationType, + String resourceId, + ResourceType resourceType, + Flux contentObservable, + byte[] content, + String path, + Map headers, + AuthorizationTokenType authorizationTokenType) { + this( operationType, + resourceId, + resourceType, + path, + headers); + this.authorizationTokenType = authorizationTokenType; + this.byteContent = content; + this.contentObservable = contentObservable; + } + + /** + * Creates a DocumentServiceRequest with an HttpEntity. + * + * @param resourceType the resource type. + * @param path the relative URI path. + * @param contentObservable the byte content observable + * @param headers the request headers. + */ + private RxDocumentServiceRequest(OperationType operationType, + ResourceType resourceType, + String path, + Flux contentObservable, + Map headers, + AuthorizationTokenType authorizationTokenType) { + this(operationType, extractIdFromUri(path), resourceType, contentObservable, null, path, headers, authorizationTokenType); + } + + /** + * Creates a DocumentServiceRequest with an HttpEntity. + * + * @param resourceType the resource type. + * @param path the relative URI path. + * @param byteContent the byte content. + * @param headers the request headers. 
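+ * @param operationType the operation type. + * @param authorizationTokenType the request authorizationTokenType.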
+ */ + private RxDocumentServiceRequest(OperationType operationType, + ResourceType resourceType, + String path, + byte[] byteContent, + Map headers, + AuthorizationTokenType authorizationTokenType) { + this(operationType, extractIdFromUri(path), resourceType, null, byteContent, path, headers, authorizationTokenType); + } + + /** + * Creates a DocumentServiceRequest with an HttpEntity. + * + * @param resourceType the resource type. + * @param path the relative URI path. + * @param headers the request headers. + */ + private RxDocumentServiceRequest(OperationType operationType, + ResourceType resourceType, + String path, + Map headers, + AuthorizationTokenType authorizationTokenType) { + this(operationType, extractIdFromUri(path), resourceType, null , null, path, headers, authorizationTokenType); + } + + public void setContentBytes(byte[] bytes) { + this.byteContent = bytes; + } + + /** + * Creates a DocumentServiceRequest with a stream. + * + * @param operation the operation type. + * @param resourceType the resource type. + * @param relativePath the relative URI path. + * @param content the content observable + * @param headers the request headers. + * @return the created document service request. + */ + public static RxDocumentServiceRequest create(OperationType operation, + ResourceType resourceType, + String relativePath, + Flux content, + Map headers) { + return new RxDocumentServiceRequest(operation, resourceType, relativePath, content, headers, AuthorizationTokenType.PrimaryMasterKey); + } + + /** + * Creates a DocumentServiceRequest with a stream. + * + * @param operation the operation type. + * @param resourceType the resource type. + * @param relativePath the relative URI path. + * @param content the content observable + * @param headers the request headers. + * @return the created document service request. + */ + public static RxDocumentServiceRequest create(OperationType operation, + ResourceType resourceType, + String relativePath, + Flux content, + Map headers, + AuthorizationTokenType authorizationTokenType) { + return new RxDocumentServiceRequest(operation, resourceType, relativePath, content, headers, authorizationTokenType); + } + + /** Creates a DocumentServiceRequest with a stream. + * + * @param operation the operation type. + * @param resourceType the resource type. + * @param relativePath the relative URI path. + * @param inputStream the input stream. + * @param headers the request headers. + * @return the created document service request. + */ + public static RxDocumentServiceRequest create(OperationType operation, + ResourceType resourceType, + String relativePath, + InputStream inputStream, + Map headers) throws IOException { + Flux byteFlux = Flux.just(IOUtils.toByteArray(inputStream)); + return new RxDocumentServiceRequest(operation, resourceType, relativePath, byteFlux, headers, AuthorizationTokenType.PrimaryMasterKey); + } + + /** Creates a DocumentServiceRequest with a stream. + * + * @param operation the operation type. + * @param resourceType the resource type. + * @param relativePath the relative URI path. + * @param inputStream the input stream. + * @param headers the request headers. + * @param authorizationTokenType the request authorizationTokenType. + * @return the created document service request. 
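+ * <p>
+ * A minimal usage sketch (illustrative only; the collection path, body, and header map below are
+ * assumptions, not values taken from this repository):
+ * <pre>{@code
+ * Map<String, String> headers = new HashMap<>();
+ * InputStream body = new ByteArrayInputStream(
+ *     "{\"id\":\"item1\"}".getBytes(StandardCharsets.UTF_8));
+ * // Buffers the stream into the request content; may throw IOException.
+ * RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
+ *     OperationType.Create,
+ *     ResourceType.Document,
+ *     "/dbs/testdb/colls/testcoll/docs",
+ *     body,
+ *     headers,
+ *     AuthorizationTokenType.PrimaryMasterKey);
+ * }</pre>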
+ */ + public static RxDocumentServiceRequest create(OperationType operation, + ResourceType resourceType, + String relativePath, + InputStream inputStream, + Map headers, + AuthorizationTokenType authorizationTokenType) throws IOException { + Flux byteFlux = Flux.just(IOUtils.toByteArray(inputStream)); + return new RxDocumentServiceRequest(operation, resourceType, relativePath, byteFlux, headers, authorizationTokenType); + } + + /** + * Creates a DocumentServiceRequest with a resource. + * + * @param operation the operation type. + * @param resourceType the resource type. + * @param relativePath the relative URI path. + * @param resource the resource of the request. + * @param headers the request headers. + * @return the created document service request. + */ + public static RxDocumentServiceRequest create(OperationType operation, + ResourceType resourceType, + String relativePath, + Resource resource, + Map headers) { + return RxDocumentServiceRequest.create(operation, resourceType, relativePath, resource, headers, (RequestOptions)null); + } + + /** + * Creates a DocumentServiceRequest with a resource. + * + * @param operation the operation type. + * @param resourceType the resource type. + * @param relativePath the relative URI path. + * @param resource the resource of the request. + * @param headers the request headers. + * @param options the request/feed/changeFeed options. + * @return the created document service request. + */ + public static RxDocumentServiceRequest create(OperationType operation, + ResourceType resourceType, + String relativePath, + Resource resource, + Map headers, + Object options) { + + RxDocumentServiceRequest request = new RxDocumentServiceRequest(operation, resourceType, relativePath, + // TODO: this re-encodes, can we improve performance here? + resource.toJson().getBytes(StandardCharsets.UTF_8), headers, AuthorizationTokenType.PrimaryMasterKey); + request.properties = getProperties(options); + return request; + } + + /** + * Creates a DocumentServiceRequest with a query. + * + * @param operation the operation type. + * @param resourceType the resource type. + * @param relativePath the relative URI path. + * @param query the query. + * @param headers the request headers. + * @param options the request/feed/changeFeed options. + * @return the created document service request. + */ + public static RxDocumentServiceRequest create(OperationType operation, + ResourceType resourceType, + String relativePath, + String query, + Map headers, + Object options) { + RxDocumentServiceRequest request = new RxDocumentServiceRequest(operation, resourceType, relativePath, + query.getBytes(StandardCharsets.UTF_8), headers, AuthorizationTokenType.PrimaryMasterKey); + request.properties = getProperties(options); + return request; + } + + /** + * Creates a DocumentServiceRequest with a query. + * + * @param operation the operation type. + * @param resourceType the resource type. + * @param relativePath the relative URI path. + * @param query the query. + * @param headers the request headers. + * @param authorizationTokenType the request authorizationTokenType. + * @return the created document service request. 
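+ * <p>
+ * A minimal usage sketch (illustrative only; the collection path and query text are assumptions):
+ * <pre>{@code
+ * RxDocumentServiceRequest queryRequest = RxDocumentServiceRequest.create(
+ *     OperationType.Query,
+ *     ResourceType.Document,
+ *     "/dbs/testdb/colls/testcoll/docs",
+ *     "SELECT * FROM c",
+ *     new HashMap<>(),
+ *     AuthorizationTokenType.PrimaryMasterKey);
+ * }</pre>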
+ */ + public static RxDocumentServiceRequest create(OperationType operation, + ResourceType resourceType, + String relativePath, + String query, + Map headers, + AuthorizationTokenType authorizationTokenType) { + return new RxDocumentServiceRequest(operation, resourceType, relativePath, + query.getBytes(StandardCharsets.UTF_8), headers, authorizationTokenType); + } + + /** + * Creates a DocumentServiceRequest with a query. + * + * @param resourceType the resource type. + * @param relativePath the relative URI path. + * @param querySpec the query. + * @param queryCompatibilityMode the QueryCompatibilityMode mode. + * @param headers the request headers. + * @return the created document service request. + */ + public static RxDocumentServiceRequest create(ResourceType resourceType, + String relativePath, + SqlQuerySpec querySpec, + QueryCompatibilityMode queryCompatibilityMode, + Map headers) { + OperationType operation; + String queryText; + switch (queryCompatibilityMode) { + case SqlQuery: + if (querySpec.parameters() != null && querySpec.parameters().size() > 0) { + throw new IllegalArgumentException( + String.format("Unsupported argument in query compatibility mode '{%s}'", + queryCompatibilityMode.toString())); + } + + operation = OperationType.SqlQuery; + queryText = querySpec.queryText(); + break; + + case Default: + case Query: + default: + operation = OperationType.Query; + queryText = querySpec.toJson(); + break; + } + + Flux body = Flux.just(queryText).map(s -> StandardCharsets.UTF_8.encode(s).array()); + return new RxDocumentServiceRequest(operation, resourceType, relativePath, body, headers, AuthorizationTokenType.PrimaryMasterKey); + } + + /** + * Creates a DocumentServiceRequest without body. + * + * @param operation the operation type. + * @param resourceType the resource type. + * @param relativePath the relative URI path. + * @param headers the request headers. + * @return the created document service request. + */ + public static RxDocumentServiceRequest create(OperationType operation, + ResourceType resourceType, + String relativePath, + Map headers) { + return RxDocumentServiceRequest.create(operation, resourceType, relativePath, headers, (RequestOptions)null); + } + + /** + * Creates a DocumentServiceRequest without body. + * + * @param operation the operation type. + * @param resourceType the resource type. + * @param relativePath the relative URI path. + * @param headers the request headers. + * @param options the request/feed/changeFeed options. + * @return the created document service request. + */ + public static RxDocumentServiceRequest create(OperationType operation, + ResourceType resourceType, + String relativePath, + Map headers, + Object options) { + RxDocumentServiceRequest request = new RxDocumentServiceRequest(operation, resourceType, relativePath, headers, AuthorizationTokenType.PrimaryMasterKey); + request.properties = getProperties(options); + return request; + } + + /** + * Creates a DocumentServiceRequest without body. + * + * @param operation the operation type. + * @param resourceType the resource type. + * @param relativePath the relative URI path. + * @param headers the request headers. + * @param authorizationTokenType the request authorizationTokenType. + * @return the created document service request. 
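+ * <p>
+ * A minimal usage sketch for a body-less request (illustrative only; the collection path is an assumption):
+ * <pre>{@code
+ * RxDocumentServiceRequest readRequest = RxDocumentServiceRequest.create(
+ *     OperationType.Read,
+ *     ResourceType.DocumentCollection,
+ *     "/dbs/testdb/colls/testcoll",
+ *     new HashMap<>(),
+ *     AuthorizationTokenType.PrimaryMasterKey);
+ * }</pre>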
+ */ + public static RxDocumentServiceRequest create(OperationType operation, + ResourceType resourceType, + String relativePath, + Map headers, + AuthorizationTokenType authorizationTokenType) { + return new RxDocumentServiceRequest(operation, resourceType, relativePath, headers, authorizationTokenType); + } + + /** + * Creates a DocumentServiceRequest without body. + * + * @param operation the operation type. + * @param resourceType the resource type. + * @param relativePath the relative URI path. + * @param headers the request headers. + * @return the created document service request. + */ + public static RxDocumentServiceRequest create(OperationType operation, + Resource resource, + ResourceType resourceType, + String relativePath, + Map headers) { + byte[] resourceContent = resource.toJson().getBytes(StandardCharsets.UTF_8); + return new RxDocumentServiceRequest(operation, resourceType, relativePath, resourceContent, headers, AuthorizationTokenType.PrimaryMasterKey); + } + + /** + * Creates a DocumentServiceRequest without body. + * + * @param operation the operation type. + * @param resourceType the resource type. + * @param relativePath the relative URI path. + * @param headers the request headers. + * @param authorizationTokenType the request authorizationTokenType. + * @return the created document service request. + */ + public static RxDocumentServiceRequest create(OperationType operation, + Resource resource, + ResourceType resourceType, + String relativePath, + Map headers, + AuthorizationTokenType authorizationTokenType) { + byte[] resourceContent = resource.toJson().getBytes(StandardCharsets.UTF_8); + return new RxDocumentServiceRequest(operation, resourceType, relativePath, resourceContent, headers, authorizationTokenType); + } + + /** + * Creates a DocumentServiceRequest with a resourceId. + * + * @param operation the operation type. + * @param resourceId the resource id. + * @param resourceType the resource type. + * @param headers the request headers. + * @return the created document service request. + */ + public static RxDocumentServiceRequest create(OperationType operation, + String resourceId, + ResourceType resourceType, + Map headers) { + return new RxDocumentServiceRequest(operation, resourceId,resourceType, null, headers, false, AuthorizationTokenType.PrimaryMasterKey) ; + } + + /** + * Creates a DocumentServiceRequest with a resourceId. + * + * @param operation the operation type. + * @param resourceId the resource id. + * @param resourceType the resource type. + * @param headers the request headers. + * @param authorizationTokenType the request authorizationTokenType. + * @return the created document service request. + */ + public static RxDocumentServiceRequest create(OperationType operation, + String resourceId, + ResourceType resourceType, + Map headers, + AuthorizationTokenType authorizationTokenType) { + return new RxDocumentServiceRequest(operation, resourceId, resourceType, null, headers, false, authorizationTokenType); + } + + /** + * Creates a DocumentServiceRequest with a resourceId. + * + * @param operation the operation type. + * @param resourceId the resource id. + * @param resourceType the resource type. + * @param headers the request headers. + * @return the created document service request. 
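+ * <p>
+ * A minimal usage sketch (illustrative only; the resource id is a placeholder rather than a real
+ * Cosmos DB rid, and the {@code Document} JSON-string constructor is an assumption here):
+ * <pre>{@code
+ * Document updated = new Document("{\"id\":\"item1\",\"value\":42}");
+ * RxDocumentServiceRequest replaceRequest = RxDocumentServiceRequest.create(
+ *     OperationType.Replace,
+ *     "placeholderDocRid==",
+ *     ResourceType.Document,
+ *     updated,
+ *     new HashMap<>());
+ * }</pre>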
+ */ + public static RxDocumentServiceRequest create(OperationType operation, + String resourceId, + ResourceType resourceType, + Resource resource, + Map headers) { + byte[] resourceContent = resource.toJson().getBytes(StandardCharsets.UTF_8); + return new RxDocumentServiceRequest(operation, resourceId, resourceType, resourceContent, headers, false, AuthorizationTokenType.PrimaryMasterKey); + } + + /** + * Creates a DocumentServiceRequest with a resourceId. + * + * @param operation the operation type. + * @param resourceId the resource id. + * @param resourceType the resource type. + * @param headers the request headers. + * @param authorizationTokenType the request authorizationTokenType. + * @return the created document service request. + */ + public static RxDocumentServiceRequest create(OperationType operation, + String resourceId, + ResourceType resourceType, + Resource resource, + Map headers, + AuthorizationTokenType authorizationTokenType) { + byte[] resourceContent = resource.toJson().getBytes(StandardCharsets.UTF_8); + return new RxDocumentServiceRequest(operation, resourceId, resourceType, resourceContent, headers, false, authorizationTokenType); + } + + /** + * Creates a DocumentServiceRequest with operationType and resourceType + * @param operation the operation type + * @param resourceType the resource type + * @return the created document service request. + */ + public static RxDocumentServiceRequest create(OperationType operation, + ResourceType resourceType) { + return new RxDocumentServiceRequest(operation, null, resourceType, null, null); + } + + public static RxDocumentServiceRequest createFromName( + OperationType operationType, + String resourceFullName, + ResourceType resourceType) { + return new RxDocumentServiceRequest(operationType, + resourceFullName, + resourceType, + null, + new HashMap<>(), + true, + AuthorizationTokenType.PrimaryMasterKey + ); + } + + public static RxDocumentServiceRequest createFromName( + OperationType operationType, + String resourceFullName, + ResourceType resourceType, + AuthorizationTokenType authorizationTokenType) { + return new RxDocumentServiceRequest(operationType, + resourceFullName, + resourceType, + null, + new HashMap<>(), + true, + authorizationTokenType + ); + } + + public static RxDocumentServiceRequest createFromName( + OperationType operationType, + Resource resource, + String resourceFullName, + ResourceType resourceType) { + byte[] resourceContent = resource.toJson().getBytes(StandardCharsets.UTF_8); + return new RxDocumentServiceRequest(operationType, + resourceFullName, + resourceType, + resourceContent, + new HashMap<>(), + true, + AuthorizationTokenType.PrimaryMasterKey + ); + } + + public static RxDocumentServiceRequest createFromName( + OperationType operationType, + Resource resource, + String resourceFullName, + ResourceType resourceType, + AuthorizationTokenType authorizationTokenType) { + byte[] resourceContent = resource.toJson().getBytes(StandardCharsets.UTF_8); + return new RxDocumentServiceRequest(operationType, + resourceFullName, + resourceType, + resourceContent, + new HashMap<>(), + true, + authorizationTokenType + ); + } + + private static String extractIdFromUri(String path) { + if (path.length() == 0) { + return path; + } + + if (path.charAt(path.length() - 1) != '/') { + path = path + '/'; + } + + if (path.charAt(0) != '/') { + path = '/' + path; + } + // This is a hack. We need a padding '=' so that path.split("/") + // returns even number of string pieces. 
+ // TODO(pushi): Improve the code and remove the hack. + path = path + '='; + + // The path will be in the form of + // /[resourceType]/[resourceId]/ or + // /[resourceType]/[resourceId]/[resourceType]/ + // The result of split will be in the form of + // [[[resourceType], [resourceId] ... ,[resourceType], ""] + // In the first case, to extract the resourceId it will the element + // before last ( at length -2 ) and the type will before it + // ( at length -3 ) + // In the second case, to extract the resource type it will the element + // before last ( at length -2 ) + String[] pathParts = StringUtils.split(path, "/"); + if (pathParts.length % 2 == 0) { + // request in form /[resourceType]/[resourceId]/. + return pathParts[pathParts.length - 2]; + } else { + // request in form /[resourceType]/[resourceId]/[resourceType]/. + return pathParts[pathParts.length - 3]; + } + } + + static String getAttachmentIdFromMediaId(String mediaId) { + // '/' was replaced with '-'. + byte[] buffer = Utils.Base64Decoder.decode(mediaId.replace('-', '/').getBytes()); + + final int resoureIdLength = 20; + String attachmentId; + + if (buffer.length > resoureIdLength) { + // We are cuting off the storage index. + byte[] newBuffer = new byte[resoureIdLength]; + System.arraycopy(buffer, 0, newBuffer, 0, resoureIdLength); + attachmentId = Utils.encodeBase64String(newBuffer).replace('/', '-'); + } else { + attachmentId = mediaId; + } + + return attachmentId; + } + + /** + * Gets the resource id. + * + * @return the resource id. + */ + public String getResourceId() { + return this.resourceId; + } + + /** + * Sets the resource id. + * + */ + public void setResourceId(String resourceId) { + this.resourceId = resourceId; + } + + /** + * Gets the resource type. + * + * @return the resource type. + */ + public ResourceType getResourceType() { + return this.resourceType; + } + + /** + * Gets the request headers. + * + * @return the request headers. + */ + public Map getHeaders() { + return this.headers; + } + + /** + * Gets the continuation. + * + * @return the continuation. 
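+ *         token, if any, used to resume a feed or query from where the previous page left off;
+ *         {@code null} when this request does not continue an earlier page.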
+ */ + public String getContinuation() { + return this.continuation; + } + + public void setContinuation(String continuation) { + this.continuation = continuation; + } + + public boolean getIsMedia() { + return this.isMedia; + } + + public void setIsMedia(boolean isMedia) { + this.isMedia = isMedia; + } + + public boolean getIsNameBased() { + return this.isNameBased; + } + + public OperationType getOperationType() { + return this.operationType; + } + + public String getResourceAddress() { + return resourceAddress; + } + + public boolean isForceNameCacheRefresh() { + return forceNameCacheRefresh; + } + + public void setForceNameCacheRefresh(boolean forceNameCacheRefresh) { + this.forceNameCacheRefresh = forceNameCacheRefresh; + } + + public URI getEndpointOverride() { + return this.endpointOverride; + } + + public void setEndpointOverride(URI endpointOverride) { + this.endpointOverride = endpointOverride; + } + + public String getActivityId() { + return this.activityId; + } + + public PartitionKeyRangeIdentity getPartitionKeyRangeIdentity() { + return partitionKeyRangeIdentity; + } + + public void routeTo(PartitionKeyRangeIdentity partitionKeyRangeIdentity) { + this.setPartitionKeyRangeIdentity(partitionKeyRangeIdentity); + } + + public void setPartitionKeyRangeIdentity(PartitionKeyRangeIdentity partitionKeyRangeIdentity) { + this.partitionKeyRangeIdentity = partitionKeyRangeIdentity; + if (partitionKeyRangeIdentity != null) { + this.headers.put(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID, partitionKeyRangeIdentity.toHeader()); + } else { + this.headers.remove(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID); + } + } + + public String getOriginalSessionToken() { + return originalSessionToken; + } + + public void setOriginalSessionToken(String originalSessionToken) { + this.originalSessionToken = originalSessionToken; + } + + public void setDefaultReplicaIndex(Integer defaultReplicaIndex) { + this.defaultReplicaIndex = defaultReplicaIndex; + } + + public Integer getDefaultReplicaIndex() { + return defaultReplicaIndex; + } + + public boolean isChangeFeedRequest() { + return this.headers.containsKey(HttpConstants.HttpHeaders.A_IM); + } + + public boolean isWritingToMaster() { + return operationType.isWriteOperation() && resourceType.isMasterResource(); + } + + public boolean isReadingFromMaster() { + if (resourceType == ResourceType.Offer || + resourceType == ResourceType.Database || + resourceType == ResourceType.User || + resourceType == ResourceType.Permission || + resourceType == ResourceType.Topology || + resourceType == ResourceType.DatabaseAccount || + resourceType == ResourceType.PartitionKeyRange || + (resourceType == ResourceType.DocumentCollection + && (operationType == OperationType.ReadFeed + || operationType == OperationType.Query + || operationType == OperationType.SqlQuery))) { + return true; + } + return false; + } + + public boolean isValidAddress(ResourceType resourceType) { + ResourceType resourceTypeToValidate = ResourceType.Unknown; + + if(resourceType != ResourceType.Unknown) { + resourceTypeToValidate = resourceType; + } else { + if(!this.isFeed) { + resourceTypeToValidate =this.resourceType; + } else { + if(this.resourceType == ResourceType.Database) { + return true; + } else if(this.resourceType == ResourceType.DocumentCollection || + this.resourceType == ResourceType.User) { + resourceTypeToValidate = ResourceType.Database; + } else if(this.resourceType == ResourceType.Permission) { + resourceTypeToValidate = ResourceType.User; + } else if(this.resourceType == 
ResourceType.Document || + this.resourceType == ResourceType.StoredProcedure || + this.resourceType == ResourceType.UserDefinedFunction || + this.resourceType == ResourceType.Trigger || + this.resourceType == ResourceType.Conflict || + this.resourceType == ResourceType.PartitionKeyRange) { + resourceTypeToValidate = ResourceType.DocumentCollection; + } else if(this.resourceType == ResourceType.Attachment) { + resourceTypeToValidate = ResourceType.Document; + } else { + return false; + } + } + } + + if (this.isNameBased) { + return PathsHelper.validateResourceFullName(resourceType != ResourceType.Unknown ? resourceType : resourceTypeToValidate, this.resourceAddress); + } else { + return PathsHelper.validateResourceId(resourceTypeToValidate, this.resourceId); + } + } + + public void addPreferHeader(String preferHeaderName, String preferHeaderValue) { + String headerToAdd = String.format(PREFER_HEADER_VALUE_FORMAT, preferHeaderName, preferHeaderValue); + String preferHeader = this.headers.get(HttpConstants.HttpHeaders.PREFER); + if(StringUtils.isNotEmpty(preferHeader)) { + preferHeader += PREFER_HEADER_SEPERATOR + headerToAdd; + } else { + preferHeader = headerToAdd; + } + this.headers.put(HttpConstants.HttpHeaders.PREFER, preferHeader); + } + + public static RxDocumentServiceRequest CreateFromResource(RxDocumentServiceRequest request, Resource modifiedResource) { + RxDocumentServiceRequest modifiedRequest; + if (!request.getIsNameBased()) { + modifiedRequest = RxDocumentServiceRequest.create(request.getOperationType(), + request.getResourceId(), + request.getResourceType(), + modifiedResource, + request.headers); + } else { + modifiedRequest = RxDocumentServiceRequest.createFromName(request.getOperationType(), + modifiedResource, + request.getResourceAddress(), + request.getResourceType()); + } + return modifiedRequest; + } + + public void clearRoutingHints() { + this.partitionKeyRangeIdentity = null; + this.requestContext.resolvedPartitionKeyRange = null; + } + + public Flux getContentObservable() { + return contentObservable; + } + + public byte[] getContent() { + return byteContent; + } + + public RxDocumentServiceRequest clone() { + RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(this.getOperationType(), this.resourceId,this.getResourceType(),this.getHeaders()); + rxDocumentServiceRequest.setContentBytes(this.getContent()); + rxDocumentServiceRequest.setContinuation(this.getContinuation()); + rxDocumentServiceRequest.setDefaultReplicaIndex(this.getDefaultReplicaIndex()); + rxDocumentServiceRequest.setEndpointOverride(this.getEndpointOverride()); + rxDocumentServiceRequest.setForceNameCacheRefresh(this.isForceNameCacheRefresh()); + rxDocumentServiceRequest.setIsMedia(this.getIsMedia()); + rxDocumentServiceRequest.setOriginalSessionToken(this.getOriginalSessionToken()); + rxDocumentServiceRequest.setPartitionKeyRangeIdentity(this.getPartitionKeyRangeIdentity()); + rxDocumentServiceRequest.contentObservable = this.getContentObservable(); + rxDocumentServiceRequest.forceCollectionRoutingMapRefresh = this.forceCollectionRoutingMapRefresh; + rxDocumentServiceRequest.forcePartitionKeyRangeRefresh = this.forcePartitionKeyRangeRefresh; + rxDocumentServiceRequest.UseGatewayMode = this.UseGatewayMode; + rxDocumentServiceRequest.queryString = this.queryString; + rxDocumentServiceRequest.requestContext = this.requestContext; + return rxDocumentServiceRequest; + } + + public void Dispose() { + if (this.isDisposed) { + return; + } + + if (this.byteContent != null) { 
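+ // Release the buffered request body so it can be garbage collected; the isDisposed flag above
+ // makes repeated Dispose() calls a no-op.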
+ this.byteContent = null; + } + + this.isDisposed = true; + } + + private static Map getProperties(Object options) { + if (options == null) { + return null; + } else if (options instanceof RequestOptions) { + return ((RequestOptions) options).getProperties(); + } else if (options instanceof FeedOptions) { + return ((FeedOptions) options).properties(); + } else if (options instanceof ChangeFeedOptions) { + return ((ChangeFeedOptions) options).properties(); + } else { + return null; + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/RxDocumentServiceResponse.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/RxDocumentServiceResponse.java new file mode 100644 index 0000000000000..2485ce563e388 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/RxDocumentServiceResponse.java @@ -0,0 +1,212 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.CosmosResponseDiagnostics; +import com.azure.data.cosmos.Resource; +import com.azure.data.cosmos.internal.directconnectivity.Address; +import com.azure.data.cosmos.internal.directconnectivity.StoreResponse; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.node.ArrayNode; +import org.apache.commons.lang3.StringUtils; + +import java.io.IOException; +import java.io.InputStream; +import java.lang.reflect.InvocationTargetException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * This is core Transport/Connection agnostic response for the Azure Cosmos DB database service. + */ +public class RxDocumentServiceResponse { + private final int statusCode; + private final Map headersMap; + private final StoreResponse storeResponse; + + public RxDocumentServiceResponse(StoreResponse response) { + String[] headerNames = response.getResponseHeaderNames(); + String[] headerValues = response.getResponseHeaderValues(); + + this.headersMap = new HashMap<>(headerNames.length); + + // Gets status code. + this.statusCode = response.getStatus(); + + // Extracts headers. 
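+ // Header names and values arrive from the StoreResponse as two parallel arrays; pair them up into a single map.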
+ for (int i = 0; i < headerNames.length; i++) { + this.headersMap.put(headerNames[i], headerValues[i]); + } + + this.storeResponse = response; + } + + public static String getResourceKey(Class c) { + if (c.equals(Conflict.class)) { + return InternalConstants.ResourceKeys.CONFLICTS; + } else if (c.equals(Database.class)) { + return InternalConstants.ResourceKeys.DATABASES; + } else if (Document.class.isAssignableFrom(c)) { + return InternalConstants.ResourceKeys.DOCUMENTS; + } else if (c.equals(DocumentCollection.class)) { + return InternalConstants.ResourceKeys.DOCUMENT_COLLECTIONS; + } else if (c.equals(Offer.class)) { + return InternalConstants.ResourceKeys.OFFERS; + } else if (c.equals(Permission.class)) { + return InternalConstants.ResourceKeys.PERMISSIONS; + } else if (c.equals(Trigger.class)) { + return InternalConstants.ResourceKeys.TRIGGERS; + } else if (c.equals(StoredProcedure.class)) { + return InternalConstants.ResourceKeys.STOREDPROCEDURES; + } else if (c.equals(User.class)) { + return InternalConstants.ResourceKeys.USERS; + } else if (c.equals(UserDefinedFunction.class)) { + return InternalConstants.ResourceKeys.USER_DEFINED_FUNCTIONS; + } else if (c.equals(Address.class)) { + return InternalConstants.ResourceKeys.ADDRESSES; + } else if (c.equals(PartitionKeyRange.class)) { + return InternalConstants.ResourceKeys.PARTITION_KEY_RANGES; + } + + throw new IllegalArgumentException("c"); + } + + public int getStatusCode() { + return this.statusCode; + } + + public Map getResponseHeaders() { + return this.headersMap; + } + + public String getReponseBodyAsString() { + return this.storeResponse.getResponseBody(); + } + + public T getResource(Class c) { + String responseBody = this.getReponseBodyAsString(); + if (StringUtils.isEmpty(responseBody)) + return null; + + T resource = null; + try { + resource = c.getConstructor(String.class).newInstance(responseBody); + } catch (InstantiationException | IllegalAccessException | IllegalArgumentException | InvocationTargetException + | NoSuchMethodException | SecurityException e) { + throw new IllegalStateException("Failed to instantiate class object.", e); + } + if(PathsHelper.isPublicResource(resource)) { + BridgeInternal.setAltLink(resource, PathsHelper.generatePathForNameBased(resource, this.getOwnerFullName(),resource.id())); + } + + return resource; + } + + public List getQueryResponse(Class c) { + String responseBody = this.getReponseBodyAsString(); + if (responseBody == null) { + return new ArrayList(); + } + + JsonNode jobject = fromJson(responseBody); + String resourceKey = RxDocumentServiceResponse.getResourceKey(c); + ArrayNode jTokenArray = (ArrayNode) jobject.get(resourceKey); + + // Aggregate queries may return a nested array + ArrayNode innerArray; + while (jTokenArray != null && jTokenArray.size() == 1 && (innerArray = toArrayNode(jTokenArray.get(0))) != null) { + jTokenArray = innerArray; + } + + List queryResults = new ArrayList(); + + if (jTokenArray != null) { + for (int i = 0; i < jTokenArray.size(); ++i) { + JsonNode jToken = jTokenArray.get(i); + // Aggregate on single partition collection may return the aggregated value only + // In that case it needs to encapsulated in a special document + String resourceJson = jToken.isNumber() || jToken.isBoolean() + ? 
String.format("{\"%s\": %s}", Constants.Properties.AGGREGATE, jToken.asText()) + : toJson(jToken); + T resource = null; + try { + resource = c.getConstructor(String.class).newInstance(resourceJson); + } catch (InstantiationException | IllegalAccessException | IllegalArgumentException + | InvocationTargetException | NoSuchMethodException | SecurityException e) { + throw new IllegalStateException("Failed to instantiate class object.", e); + } + + queryResults.add(resource); + } + } + + return queryResults; + } + + private ArrayNode toArrayNode(JsonNode n) { + if (n.isArray()) { + return (ArrayNode) n; + } else { + return null; + } + } + + private static JsonNode fromJson(String json){ + try { + return Utils.getSimpleObjectMapper().readTree(json); + } catch (IOException e) { + throw new IllegalStateException(String.format("Unable to parse JSON %s", json), e); + } + } + + private static String toJson(Object object){ + try { + return Utils.getSimpleObjectMapper().writeValueAsString(object); + } catch (JsonProcessingException e) { + throw new IllegalStateException("Can't serialize the object into the json string", e); + } + } + + private String getOwnerFullName() { + if (this.headersMap != null) { + return this.headersMap.get(HttpConstants.HttpHeaders.OWNER_FULL_NAME); + } + return null; + } + + public InputStream getContentStream() { + return this.storeResponse.getResponseStream(); + } + + CosmosResponseDiagnostics getCosmosResponseRequestDiagnosticStatistics() { + if (this.storeResponse == null) { + return null; + } + return this.storeResponse.getCosmosResponseDiagnostics(); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/RxGatewayStoreModel.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/RxGatewayStoreModel.java new file mode 100644 index 0000000000000..a46f076a6a4cd --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/RxGatewayStoreModel.java @@ -0,0 +1,521 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos.internal; + + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.CosmosError; +import com.azure.data.cosmos.internal.directconnectivity.HttpUtils; +import com.azure.data.cosmos.internal.directconnectivity.StoreResponse; +import com.azure.data.cosmos.internal.http.HttpClient; +import com.azure.data.cosmos.internal.http.HttpHeaders; +import com.azure.data.cosmos.internal.http.HttpRequest; +import com.azure.data.cosmos.internal.http.HttpResponse; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpResponseStatus; +import org.apache.commons.io.IOUtils; +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.net.URI; +import java.net.URISyntaxException; +import java.nio.charset.StandardCharsets; +import java.util.HashMap; +import java.util.Map; +import java.util.Map.Entry; +import java.util.concurrent.Callable; + +/** + * While this class is public, but it is not part of our published public APIs. + * This is meant to be internally used only by our sdk. + * + * Used internally to provide functionality to communicate and process response from GATEWAY in the Azure Cosmos DB database service. + */ +class RxGatewayStoreModel implements RxStoreModel { + + private final static int INITIAL_RESPONSE_BUFFER_SIZE = 1024; + private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class); + private final Map defaultHeaders; + private final HttpClient httpClient; + private final QueryCompatibilityMode queryCompatibilityMode; + private final GlobalEndpointManager globalEndpointManager; + private ConsistencyLevel defaultConsistencyLevel; + private ISessionContainer sessionContainer; + + public RxGatewayStoreModel( + ISessionContainer sessionContainer, + ConsistencyLevel defaultConsistencyLevel, + QueryCompatibilityMode queryCompatibilityMode, + UserAgentContainer userAgentContainer, + GlobalEndpointManager globalEndpointManager, + HttpClient httpClient) { + this.defaultHeaders = new HashMap<>(); + this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL, + "no-cache"); + this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION, + HttpConstants.Versions.CURRENT_VERSION); + + if (userAgentContainer == null) { + userAgentContainer = new UserAgentContainer(); + } + + this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent()); + + if (defaultConsistencyLevel != null) { + this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, + defaultConsistencyLevel.toString()); + } + + this.defaultConsistencyLevel = defaultConsistencyLevel; + this.globalEndpointManager = globalEndpointManager; + this.queryCompatibilityMode = queryCompatibilityMode; + + this.httpClient = httpClient; + this.sessionContainer = sessionContainer; + } + + private Flux doCreate(RxDocumentServiceRequest request) { + return this.performRequest(request, HttpMethod.POST); + } + + private Flux upsert(RxDocumentServiceRequest request) { + return this.performRequest(request, HttpMethod.POST); + } + + private Flux read(RxDocumentServiceRequest request) { + return this.performRequest(request, 
HttpMethod.GET); + } + + private Flux replace(RxDocumentServiceRequest request) { + return this.performRequest(request, HttpMethod.PUT); + } + + private Flux delete(RxDocumentServiceRequest request) { + return this.performRequest(request, HttpMethod.DELETE); + } + + private Flux execute(RxDocumentServiceRequest request) { + return this.performRequest(request, HttpMethod.POST); + } + + private Flux readFeed(RxDocumentServiceRequest request) { + return this.performRequest(request, HttpMethod.GET); + } + + private Flux query(RxDocumentServiceRequest request) { + request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true"); + + switch (this.queryCompatibilityMode) { + case SqlQuery: + request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, + RuntimeConstants.MediaTypes.SQL); + break; + case Default: + case Query: + default: + request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, + RuntimeConstants.MediaTypes.QUERY_JSON); + break; + } + return this.performRequest(request, HttpMethod.POST); + } + + /** + * Given the request it creates an flux which upon subscription issues HTTP call and emits one RxDocumentServiceResponse. + * + * @param request + * @param method + * @return Flux + */ + public Flux performRequest(RxDocumentServiceRequest request, HttpMethod method) { + + try { + URI uri = getUri(request); + + HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders()); + + Flux byteBufObservable = Flux.empty(); + + if (request.getContentObservable() != null) { + byteBufObservable = request.getContentObservable().map(Unpooled::wrappedBuffer); + } else if (request.getContent() != null){ + byteBufObservable = Flux.just(Unpooled.wrappedBuffer(request.getContent())); + } + + + HttpRequest httpRequest = new HttpRequest(method, + uri, + uri.getPort(), + httpHeaders, + byteBufObservable); + + Mono httpResponseMono = this.httpClient.send(httpRequest); + + return toDocumentServiceResponse(httpResponseMono, request); + + } catch (Exception e) { + return Flux.error(e); + } + } + + private HttpHeaders getHttpRequestHeaders(Map headers) { + HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size()); + // Add default headers. + for (Entry entry : this.defaultHeaders.entrySet()) { + if (!headers.containsKey(entry.getKey())) { + // populate default header only if there is no overwrite by the request header + httpHeaders.set(entry.getKey(), entry.getValue()); + } + } + + // Add override headers. + if (headers != null) { + for (Entry entry : headers.entrySet()) { + if (entry.getValue() == null) { + // netty doesn't allow setting null value in header + httpHeaders.set(entry.getKey(), ""); + } else { + httpHeaders.set(entry.getKey(), entry.getValue()); + } + } + } + return httpHeaders; + } + + private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException { + URI rootUri = request.getEndpointOverride(); + if (rootUri == null) { + if (request.getIsMedia()) { + // For media read request, always use the write endpoint. + rootUri = this.globalEndpointManager.getWriteEndpoints().get(0).toURI(); + } else { + rootUri = this.globalEndpointManager.resolveServiceEndpoint(request).toURI(); + } + } + + String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed); + if(request.getResourceType().equals(ResourceType.DatabaseAccount)) { + path = StringUtils.EMPTY; + } + + return new URI("https", + null, + rootUri.getHost(), + rootUri.getPort(), + ensureSlashPrefixed(path), + null, // Query string not used. 
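+ // The final constructor argument is the URI fragment, which is likewise unused.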
+ null); + } + + private String ensureSlashPrefixed(String path) { + if (path == null) { + return path; + } + + if (path.startsWith("/")) { + return path; + } + + return "/" + path; + } + + private Mono toString(Flux contentObservable) { + return contentObservable + .reduce( + new ByteArrayOutputStream(INITIAL_RESPONSE_BUFFER_SIZE), + (out, bb) -> { + try { + bb.readBytes(out, bb.readableBytes()); + return out; + } + catch (IOException e) { + throw new RuntimeException(e); + } + }) + .map(out -> new String(out.toByteArray(), StandardCharsets.UTF_8)); + } + + /** + * Transforms the reactor netty's client response Observable to RxDocumentServiceResponse Observable. + * + * + * Once the customer code subscribes to the observable returned by the CRUD APIs, + * the subscription goes up till it reaches the source reactor netty's observable, and at that point the HTTP invocation will be made. + * + * @param httpResponseMono + * @param request + * @return {@link Flux} + */ + private Flux toDocumentServiceResponse(Mono httpResponseMono, + RxDocumentServiceRequest request) { + + if (request.getIsMedia()) { + return httpResponseMono.flatMap(httpResponse -> { + + // header key/value pairs + HttpHeaders httpResponseHeaders = httpResponse.headers(); + int httpResponseStatus = httpResponse.statusCode(); + + Flux inputStreamObservable; + + if (request.getOperationType() == OperationType.Delete) { + // for delete we don't expect any body + inputStreamObservable = Flux.just(IOUtils.toInputStream("", StandardCharsets.UTF_8)); + } else { + // transforms the ByteBufFlux to Flux + inputStreamObservable = httpResponse + .body() + .flatMap(byteBuf -> + Flux.just(IOUtils.toInputStream(byteBuf.toString(StandardCharsets.UTF_8), StandardCharsets.UTF_8))); + } + + return inputStreamObservable + .flatMap(contentInputStream -> { + try { + // If there is any error in the header response this throws exception + // TODO: potential performance improvement: return Observable.error(exception) on failure instead of throwing Exception + validateOrThrow(request, + HttpResponseStatus.valueOf(httpResponseStatus), + httpResponseHeaders, + null, + contentInputStream); + + // transforms to Observable + StoreResponse rsp = new StoreResponse(httpResponseStatus, HttpUtils + .unescape(httpResponseHeaders.toMap().entrySet()), contentInputStream); + return Flux.just(rsp); + } catch (Exception e) { + return Flux.error(e); + } + }).single(); + + }).map(RxDocumentServiceResponse::new).flux(); + + } else { + return httpResponseMono.flatMap(httpResponse -> { + + // header key/value pairs + HttpHeaders httpResponseHeaders = httpResponse.headers(); + int httpResponseStatus = httpResponse.statusCode(); + + Flux contentObservable; + + if (request.getOperationType() == OperationType.Delete) { + // for delete we don't expect any body + contentObservable = Flux.just(StringUtils.EMPTY); + } else { + // transforms the ByteBufFlux to Flux + contentObservable = toString(httpResponse.body()).flux(); + } + + return contentObservable + .flatMap(content -> { + try { + // If there is any error in the header response this throws exception + // TODO: potential performance improvement: return Observable.error(exception) on failure instead of throwing Exception + validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus), httpResponseHeaders, content, null); + + // transforms to Observable + StoreResponse rsp = new StoreResponse(httpResponseStatus, + HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()), + content); + return Flux.just(rsp); + } 
catch (Exception e) { + return Flux.error(e); + } + }).single(); + + }).map(RxDocumentServiceResponse::new) + .onErrorResume(throwable -> { + if (!(throwable instanceof Exception)) { + // fatal error + logger.error("Unexpected failure {}", throwable.getMessage(), throwable); + return Mono.error(throwable); + } + + Exception exception = (Exception) throwable; + if (!(exception instanceof CosmosClientException)) { + // wrap in CosmosClientException + logger.error("Network failure", exception); + CosmosClientException dce = BridgeInternal.createCosmosClientException(0, exception); + BridgeInternal.setRequestHeaders(dce, request.getHeaders()); + return Mono.error(dce); + } + + return Mono.error(exception); + }).flux(); + } + } + + private void validateOrThrow(RxDocumentServiceRequest request, HttpResponseStatus status, HttpHeaders headers, String body, + InputStream inputStream) throws CosmosClientException { + + int statusCode = status.code(); + + if (statusCode >= HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) { + if (body == null && inputStream != null) { + try { + body = IOUtils.toString(inputStream, StandardCharsets.UTF_8); + } catch (IOException e) { + logger.error("Failed to get content from the http response", e); + CosmosClientException dce = BridgeInternal.createCosmosClientException(0, e); + BridgeInternal.setRequestHeaders(dce, request.getHeaders()); + throw dce; + } finally { + IOUtils.closeQuietly(inputStream); + } + } + + String statusCodeString = status.reasonPhrase() != null + ? status.reasonPhrase().replace(" ", "") + : ""; + CosmosError cosmosError; + cosmosError = (StringUtils.isNotEmpty(body)) ? BridgeInternal.createCosmosError(body) : new CosmosError(); + cosmosError = new CosmosError(statusCodeString, + String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString), + cosmosError.getPartitionedQueryExecutionInfo()); + + CosmosClientException dce = BridgeInternal.createCosmosClientException(statusCode, cosmosError, headers.toMap()); + BridgeInternal.setRequestHeaders(dce, request.getHeaders()); + throw dce; + } + } + + private Flux invokeAsyncInternal(RxDocumentServiceRequest request) { + switch (request.getOperationType()) { + case Create: + return this.doCreate(request); + case Upsert: + return this.upsert(request); + case Delete: + return this.delete(request); + case ExecuteJavaScript: + return this.execute(request); + case Read: + return this.read(request); + case ReadFeed: + return this.readFeed(request); + case Replace: + return this.replace(request); + case SqlQuery: + case Query: + return this.query(request); + default: + throw new IllegalStateException("Unknown operation type " + request.getOperationType()); + } + } + + private Flux invokeAsync(RxDocumentServiceRequest request) { + Callable> funcDelegate = () -> invokeAsyncInternal(request).single(); + return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy()).flux(); + } + + @Override + public Flux processMessage(RxDocumentServiceRequest request) { + this.applySessionToken(request); + + Flux responseObs = invokeAsync(request); + + return responseObs.onErrorResume( + e -> { + CosmosClientException dce = Utils.as(e, CosmosClientException.class); + + if (dce == null) { + logger.error("unexpected failure {}", e.getMessage(), e); + return Flux.error(e); + } + + if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) && + (dce.statusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED || + dce.statusCode() == 
HttpConstants.StatusCodes.CONFLICT || + ( + dce.statusCode() == HttpConstants.StatusCodes.NOTFOUND && + !Exceptions.isSubStatusCode(dce, + HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) { + this.captureSessionToken(request, dce.responseHeaders()); + } + + return Flux.error(dce); + } + ).map(response -> + { + this.captureSessionToken(request, response.getResponseHeaders()); + return response; + } + ); + } + + private void captureSessionToken(RxDocumentServiceRequest request, Map responseHeaders) { + if (request.getResourceType() == ResourceType.DocumentCollection && request.getOperationType() == OperationType.Delete) { + String resourceId; + if (request.getIsNameBased()) { + resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID); + } else { + resourceId = request.getResourceId(); + } + this.sessionContainer.clearTokenByResourceId(resourceId); + } else { + this.sessionContainer.setSessionToken(request, responseHeaders); + } + } + + private void applySessionToken(RxDocumentServiceRequest request) { + Map headers = request.getHeaders(); + + if (headers != null && + !Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) { + if (ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) { + request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); + } + return; //User is explicitly controlling the session. + } + + String requestConsistencyLevel = headers.get(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL); + + boolean sessionConsistency = + this.defaultConsistencyLevel == ConsistencyLevel.SESSION || + (!Strings.isNullOrEmpty(requestConsistencyLevel) + && Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.SESSION.toString())); + + if (!sessionConsistency || ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) { + return; // Only apply the session token in case of session consistency and when resource is not a master resource + } + + //Apply the ambient session. + String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); + + if (!Strings.isNullOrEmpty(sessionToken)) { + headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); + } + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/RxStoreModel.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/RxStoreModel.java new file mode 100644 index 0000000000000..38fae2f6c0ea7 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/RxStoreModel.java @@ -0,0 +1,43 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal; + +import reactor.core.publisher.Flux; + +/** + * While this class is public, it is not part of our published public APIs. + * It is meant to be used only internally by our SDK. + */ +public interface RxStoreModel { + + /** + * Given the request, it returns an Observable of the response. + * + * Upon subscription, the Observable executes the request and, on successful execution, emits a single {@link RxDocumentServiceResponse}. + * If the execution of the request fails, it emits an error. + * + * @param request the request to execute. + * @return a {@link Flux} that emits the resulting {@link RxDocumentServiceResponse}. + */ + Flux processMessage(RxDocumentServiceRequest request); +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/SessionContainer.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/SessionContainer.java new file mode 100644 index 0000000000000..dc1f91bf57f22 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/SessionContainer.java @@ -0,0 +1,313 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.CosmosClientException; +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Iterator; +import java.util.Map; +import java.util.Map.Entry; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.locks.ReentrantReadWriteLock; + +import static com.azure.data.cosmos.internal.Utils.ValueHolder; + +/** + * Used internally to cache the collections' session tokens in the Azure Cosmos DB database service.
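+ * <p>
+ * The session token header value handled here (see {@code setSessionToken}) has the shape
+ * {@code <partitionKeyRangeId>:<sessionToken>}; a value such as {@code "0:425"} (an illustrative
+ * assumption, not taken from this codebase) would map partition key range "0" to the token "425".
+ * The token payload itself is parsed by {@code SessionTokenHelper}.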
+ */ +public final class SessionContainer implements ISessionContainer { + private final Logger logger = LoggerFactory.getLogger(SessionContainer.class); + + /** + * SESSION token cache that maps collection ResourceID to session tokens + */ + private final ConcurrentHashMap> collectionResourceIdToSessionTokens = new ConcurrentHashMap<>(); + /** + * Collection ResourceID cache that maps collection name to collection ResourceID + * When collection name is provided instead of self-link, this is used in combination with + * collectionResourceIdToSessionTokens to retrieve the session token for the collection by name + */ + private final ReentrantReadWriteLock readWriteLock = new ReentrantReadWriteLock(); + private final ReentrantReadWriteLock.ReadLock readLock = readWriteLock.readLock(); + private final ReentrantReadWriteLock.WriteLock writeLock = readWriteLock.writeLock(); + + private final ConcurrentHashMap collectionNameToCollectionResourceId = new ConcurrentHashMap<>(); + private final ConcurrentHashMap collectionResourceIdToCollectionName = new ConcurrentHashMap<>(); + private final String hostName; + + public SessionContainer(final String hostName) { + this.hostName = hostName; + } + + public String getHostName() { + return this.hostName; + } + + public String getSessionToken(String collectionLink) { + + PathInfo pathInfo = new PathInfo(false, null, null, false); + ConcurrentHashMap partitionKeyRangeIdToTokenMap = null; + if (PathsHelper.tryParsePathSegments(collectionLink, pathInfo, null)) { + Long UniqueDocumentCollectionId = null; + if (pathInfo.isNameBased) { + String collectionName = PathsHelper.getCollectionPath(pathInfo.resourceIdOrFullName); + UniqueDocumentCollectionId = this.collectionNameToCollectionResourceId.get(collectionName); + } else { + ResourceId resourceId = ResourceId.parse(pathInfo.resourceIdOrFullName); + if (resourceId.getDocumentCollection() != 0) { + UniqueDocumentCollectionId = resourceId.getUniqueDocumentCollectionId(); + } + } + + if (UniqueDocumentCollectionId != null) { + partitionKeyRangeIdToTokenMap = this.collectionResourceIdToSessionTokens.get(UniqueDocumentCollectionId); + } + } + + if (partitionKeyRangeIdToTokenMap == null) { + return StringUtils.EMPTY; + } + + return SessionContainer.getCombinedSessionToken(partitionKeyRangeIdToTokenMap); + } + + private ConcurrentHashMap getPartitionKeyRangeIdToTokenMap(RxDocumentServiceRequest request) { + return getPartitionKeyRangeIdToTokenMap(request.getIsNameBased(), request.getResourceId(), request.getResourceAddress()); + } + + private ConcurrentHashMap getPartitionKeyRangeIdToTokenMap(boolean isNameBased, String rId, String resourceAddress) { + ConcurrentHashMap rangeIdToTokenMap = null; + if (!isNameBased) { + if (!StringUtils.isEmpty(rId)) { + ResourceId resourceId = ResourceId.parse(rId); + if (resourceId.getDocumentCollection() != 0) { + rangeIdToTokenMap = + this.collectionResourceIdToSessionTokens.get(resourceId.getUniqueDocumentCollectionId()); + } + } + } else { + String collectionName = Utils.getCollectionName(resourceAddress); + if (!StringUtils.isEmpty(collectionName) && this.collectionNameToCollectionResourceId.containsKey(collectionName)) { + rangeIdToTokenMap = this.collectionResourceIdToSessionTokens.get( + this.collectionNameToCollectionResourceId.get(collectionName)); + } + } + return rangeIdToTokenMap; + } + + + public String resolveGlobalSessionToken(RxDocumentServiceRequest request) { + ConcurrentHashMap partitionKeyRangeIdToTokenMap = this.getPartitionKeyRangeIdToTokenMap(request); + if 
(partitionKeyRangeIdToTokenMap != null) { + return SessionContainer.getCombinedSessionToken(partitionKeyRangeIdToTokenMap); + } + + return StringUtils.EMPTY; + } + + @Override + public ISessionToken resolvePartitionLocalSessionToken(RxDocumentServiceRequest request, String partitionKeyRangeId) { + return SessionTokenHelper.resolvePartitionLocalSessionToken(request, + partitionKeyRangeId, + this.getPartitionKeyRangeIdToTokenMap(request)); + } + + @Override + public void clearTokenByCollectionFullName(String collectionFullName) { + if (!Strings.isNullOrEmpty(collectionFullName)) { + String collectionName = PathsHelper.getCollectionPath(collectionFullName); + this.writeLock.lock(); + try { + if (this.collectionNameToCollectionResourceId.containsKey(collectionName)) { + Long rid = this.collectionNameToCollectionResourceId.get(collectionName); + this.collectionResourceIdToSessionTokens.remove(rid); + this.collectionResourceIdToCollectionName.remove(rid); + this.collectionNameToCollectionResourceId.remove(collectionName); + } + } finally { + this.writeLock.unlock(); + } + } + } + + @Override + public void clearTokenByResourceId(String resourceId) { + if (!StringUtils.isEmpty(resourceId)) { + ResourceId resource = ResourceId.parse(resourceId); + if (resource.getDocumentCollection() != 0) { + Long rid = resource.getUniqueDocumentCollectionId(); + this.writeLock.lock(); + try { + if (this.collectionResourceIdToCollectionName.containsKey(rid)) { + String collectionName = this.collectionResourceIdToCollectionName.get(rid); + this.collectionResourceIdToSessionTokens.remove(rid); + this.collectionResourceIdToCollectionName.remove(rid); + this.collectionNameToCollectionResourceId.remove(collectionName); + } + } finally { + this.writeLock.unlock(); + } + } + } + } + + @Override + public void setSessionToken(RxDocumentServiceRequest request, Map responseHeaders) { + String token = responseHeaders.get(HttpConstants.HttpHeaders.SESSION_TOKEN); + + if (!Strings.isNullOrEmpty(token)) { + ValueHolder resourceId = ValueHolder.initialize(null); + ValueHolder collectionName = ValueHolder.initialize(null); + + if (shouldUpdateSessionToken(request, responseHeaders, resourceId, collectionName)) { + this.setSessionToken(resourceId.v, collectionName.v, token); + } + } + } + + @Override + public void setSessionToken(String collectionRid, String collectionFullName, Map responseHeaders) { + ResourceId resourceId = ResourceId.parse(collectionRid); + String collectionName = PathsHelper.getCollectionPath(collectionFullName); + String token = responseHeaders.get(HttpConstants.HttpHeaders.SESSION_TOKEN); + if (!Strings.isNullOrEmpty(token)) { + this.setSessionToken(resourceId, collectionName, token); + } + } + + private void setSessionToken(ResourceId resourceId, String collectionName, String token) { + String partitionKeyRangeId; + ISessionToken parsedSessionToken; + + String[] tokenParts = StringUtils.split(token, ':'); + partitionKeyRangeId = tokenParts[0]; + parsedSessionToken = SessionTokenHelper.parse(tokenParts[1]); + + logger.trace("UPDATE SESSION token {} {} {}", resourceId.getUniqueDocumentCollectionId(), collectionName, parsedSessionToken); + + boolean isKnownCollection; + + this.readLock.lock(); + try { + isKnownCollection = this.collectionNameToCollectionResourceId.containsKey(collectionName) && + this.collectionResourceIdToCollectionName.containsKey(resourceId.getUniqueDocumentCollectionId()) && + this.collectionNameToCollectionResourceId.get(collectionName) == resourceId.getUniqueDocumentCollectionId() && + 
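+                // The collection is "known" only if the name<->rid mappings agree in both directions;
+                // in that case the token is merged under the read lock, otherwise the write lock branch
+                // below registers the name/rid pair before adding the token.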
this.collectionResourceIdToCollectionName.get(resourceId.getUniqueDocumentCollectionId()).equals(collectionName); + if (isKnownCollection) { + this.addSessionToken(resourceId, partitionKeyRangeId, parsedSessionToken); + } + } finally { + this.readLock.unlock(); + } + + if (!isKnownCollection) { + this.writeLock.lock(); + try { + if (collectionName != null && resourceId.getUniqueDocumentCollectionId() != 0) { + this.collectionNameToCollectionResourceId.compute(collectionName, (k, v) -> resourceId.getUniqueDocumentCollectionId()); + this.collectionResourceIdToCollectionName.compute(resourceId.getUniqueDocumentCollectionId(), (k, v) -> collectionName); + } + addSessionToken(resourceId, partitionKeyRangeId, parsedSessionToken); + } finally { + this.writeLock.unlock(); + } + } + } + + private void addSessionToken(ResourceId resourceId, String partitionKeyRangeId, ISessionToken parsedSessionToken) { + this.collectionResourceIdToSessionTokens.compute( + resourceId.getUniqueDocumentCollectionId(), (k, existingTokens) -> { + if (existingTokens == null) { + ConcurrentHashMap tokens = new ConcurrentHashMap<>(); + tokens.put(partitionKeyRangeId, parsedSessionToken); + return tokens; + } + + existingTokens.merge(partitionKeyRangeId, parsedSessionToken, (existingSessionTokens, newSessionToken) -> { + try { + if (existingSessionTokens == null) { + return newSessionToken; + } + + return existingSessionTokens.merge(newSessionToken); + } catch (CosmosClientException e) { + throw new IllegalStateException(e); + } + }); + + return existingTokens; + } + ); + } + + private static String getCombinedSessionToken(ConcurrentHashMap tokens) { + StringBuilder result = new StringBuilder(); + if (tokens != null) { + for (Iterator> iterator = tokens.entrySet().iterator(); iterator.hasNext(); ) { + Entry entry = iterator.next(); + result = result.append(entry.getKey()).append(":").append(entry.getValue().convertToString()); + if (iterator.hasNext()) { + result = result.append(","); + } + } + } + + return result.toString(); + } + + private static boolean shouldUpdateSessionToken( + RxDocumentServiceRequest request, + Map responseHeaders, + ValueHolder resourceId, + ValueHolder collectionName) { + resourceId.v = null; + String ownerFullName = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_FULL_NAME); + if (Strings.isNullOrEmpty(ownerFullName)) ownerFullName = request.getResourceAddress(); + + collectionName.v = PathsHelper.getCollectionPath(ownerFullName); + String resourceIdString; + + if (!request.getIsNameBased()) { + resourceIdString = request.getResourceId(); + } else { + resourceIdString = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID); + if (Strings.isNullOrEmpty(resourceIdString)) resourceIdString = request.getResourceId(); + } + + if (!Strings.isNullOrEmpty(resourceIdString)) { + resourceId.v = ResourceId.parse(resourceIdString); + + if (resourceId.v.getDocumentCollection() != 0 && + collectionName != null && + !ReplicatedResourceClientUtils.isReadingFromMaster(request.getResourceType(), request.getOperationType())) { + return true; + } + } + + return false; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/SessionTokenHelper.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/SessionTokenHelper.java new file mode 100644 index 0000000000000..5926de23533e4 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/SessionTokenHelper.java @@ -0,0 +1,183 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * 
Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.BadRequestException; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.InternalServerErrorException; +import org.apache.commons.lang3.StringUtils; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; + +import static com.azure.data.cosmos.internal.Utils.ValueHolder; + +/** + * Used internally to provides helper functions to work with session tokens in the Azure Cosmos DB database service. + */ +public class SessionTokenHelper { + + public static void setOriginalSessionToken(RxDocumentServiceRequest request, String originalSessionToken) { + if (request == null) { + throw new IllegalArgumentException("request is null"); + } + + if (originalSessionToken == null) { + request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); + } else { + request.getHeaders().put(HttpConstants.HttpHeaders.SESSION_TOKEN, originalSessionToken); + } + } + + public static void setPartitionLocalSessionToken(RxDocumentServiceRequest request, ISessionContainer sessionContainer) throws CosmosClientException { + String originalSessionToken = request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN); + String partitionKeyRangeId = request.requestContext.resolvedPartitionKeyRange.id(); + + + if (Strings.isNullOrEmpty(partitionKeyRangeId)) { + // AddressCache/address resolution didn't produce partition key range id. + // In this case it is a bug. + throw new InternalServerErrorException(RMResources.PartitionKeyRangeIdAbsentInContext); + } + + if (StringUtils.isNotEmpty(originalSessionToken)) { + ISessionToken sessionToken = getLocalSessionToken(request, originalSessionToken, partitionKeyRangeId); + request.requestContext.sessionToken = sessionToken; + } else { + // use ambient session token. 
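+            // The "ambient" token is whatever the ISessionContainer has already recorded for this
+            // partition key range; resolvePartitionLocalSessionToken below also falls back to the
+            // parent ranges' tokens when the range has been split.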
+ ISessionToken sessionToken = sessionContainer.resolvePartitionLocalSessionToken(request, partitionKeyRangeId); + request.requestContext.sessionToken = sessionToken; + } + + if (request.requestContext.sessionToken == null) { + request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); + } else { + request.getHeaders().put(HttpConstants.HttpHeaders.SESSION_TOKEN, + String.format("%1s:%2s", partitionKeyRangeId, request.requestContext.sessionToken.convertToString())); + } + } + + private static ISessionToken getLocalSessionToken( + RxDocumentServiceRequest request, + String globalSessionToken, + String partitionKeyRangeId) throws CosmosClientException { + + if (partitionKeyRangeId == null || partitionKeyRangeId.isEmpty()) { + // AddressCache/address resolution didn't produce partition key range id. + // In this case it is a bug. + throw new IllegalStateException("Partition key range Id is absent in the context."); + } + + // Convert global session token to local - there's no point in sending global token over the wire to the backend. + // Global session token is comma separated array of : pairs. For example: + // 2:425344,748:2341234,99:42344 + // Local session token is single : pair. + // Backend only cares about pair which relates to the range owned by the partition. + String[] localTokens = StringUtils.split(globalSessionToken, ","); + Set partitionKeyRangeSet = new HashSet<>(); + partitionKeyRangeSet.add(partitionKeyRangeId); + + ISessionToken highestSessionToken = null; + + if (request.requestContext.resolvedPartitionKeyRange != null && request.requestContext.resolvedPartitionKeyRange.getParents() != null) { + partitionKeyRangeSet.addAll(request.requestContext.resolvedPartitionKeyRange.getParents()); + } + + for (String localToken : localTokens) { + String[] items = StringUtils.split(localToken, ":"); + if (items.length != 2) { + throw new BadRequestException(String.format(RMResources.InvalidSessionToken, partitionKeyRangeId)); + } + + ISessionToken parsedSessionToken = SessionTokenHelper.parse(items[1]); + + if (partitionKeyRangeSet.contains(items[0])) { + + if (highestSessionToken == null) { + highestSessionToken = parsedSessionToken; + } else { + highestSessionToken = highestSessionToken.merge(parsedSessionToken); + } + + } + } + + return highestSessionToken; + } + + static ISessionToken resolvePartitionLocalSessionToken(RxDocumentServiceRequest request, + String partitionKeyRangeId, + ConcurrentHashMap rangeIdToTokenMap) { + if (rangeIdToTokenMap != null) { + if (rangeIdToTokenMap.containsKey(partitionKeyRangeId)) { + return rangeIdToTokenMap.get(partitionKeyRangeId); + } else { + Collection parents = request.requestContext.resolvedPartitionKeyRange.getParents(); + if (parents != null) { + List parentsList = new ArrayList<>(parents); + for (int i = parentsList.size() - 1; i >= 0; i--) { + String parentId = parentsList.get(i); + if (rangeIdToTokenMap.containsKey(parentId)) { + return rangeIdToTokenMap.get(parentId); + } + } + } + } + } + + return null; + } + + public static ISessionToken parse(String sessionToken) { + ValueHolder partitionKeyRangeSessionToken = ValueHolder.initialize(null); + + if (SessionTokenHelper.tryParse(sessionToken, partitionKeyRangeSessionToken)) { + return partitionKeyRangeSessionToken.v; + } else { + throw new RuntimeException(new BadRequestException(String.format(RMResources.InvalidSessionToken, sessionToken))); + } + } + + static boolean tryParse(String sessionToken, ValueHolder parsedSessionToken) { + parsedSessionToken.v = null; + if 
(!Strings.isNullOrEmpty(sessionToken)) { + String[] sessionTokenSegments = StringUtils.split(sessionToken,":"); + return VectorSessionToken.tryCreate(sessionTokenSegments[sessionTokenSegments.length - 1], parsedSessionToken); + } else { + return false; + } + } + + public static void validateAndRemoveSessionToken(RxDocumentServiceRequest request) throws CosmosClientException { + String sessionToken = request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN); + if (!Strings.isNullOrEmpty(sessionToken)) { + getLocalSessionToken(request, sessionToken, StringUtils.EMPTY); + request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/StoredProcedure.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/StoredProcedure.java new file mode 100644 index 0000000000000..59e60d6594be4 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/StoredProcedure.java @@ -0,0 +1,82 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.Resource; + +/** + * Represents a stored procedure in the Azure Cosmos DB database service. + *

+ * Cosmos DB allows stored procedures to be executed in the storage tier, directly against a document collection. The + * script gets executed under ACID transactions on the primary storage partition of the specified collection. For + * additional details, refer to the server-side JavaScript API documentation. + */ +public class StoredProcedure extends Resource { + + /** + * Constructor. + */ + public StoredProcedure() { + super(); + } + + /** + * Constructor. + * + * @param jsonString the json string that represents the stored procedure. + */ + public StoredProcedure(String jsonString) { + super(jsonString); + } + + /** + * Sets the id + * @param id the name of the resource. + * @return the current stored procedure + */ + public StoredProcedure id(String id){ + super.id(id); + return this; + } + + /** + * Get the body of the stored procedure. + * + * @return the body of the stored procedure. + */ + public String getBody() { + return super.getString(Constants.Properties.BODY); + } + + /** + * Set the body of the stored procedure. + * + * @param body the body of the stored procedure. + */ + public void setBody(String body) { + BridgeInternal.setProperty(this, Constants.Properties.BODY, body); + } +} + diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/StoredProcedureResponse.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/StoredProcedureResponse.java new file mode 100644 index 0000000000000..7ccac84529036 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/StoredProcedureResponse.java @@ -0,0 +1,154 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.CosmosResponseDiagnostics; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Map; + +/** + * Represents the response returned from a stored procedure in the Azure Cosmos DB database service. + * Wraps the response body and headers. + */ +public final class StoredProcedureResponse { + private final static Logger logger = LoggerFactory.getLogger(StoredProcedureResponse.class); + private final RxDocumentServiceResponse response; + + /** + * Constructs StoredProcedureResponse. + * + * @param response the document service response. 
+ */ + public StoredProcedureResponse(RxDocumentServiceResponse response) { + this.response = response; + } + + /** + * Gets the Activity ID of the request. + * + * @return the activity id. + */ + public String getActivityId() { + return this.response.getResponseHeaders().get(HttpConstants.HttpHeaders.ACTIVITY_ID); + } + + /** + * Gets the token for use with session consistency requests. + * + * @return the session token. + */ + public String getSessionToken() { + return this.response.getResponseHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN); + } + + /** + * Gets the request completion status code. + * + * @return the status code. + */ + public int getStatusCode() { + return this.response.getStatusCode(); + } + + /** + * Gets the maximum size limit for this entity (in megabytes (MB) for server resources and in count for master + * resources). + * + * @return the max resource quota. + */ + public String getMaxResourceQuota() { + return this.response.getResponseHeaders().get(HttpConstants.HttpHeaders.MAX_RESOURCE_QUOTA); + } + + /** + * Gets the current size of this entity (in megabytes (MB) for server resources and in count for master resources) + * + * @return the current resource quota usage. + */ + public String getCurrentResourceQuotaUsage() { + return this.response.getResponseHeaders().get(HttpConstants.HttpHeaders.CURRENT_RESOURCE_QUOTA_USAGE); + } + + /** + * Gets the number of normalized requests charged. + * + * @return the request charge. + */ + public double getRequestCharge() { + String value = this.response.getResponseHeaders().get(HttpConstants.HttpHeaders.REQUEST_CHARGE); + try { + return Double.valueOf(value); + } catch (NumberFormatException e) { + logger.warn("INVALID x-ms-request-charge value {}.", value); + return 0; + } + } + + /** + * Gets the headers associated with the response. + * + * @return the response headers. + */ + public Map getResponseHeaders() { + return this.response.getResponseHeaders(); + } + + /** + * Gets the response of a stored procedure, serialized into a document. + * + * @return the response as a document. + */ + public Document getResponseAsDocument() { + return this.response.getResource(Document.class); + } + + /** + * Gets the response of a stored procedure as a string. + * + * @return the response as a string. + */ + public String getResponseAsString() { + return this.response.getReponseBodyAsString(); + } + + /** + * Gets the output from stored procedure console.log() statements. + * + * @return the output string from the stored procedure console.log() statements. + */ + public String getScriptLog() { + return this.response.getResponseHeaders().get(HttpConstants.HttpHeaders.SCRIPT_LOG_RESULTS); + } + + /** + * Gets the request diagnostic statics for execution of stored procedure. + * + * @return request diagnostic statistics for execution of stored procedure. 
+ */ + public CosmosResponseDiagnostics getCosmosResponseDiagnostics() { + return this.response.getCosmosResponseRequestDiagnosticStatistics(); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Strings.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Strings.java new file mode 100644 index 0000000000000..7f03efc4d0e2c --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Strings.java @@ -0,0 +1,93 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal; + +import org.apache.commons.lang3.StringUtils; + +/** + * While this class is public, but it is not part of our published public APIs. + * This is meant to be internally used only by our sdk. 
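+ * <p>
+ * Most of these helpers delegate to Apache Commons Lang {@code StringUtils}; for example
+ * {@code Strings.isNullOrWhiteSpace("  ")} returns {@code true} and
+ * {@code Strings.toCamelCase("STRONG")} returns {@code "Strong"}.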
+ */ +public class Strings { + public static final String Emtpy = ""; + + public static boolean isNullOrWhiteSpace(String str) { + return StringUtils.isEmpty(str) || StringUtils.isWhitespace(str); + } + public static boolean isNullOrEmpty(String str) { + return StringUtils.isEmpty(str); + } + + public static String toString(boolean value) { + return Boolean.toString(value); + } + + public static String toString(int value) { + return Integer.toString(value); + } + + public static boolean areEqual(String str1, String str2) { + return StringUtils.equals(str1, str2); + } + + public static boolean areEqualIgnoreCase(String str1, String str2) { + return StringUtils.equalsIgnoreCase(str1, str2); + } + + public static boolean containsIgnoreCase(String str1, String str2) { + return StringUtils.containsIgnoreCase(str1, str2); + } + + public static int compare(String str1, String str2) { + return StringUtils.compare(str1, str2); + } + + public static String toCamelCase(String str) { + if (isNullOrEmpty(str)) { + return str; + } + + return str.substring(0, 1).toUpperCase() + str.substring(1, str.length()).toLowerCase(); + } + + public static String fromCamelCaseToUpperCase(String str) { + if (str == null) { + return null; + } + + StringBuilder result = new StringBuilder(str); + + int i = 1; + while (i < result.length()) { + if (Character.isUpperCase(result.charAt(i))) { + result.insert(i, '_'); + i += 2; + } else { + result.replace(i, i + 1, Character.toString(Character.toUpperCase(result.charAt(i)))); + i ++; + } + } + + return result.toString(); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/TestConfigurations.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/TestConfigurations.java new file mode 100644 index 0000000000000..ce78c7d99d395 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/TestConfigurations.java @@ -0,0 +1,84 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal; + +import com.google.common.base.Strings; +import org.apache.commons.lang3.StringUtils; + +/** + * Contains the configurations for tests. + * + * For running tests, you can pass a customized endpoint configuration in one of the following + * ways: + *

    + *
+ * <ul>
+ * <li>-DACCOUNT_KEY="[your-key]" -DACCOUNT_HOST="[your-endpoint]" as JVM
+ * command-line options.</li>
+ * <li>You can set ACCOUNT_KEY and ACCOUNT_HOST as environment variables.</li>
+ * </ul>
+ * + * If none of the above is set, emulator endpoint will be used. + */ +public final class TestConfigurations { + // REPLACE MASTER_KEY and HOST with values from your Azure Cosmos DB account. + // The default values are credentials of the local emulator, which are not used in any production environment. + // + public static String MASTER_KEY = + System.getProperty("ACCOUNT_KEY", + StringUtils.defaultString(Strings.emptyToNull( + System.getenv().get("ACCOUNT_KEY")), + "C2y6yDjf5/R+ob0N8A7Cgv30VRDJIWEHLM+4QDU5DE2nQ9nDuVTqobD4b8mGGyPMbIZnqyMsEcaGQy67XIw/Jw==")); + + public static String HOST = + System.getProperty("ACCOUNT_HOST", + StringUtils.defaultString(Strings.emptyToNull( + System.getenv().get("ACCOUNT_HOST")), + "https://localhost:443/")); + + public static String CONSISTENCY = + System.getProperty("ACCOUNT_CONSISTENCY", + StringUtils.defaultString(Strings.emptyToNull( + System.getenv().get("ACCOUNT_CONSISTENCY")), "Strong")); + + public static String PREFERRED_LOCATIONS = + System.getProperty("PREFERRED_LOCATIONS", + StringUtils.defaultString(Strings.emptyToNull( + System.getenv().get("PREFERRED_LOCATIONS")), null)); + + public static String MAX_RETRY_LIMIT = + System.getProperty("MAX_RETRY_LIMIT", + StringUtils.defaultString(Strings.emptyToNull( + System.getenv().get("MAX_RETRY_LIMIT")), + "2")); + + public static String DESIRED_CONSISTENCIES = + System.getProperty("DESIRED_CONSISTENCIES", + StringUtils.defaultString(Strings.emptyToNull( + System.getenv().get("DESIRED_CONSISTENCIES")), + null)); + + public static String PROTOCOLS = + System.getProperty("PROTOCOLS", + StringUtils.defaultString(Strings.emptyToNull( + System.getenv().get("PROTOCOLS")), + null)); +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Trigger.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Trigger.java new file mode 100644 index 0000000000000..7916cffa1c8ab --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Trigger.java @@ -0,0 +1,125 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.Resource; +import com.azure.data.cosmos.TriggerOperation; +import com.azure.data.cosmos.TriggerType; +import org.apache.commons.lang3.StringUtils; + +/** + * Represents a trigger in the Azure Cosmos DB database service. + *

+ * Cosmos DB supports pre and post triggers defined in JavaScript to be executed on creates, updates and deletes. For + * additional details, refer to the server-side JavaScript API documentation. + */ +public class Trigger extends Resource { + + /** + * Constructor. + */ + public Trigger() { + super(); + } + + /** + * Constructor. + * + * @param jsonString the json string that represents the trigger. + */ + public Trigger(String jsonString) { + super(jsonString); + } + + /** + * Get the body of the trigger. + * + * @return the body of the trigger. + */ + public String getBody() { + return super.getString(Constants.Properties.BODY); + } + + /** + * Set the body of the trigger. + * + * @param body the body of the trigger. + */ + public void setBody(String body) { + BridgeInternal.setProperty(this, Constants.Properties.BODY, body); + } + + /** + * Get the type of the trigger. + * + * @return the trigger type. + */ + public TriggerType getTriggerType() { + TriggerType result = TriggerType.PRE; + try { + result = TriggerType.valueOf( + StringUtils.upperCase(super.getString(Constants.Properties.TRIGGER_TYPE))); + } catch (IllegalArgumentException e) { + // ignore the exception and return the default + this.getLogger().warn("INVALID triggerType value {}.", super.getString(Constants.Properties.TRIGGER_TYPE)); + } + return result; + } + + /** + * Set the type of the resource. + * + * @param triggerType the trigger type. + */ + public void setTriggerType(TriggerType triggerType) { + BridgeInternal.setProperty(this, Constants.Properties.TRIGGER_TYPE, triggerType.toString()); + } + + /** + * Get the operation type of the trigger. + * + * @return the trigger operation. + */ + public TriggerOperation getTriggerOperation() { + TriggerOperation result = TriggerOperation.CREATE; + try { + result = TriggerOperation.valueOf( + StringUtils.upperCase(super.getString(Constants.Properties.TRIGGER_OPERATION))); + } catch (IllegalArgumentException e) { + // ignore the exception and return the default + this.getLogger().warn("INVALID triggerOperation value {}.", super.getString(Constants.Properties.TRIGGER_OPERATION)); + } + return result; + } + + /** + * Set the operation type of the trigger. + * + * @param triggerOperation the trigger operation. + */ + public void setTriggerOperation(TriggerOperation triggerOperation) { + BridgeInternal.setProperty(this, Constants.Properties.TRIGGER_OPERATION, triggerOperation.toString()); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Undefined.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Undefined.java new file mode 100644 index 0000000000000..82bdf0cb88b42 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Undefined.java @@ -0,0 +1,56 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.JsonSerializable; + +/** + * Represents the 'Undefined' partition key in the Azure Cosmos DB database service. + */ +public class Undefined extends JsonSerializable { + + private final static Undefined value = new Undefined(); + + /** + * Constructor. CREATE a new instance of the Undefined object. + */ + private Undefined() { + } + + /** + * Returns the singleton value of Undefined. + * + * @return the Undefined value + */ + public static Undefined Value() { + return value; + } + + /** + * Returns the string representation of Undfined. + */ + public String toString() { + return "{}"; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/User.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/User.java new file mode 100644 index 0000000000000..3a5fc9974857a --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/User.java @@ -0,0 +1,72 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.Resource; + +/** + * Represents a database user in the Azure Cosmos DB database service. + */ +public class User extends Resource { + + /** + * Initialize a user object. + */ + public User() { + super(); + } + + /** + * Initialize a user object from json string. + * + * @param jsonString the json string that represents the database user. + */ + public User(String jsonString) { + super(jsonString); + } + + /** + * Sets the id + * @param id the name of the resource. + * @return the current instance of User + */ + public User id(String id){ + super.id(id); + return this; + } + + /** + * Gets the self-link of the permissions associated with the user. + * + * @return the permissions link. 
+ */ + public String getPermissionsLink() { + String selfLink = this.selfLink(); + if (selfLink.endsWith("/")) { + return selfLink + super.getString(Constants.Properties.PERMISSIONS_LINK); + } else { + return selfLink + "/" + super.getString(Constants.Properties.PERMISSIONS_LINK); + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/UserAgentContainer.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/UserAgentContainer.java new file mode 100644 index 0000000000000..5266b26fba984 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/UserAgentContainer.java @@ -0,0 +1,62 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +/** + * Used internally. The user agent object, which is used to track the version of the Java SDK of the Azure Cosmos DB database service. 
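+ * <p>
+ * A small illustrative sketch (the suffix value is hypothetical):
+ * <pre>{@code
+ * UserAgentContainer userAgentContainer = new UserAgentContainer();
+ * userAgentContainer.setSuffix(" my-app/1.0");          // suffixes longer than 64 characters are truncated
+ * String userAgent = userAgentContainer.getUserAgent(); // base SDK user agent string plus the suffix
+ * }</pre>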
+ */ +public class UserAgentContainer { + + private static final int MAX_SUFFIX_LENGTH = 64; + private final String baseUserAgent; + private String suffix; + private String userAgent; + + private UserAgentContainer(String sdkName, String sdkVersion) { + this.baseUserAgent = Utils.getUserAgent(sdkName, sdkVersion); + this.suffix = ""; + this.userAgent = baseUserAgent; + } + + public UserAgentContainer() { + this(HttpConstants.Versions.SDK_NAME, HttpConstants.Versions.SDK_VERSION); + } + + public String getSuffix() { + return this.suffix; + } + + public void setSuffix(String suffix) { + if (suffix.length() > MAX_SUFFIX_LENGTH) { + suffix = suffix.substring(0, MAX_SUFFIX_LENGTH); + } + + this.suffix = suffix; + this.userAgent = baseUserAgent.concat(this.suffix); + } + + public String getUserAgent() { + return this.userAgent; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/UserDefinedFunction.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/UserDefinedFunction.java new file mode 100644 index 0000000000000..30bd5c7f67089 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/UserDefinedFunction.java @@ -0,0 +1,71 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.Resource; + +/** + * Represents a user defined function in the Azure Cosmos DB database service. + *

+ * Cosmos DB supports JavaScript UDFs which can be used inside queries, stored procedures and triggers. For additional + * details, refer to the server-side JavaScript API documentation. + */ +public class UserDefinedFunction extends Resource { + + /** + * Constructor. + */ + public UserDefinedFunction() { + super(); + } + + /** + * Constructor. + * + * @param jsonString the json string that represents the user defined function. + */ + public UserDefinedFunction(String jsonString) { + super(jsonString); + } + + /** + * Get the body of the user defined function. + * + * @return the body. + */ + public String getBody() { + return super.getString(Constants.Properties.BODY); + } + + /** + * Set the body of the user defined function. + * + * @param body the body. + */ + public void setBody(String body) { + BridgeInternal.setProperty(this, Constants.Properties.BODY, body); + } +} + diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Utils.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Utils.java new file mode 100644 index 0000000000000..843ea904d1a4f --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/Utils.java @@ -0,0 +1,575 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.BadRequestException; +import com.azure.data.cosmos.ConsistencyLevel; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.databind.DeserializationFeature; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.uuid.EthernetAddress; +import com.fasterxml.uuid.Generators; +import com.fasterxml.uuid.impl.TimeBasedGenerator; +import org.apache.commons.lang3.StringUtils; + +import java.io.UnsupportedEncodingException; +import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import java.net.URLEncoder; +import java.time.OffsetDateTime; +import java.time.ZoneId; +import java.time.ZonedDateTime; +import java.time.format.DateTimeFormatter; +import java.util.ArrayList; +import java.util.Base64; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.UUID; + +/** + * While this class is public, but it is not part of our published public APIs. 
+ * This is meant to be internally used only by our sdk. + */ +public class Utils { + private static final ZoneId GMT_ZONE_ID = ZoneId.of("GMT"); + public static final Base64.Encoder Base64Encoder = Base64.getEncoder(); + public static final Base64.Decoder Base64Decoder = Base64.getDecoder(); + + private static final ObjectMapper simpleObjectMapper = new ObjectMapper(); + private static final TimeBasedGenerator TimeUUIDGegerator = + Generators.timeBasedGenerator(EthernetAddress.constructMulticastAddress()); + + // NOTE DateTimeFormatter.RFC_1123_DATE_TIME cannot be used. + // because cosmos db rfc1123 validation requires two digits for day. + // so Thu, 04 Jan 2018 00:30:37 GMT is accepted by the cosmos db service, + // but Thu, 4 Jan 2018 00:30:37 GMT is not. + // Therefore, we need a custom date time formatter. + private static final DateTimeFormatter RFC_1123_DATE_TIME = DateTimeFormatter.ofPattern("EEE, dd MMM yyyy HH:mm:ss zzz", Locale.US); + + static { + Utils.simpleObjectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + Utils.simpleObjectMapper.configure(JsonParser.Feature.ALLOW_SINGLE_QUOTES, true); + Utils.simpleObjectMapper.configure(JsonParser.Feature.ALLOW_TRAILING_COMMA, true); + Utils.simpleObjectMapper.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, true); + } + + public static byte[] getUTF8Bytes(String str) throws UnsupportedEncodingException { + return str.getBytes("UTF-8"); + } + + public static String encodeBase64String(byte[] binaryData) { + String encodedString = Base64Encoder.encodeToString(binaryData); + + if (encodedString.endsWith("\r\n")) { + encodedString = encodedString.substring(0, encodedString.length() - 2); + } + return encodedString; + } + + /** + * Checks whether the specified link is Name based or not + * + * @param link the link to analyze. + * @return true or false + */ + public static boolean isNameBased(String link) { + if (StringUtils.isEmpty(link)) { + return false; + } + + // trimming the leading "/" + if (link.startsWith("/") && link.length() > 1) { + link = link.substring(1); + } + + // Splitting the link(separated by "/") into parts + String[] parts = StringUtils.split(link, "/"); + + // First part should be "dbs" + if (parts.length == 0 || StringUtils.isEmpty(parts[0]) + || !parts[0].equalsIgnoreCase(Paths.DATABASES_PATH_SEGMENT)) { + return false; + } + + // The second part is the database id(ResourceID or Name) and cannot be + // empty + if (parts.length < 2 || StringUtils.isEmpty(parts[1])) { + return false; + } + + // Either ResourceID or database name + String databaseID = parts[1]; + + // Length of databaseID(in case of ResourceID) is always 8 + if (databaseID.length() != 8) { + return true; + } + + // Decoding the databaseID + byte[] buffer = ResourceId.fromBase64String(databaseID); + + // Length of decoded buffer(in case of ResourceID) is always 4 + if (buffer.length != 4) { + return true; + } + + return false; + } + + /** + * Checks whether the specified link is a Database Self Link or a Database + * ID based link + * + * @param link the link to analyze. 
+ * @return true or false + */ + public static boolean isDatabaseLink(String link) { + if (StringUtils.isEmpty(link)) { + return false; + } + + // trimming the leading and trailing "/" from the input string + link = trimBeginningAndEndingSlashes(link); + + // Splitting the link(separated by "/") into parts + String[] parts = StringUtils.split(link, "/"); + + if (parts.length != 2) { + return false; + } + + // First part should be "dbs" + if (StringUtils.isEmpty(parts[0]) || !parts[0].equalsIgnoreCase(Paths.DATABASES_PATH_SEGMENT)) { + return false; + } + + // The second part is the database id(ResourceID or Name) and cannot be + // empty + if (StringUtils.isEmpty(parts[1])) { + return false; + } + + return true; + } + + /** + * Checks whether the specified path segment is a resource type + * + * @param resourcePathSegment the path segment to analyze. + * @return true or false + */ + public static boolean IsResourceType(String resourcePathSegment) { + if (StringUtils.isEmpty(resourcePathSegment)) { + return false; + } + + switch (resourcePathSegment.toLowerCase()) { + case Paths.ATTACHMENTS_PATH_SEGMENT: + case Paths.COLLECTIONS_PATH_SEGMENT: + case Paths.DATABASES_PATH_SEGMENT: + case Paths.PERMISSIONS_PATH_SEGMENT: + case Paths.USERS_PATH_SEGMENT: + case Paths.DOCUMENTS_PATH_SEGMENT: + case Paths.STORED_PROCEDURES_PATH_SEGMENT: + case Paths.TRIGGERS_PATH_SEGMENT: + case Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT: + case Paths.CONFLICTS_PATH_SEGMENT: + case Paths.PARTITION_KEY_RANGES_PATH_SEGMENT: + return true; + + default: + return false; + } + } + + /** + * Joins the specified paths by appropriately padding them with '/' + * + * @param path1 the first path segment to join. + * @param path2 the second path segment to join. + * @return the concatenated path with '/' + */ + public static String joinPath(String path1, String path2) { + path1 = trimBeginningAndEndingSlashes(path1); + String result = "/" + path1 + "/"; + + if (!StringUtils.isEmpty(path2)) { + path2 = trimBeginningAndEndingSlashes(path2); + result += path2 + "/"; + } + + return result; + } + + /** + * Trims the beginning and ending '/' from the given path + * + * @param path the path to trim for beginning and ending slashes + * @return the path without beginning and ending '/' + */ + public static String trimBeginningAndEndingSlashes(String path) { + if(path == null) { + return null; + } + + if (path.startsWith("/")) { + path = path.substring(1); + } + + if (path.endsWith("/")) { + path = path.substring(0, path.length() - 1); + } + + return path; + } + + public static Map paramEncode(Map queryParams) { + // TODO: this is not performant revisit + HashMap map = new HashMap<>(); + for(Map.Entry paramEntry: queryParams.entrySet()) { + try { + map.put(paramEntry.getKey(), URLEncoder.encode(paramEntry.getValue(), "UTF-8")); + } catch (UnsupportedEncodingException e) { + throw new IllegalStateException(e); + } + } + return map; + } + + public static String createQuery(Map queryParameters) { + if (queryParameters == null) + return ""; + StringBuilder queryString = new StringBuilder(); + for (Map.Entry nameValuePair : queryParameters.entrySet()) { + String key = nameValuePair.getKey(); + String value = nameValuePair.getValue(); + if (key != null && !key.isEmpty()) { + if (queryString.length() > 0) { + queryString.append(RuntimeConstants.Separators.Query[1]); + } + queryString.append(key); + if (value != null) { + queryString.append(RuntimeConstants.Separators.Query[2]); + queryString.append(value); + } + } + } + return 
queryString.toString(); + } + + public static URL setQuery(String urlString, String query) { + + if (urlString == null) + throw new IllegalStateException("urlString parameter can't be null."); + query = Utils.removeLeadingQuestionMark(query); + try { + if (query != null && !query.isEmpty()) { + return new URI(Utils.addTrailingSlash(urlString) + RuntimeConstants.Separators.Query[0] + query) + .toURL(); + } else { + return new URI(Utils.addTrailingSlash(urlString)).toURL(); + } + } catch (MalformedURLException e) { + throw new IllegalStateException("Uri is invalid: ", e); + } catch (URISyntaxException e) { + throw new IllegalStateException("Uri is invalid: ", e); + } + } + + /** + * Given the full path to a resource, extract the collection path. + * + * @param resourceFullName the full path to the resource. + * @return the path of the collection in which the resource is. + */ + public static String getCollectionName(String resourceFullName) { + if (resourceFullName != null) { + resourceFullName = Utils.trimBeginningAndEndingSlashes(resourceFullName); + + int slashCount = 0; + for (int i = 0; i < resourceFullName.length(); i++) { + if (resourceFullName.charAt(i) == '/') { + slashCount++; + if (slashCount == 4) { + return resourceFullName.substring(0, i); + } + } + } + } + return resourceFullName; + } + + public static Boolean isCollectionPartitioned(DocumentCollection collection) { + if (collection == null) { + throw new IllegalArgumentException("collection"); + } + + return collection.getPartitionKey() != null + && collection.getPartitionKey().paths() != null + && collection.getPartitionKey().paths().size() > 0; + } + + public static boolean isCollectionChild(ResourceType type) { + return type == ResourceType.Document || type == ResourceType.Attachment || type == ResourceType.Conflict + || type == ResourceType.StoredProcedure || type == ResourceType.Trigger || type == ResourceType.UserDefinedFunction; + } + + public static boolean isWriteOperation(OperationType operationType) { + return operationType == OperationType.Create || operationType == OperationType.Upsert || operationType == OperationType.Delete || operationType == OperationType.Replace + || operationType == OperationType.ExecuteJavaScript; + } + + public static boolean isFeedRequest(OperationType requestOperationType) { + return requestOperationType == OperationType.Create || + requestOperationType == OperationType.Upsert || + requestOperationType == OperationType.ReadFeed || + requestOperationType == OperationType.Query || + requestOperationType == OperationType.SqlQuery || + requestOperationType == OperationType.HeadFeed; + } + + private static String addTrailingSlash(String path) { + if (path == null || path.isEmpty()) + path = new String(RuntimeConstants.Separators.Url); + else if (path.charAt(path.length() - 1) != RuntimeConstants.Separators.Url[0]) + path = path + RuntimeConstants.Separators.Url[0]; + + return path; + } + + private static String removeLeadingQuestionMark(String path) { + if (path == null || path.isEmpty()) + return path; + + if (path.charAt(0) == RuntimeConstants.Separators.Query[0]) + return path.substring(1); + + return path; + } + + public static boolean isValidConsistency(ConsistencyLevel backendConsistency, + ConsistencyLevel desiredConsistency) { + switch (backendConsistency) { + case STRONG: + return desiredConsistency == ConsistencyLevel.STRONG || + desiredConsistency == ConsistencyLevel.BOUNDED_STALENESS || + desiredConsistency == ConsistencyLevel.SESSION || + desiredConsistency == 
ConsistencyLevel.EVENTUAL || + desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX; + + case BOUNDED_STALENESS: + return desiredConsistency == ConsistencyLevel.BOUNDED_STALENESS || + desiredConsistency == ConsistencyLevel.SESSION || + desiredConsistency == ConsistencyLevel.EVENTUAL || + desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX; + + case SESSION: + case EVENTUAL: + case CONSISTENT_PREFIX: + return desiredConsistency == ConsistencyLevel.SESSION || + desiredConsistency == ConsistencyLevel.EVENTUAL || + desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX; + + default: + throw new IllegalArgumentException("backendConsistency"); + } + } + + public static String getUserAgent(String sdkName, String sdkVersion) { + String osName = System.getProperty("os.name"); + if (osName == null) { + osName = "Unknown"; + } + osName = osName.replaceAll("\\s", ""); + String userAgent = String.format("%s/%s JRE/%s %s/%s", + osName, + System.getProperty("os.version"), + System.getProperty("java.version"), + sdkName, + sdkVersion); + return userAgent; + } + + public static ObjectMapper getSimpleObjectMapper() { + return Utils.simpleObjectMapper; + } + + /** + * Returns Current Time in RFC 1123 format, e.g, + * Fri, 01 Dec 2017 19:22:30 GMT. + * + * @return an instance of STRING + */ + public static String nowAsRFC1123() { + ZonedDateTime now = ZonedDateTime.now(GMT_ZONE_ID); + return Utils.RFC_1123_DATE_TIME.format(now); + } + + public static UUID randomUUID() { + return TimeUUIDGegerator.generate(); + } + + public static String zonedDateTimeAsUTCRFC1123(OffsetDateTime offsetDateTime){ + return Utils.RFC_1123_DATE_TIME.format(offsetDateTime.atZoneSameInstant(GMT_ZONE_ID)); + } + + public static int getValueOrDefault(Integer val, int defaultValue) { + return val != null ? val.intValue() : defaultValue; + } + + public static void checkStateOrThrow(boolean value, String argumentName, String message) throws IllegalArgumentException { + + IllegalArgumentException t = checkStateOrReturnException(value, argumentName, message); + if (t != null) { + throw t; + } + } + + public static void checkNotNullOrThrow(Object val, String argumentName, String message) throws NullPointerException { + + NullPointerException t = checkNotNullOrReturnException(val, argumentName, message); + if (t != null) { + throw t; + } + } + + public static void checkStateOrThrow(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) throws IllegalArgumentException { + IllegalArgumentException t = checkStateOrReturnException(value, argumentName, argumentName, messageTemplateParams); + if (t != null) { + throw t; + } + } + + public static IllegalArgumentException checkStateOrReturnException(boolean value, String argumentName, String message) { + + if (value) { + return null; + } + + return new IllegalArgumentException(String.format("argumentName: %s, message: %s", argumentName, message)); + } + + public static IllegalArgumentException checkStateOrReturnException(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) { + if (value) { + return null; + } + + return new IllegalArgumentException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams))); + } + + private static NullPointerException checkNotNullOrReturnException(Object val, String argumentName, String messageTemplate, Object... 
messageTemplateParams) { + if (val != null) { + return null; + } + + return new NullPointerException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams))); + } + + public static BadRequestException checkRequestOrReturnException(boolean value, String argumentName, String message) { + + if (value) { + return null; + } + + return new BadRequestException(String.format("argumentName: %s, message: %s", argumentName, message)); + } + + public static BadRequestException checkRequestOrReturnException(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) { + if (value) { + return null; + } + + return new BadRequestException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams))); + } + + @SuppressWarnings("unchecked") + public static O as(I i, Class klass) { + if (i == null) { + return null; + } + + if (klass.isInstance(i)) { + return (O) i; + } else { + return null; + } + } + + @SuppressWarnings("unchecked") + public static List immutableListOf() { + return Collections.EMPTY_LIST; + } + + public static List immutableListOf(V v1) { + List list = new ArrayList<>(); + list.add(v1); + return Collections.unmodifiableList(list); + } + + public static MapimmutableMapOf() { + return Collections.emptyMap(); + } + + public static MapimmutableMapOf(K k1, V v1) { + Map map = new HashMap(); + map.put(k1, v1); + map = Collections.unmodifiableMap(map); + return map; + } + + public static V firstOrDefault(List list) { + return list.size() > 0? list.get(0) : null ; + } + + public static class ValueHolder { + + public ValueHolder() { + } + + public ValueHolder(V v) { + this.v = v; + } + public V v; + + public static ValueHolder initialize(T v) { + return new ValueHolder(v); + } + } + + public static boolean tryGetValue(Map dictionary, K key, ValueHolder holder) { + // doesn't work for dictionary with null value + holder.v = dictionary.get(key); + return holder.v != null; + } + + public static boolean tryRemove(Map dictionary, K key, ValueHolder holder) { + // doesn't work for dictionary with null value + holder.v = dictionary.remove(key); + return holder.v != null; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/VectorSessionToken.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/VectorSessionToken.java new file mode 100644 index 0000000000000..a55959f8465bf --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/VectorSessionToken.java @@ -0,0 +1,320 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + + +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.InternalServerErrorException; +import org.apache.commons.collections4.map.UnmodifiableMap; +import org.apache.commons.lang3.ObjectUtils; +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.HashMap; +import java.util.Map; +import java.util.stream.Collectors; + +import static com.azure.data.cosmos.internal.Utils.ValueHolder; + +/** + * Models vector clock bases session token. SESSION token has the following format: + * {Version}#{GlobalLSN}#{RegionId1}={LocalLsn1}#{RegionId2}={LocalLsn2}....#{RegionIdN}={LocalLsnN} + * 'Version' captures the configuration number of the partition which returned this session token. + * 'Version' is incremented everytime topology of the partition is updated (say due to Add/Remove/Failover). + * * The choice of separators '#' and '=' is important. Separators ';' and ',' are used to delimit + * per-partitionKeyRange session token + * session + * + * We make assumption that instances of this class are immutable (read only after they are constructed), so if you want to change + * this behaviour please review all of its uses and make sure that mutability doesn't break anything. + */ +public class VectorSessionToken implements ISessionToken { + private final static Logger logger = LoggerFactory.getLogger(VectorSessionToken.class); + private final static char SegmentSeparator = '#'; + private final static char RegionProgressSeparator = '='; + + private final long version; + private final long globalLsn; + private final UnmodifiableMap localLsnByRegion; + private final String sessionToken; + + private VectorSessionToken(long version, long globalLsn, UnmodifiableMap localLsnByRegion) { + this(version, globalLsn, localLsnByRegion, null); + } + + private VectorSessionToken(long version, long globalLsn, UnmodifiableMap localLsnByRegion, String sessionToken) { + this.version = version; + this.globalLsn = globalLsn; + this.localLsnByRegion = localLsnByRegion; + if (sessionToken == null) { + String regionProgress = String.join( + Character.toString(VectorSessionToken.SegmentSeparator), + localLsnByRegion. 
+ entrySet() + .stream() + .map(kvp -> new StringBuilder().append(kvp.getKey()).append(VectorSessionToken.RegionProgressSeparator).append(kvp.getValue())) + .collect(Collectors.toList())); + + if (Strings.isNullOrEmpty(regionProgress)) { + StringBuilder sb = new StringBuilder(); + sb.append(this.version) + .append(VectorSessionToken.SegmentSeparator) + .append(this.globalLsn); + this.sessionToken = sb.toString(); + } else { + StringBuilder sb = new StringBuilder(); + sb.append(this.version) + .append(VectorSessionToken.SegmentSeparator) + .append(this.globalLsn) + .append(VectorSessionToken.SegmentSeparator) + .append(regionProgress); + this.sessionToken = sb.toString(); + } + } else { + this.sessionToken = sessionToken; + } + } + + public static boolean tryCreate(String sessionToken, ValueHolder parsedSessionToken) { + ValueHolder versionHolder = ValueHolder.initialize(-1l); + ValueHolder globalLsnHolder = ValueHolder.initialize(-1l); + + ValueHolder> localLsnByRegion = ValueHolder.initialize(null); + + if (VectorSessionToken.tryParseSessionToken( + sessionToken, + versionHolder, + globalLsnHolder, + localLsnByRegion)) { + parsedSessionToken.v = new VectorSessionToken(versionHolder.v, globalLsnHolder.v, localLsnByRegion.v, sessionToken); + return true; + } else { + return false; + } + } + + public long getLSN() { + return this.globalLsn; + } + + @Override + public boolean equals(Object obj) { + VectorSessionToken other = Utils.as(obj, VectorSessionToken.class); + + if (other == null) { + return false; + } + + return this.version == other.version + && this.globalLsn == other.globalLsn + && this.areRegionProgressEqual(other.localLsnByRegion); + } + + public boolean isValid(ISessionToken otherSessionToken) throws CosmosClientException { + VectorSessionToken other = Utils.as(otherSessionToken, VectorSessionToken.class); + + if (other == null) { + throw new IllegalArgumentException("otherSessionToken"); + } + + if (other.version < this.version || other.globalLsn < this.globalLsn) { + return false; + } + + if (other.version == this.version && other.localLsnByRegion.size() != this.localLsnByRegion.size()) { + throw new InternalServerErrorException( + String.format(RMResources.InvalidRegionsInSessionToken, this.sessionToken, other.sessionToken)); + } + + for (Map.Entry kvp : other.localLsnByRegion.entrySet()) { + Integer regionId = kvp.getKey(); + long otherLocalLsn = kvp.getValue(); + ValueHolder localLsn = ValueHolder.initialize(-1l); + + + if (!Utils.tryGetValue(this.localLsnByRegion, regionId, localLsn)) { + // Region mismatch: other session token has progress for a region which is missing in this session token + // Region mismatch can be ignored only if this session token version is smaller than other session token version + if (this.version == other.version) { + throw new InternalServerErrorException( + String.format(RMResources.InvalidRegionsInSessionToken, this.sessionToken, other.sessionToken)); + } else { + // ignore missing region as other session token version > this session token version + } + } else { + // region is present in both session tokens. 
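+                        // Illustrative comparison with hypothetical token values: for this = "1#100#1=20#2=30"
+                        // and other = "1#105#1=20#2=31", every region in other has progressed at least as far
+                        // and the global LSN has not regressed, so isValid returns true; if other were
+                        // "1#105#1=19#2=31", region 1 would have regressed and the method returns false.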
+ if (otherLocalLsn < localLsn.v) { + return false; + } + } + } + + return true; + } + + // Merge is commutative operation, so a.Merge(b).Equals(b.Merge(a)) + public ISessionToken merge(ISessionToken obj) throws CosmosClientException { + VectorSessionToken other = Utils.as(obj, VectorSessionToken.class); + + if (other == null) { + throw new IllegalArgumentException("obj"); + } + + if (this.version == other.version && this.localLsnByRegion.size() != other.localLsnByRegion.size()) { + throw new InternalServerErrorException( + String.format(RMResources.InvalidRegionsInSessionToken, this.sessionToken, other.sessionToken)); + } + + VectorSessionToken sessionTokenWithHigherVersion; + VectorSessionToken sessionTokenWithLowerVersion; + + if (this.version < other.version) { + sessionTokenWithLowerVersion = this; + sessionTokenWithHigherVersion = other; + } else { + sessionTokenWithLowerVersion = other; + sessionTokenWithHigherVersion = this; + } + + Map highestLocalLsnByRegion = new HashMap<>(); + + for (Map.Entry kvp : sessionTokenWithHigherVersion.localLsnByRegion.entrySet()) { + Integer regionId = kvp.getKey(); + + long localLsn1 = kvp.getValue(); + ValueHolder localLsn2 = ValueHolder.initialize(-1l); + + if (Utils.tryGetValue(sessionTokenWithLowerVersion.localLsnByRegion, regionId, localLsn2)) { + highestLocalLsnByRegion.put(regionId, Math.max(localLsn1, localLsn2.v)); + } else if (this.version == other.version) { + throw new InternalServerErrorException( + String.format(RMResources.InvalidRegionsInSessionToken, this.sessionToken, other.sessionToken)); + } else { + highestLocalLsnByRegion.put(regionId, localLsn1); + } + } + + return new VectorSessionToken( + Math.max(this.version, other.version), + Math.max(this.globalLsn, other.globalLsn), + (UnmodifiableMap) UnmodifiableMap.unmodifiableMap(highestLocalLsnByRegion)); + } + + public String convertToString() { + return this.sessionToken; + } + + private boolean areRegionProgressEqual(UnmodifiableMap other) { + if (this.localLsnByRegion.size() != other.size()) { + return false; + } + + for (Map.Entry kvp : this.localLsnByRegion.entrySet()) { + Integer regionId = kvp.getKey(); + ValueHolder localLsn1 = ValueHolder.initialize(kvp.getValue()); + ValueHolder localLsn2 = ValueHolder.initialize(-1l); + + if (Utils.tryGetValue(other, regionId, localLsn2)) { + if (ObjectUtils.notEqual(localLsn1.v, localLsn2.v)) { + return false; + } + } + } + + return true; + } + + private static boolean tryParseSessionToken( + String sessionToken, + ValueHolder version, + ValueHolder globalLsn, + ValueHolder> localLsnByRegion) { + version.v = 0L; + localLsnByRegion.v = null; + globalLsn.v = -1L; + + if (Strings.isNullOrEmpty(sessionToken)) { + logger.warn("SESSION token is empty"); + return false; + } + + String[] segments = StringUtils.split(sessionToken, VectorSessionToken.SegmentSeparator); + + if (segments.length < 2) { + return false; + } + + if (!tryParseLong(segments[0], version) + || !tryParseLong(segments[1], globalLsn)) { + logger.warn("Unexpected session token version number '{}' OR global lsn '{}'.", segments[0], segments[1]); + return false; + } + + Map lsnByRegion = new HashMap<>(); + + for (int i = 2; i < segments.length; i++) { + String regionSegment = segments[i]; + + String[] regionIdWithLsn = StringUtils.split(regionSegment, VectorSessionToken.RegionProgressSeparator); + + if (regionIdWithLsn.length != 2) { + logger.warn("Unexpected region progress segment length '{}' in session token.", regionIdWithLsn.length); + return false; + } + + ValueHolder 
regionId = ValueHolder.initialize(0); + ValueHolder localLsn = ValueHolder.initialize(-1l); + + if (!tryParseInt(regionIdWithLsn[0], regionId) + || !tryParseLong(regionIdWithLsn[1], localLsn)) { + logger.warn("Unexpected region progress '{}' for region '{}' in session token.", regionIdWithLsn[0], regionIdWithLsn[1]); + return false; + } + + lsnByRegion.put(regionId.v, localLsn.v); + } + + localLsnByRegion.v = (UnmodifiableMap) UnmodifiableMap.unmodifiableMap(lsnByRegion); + return true; + } + + private static boolean tryParseLong(String str, ValueHolder value) { + try { + value.v = Long.parseLong(str); + return true; + } catch (Exception e) { + return false; + } + } + + private static boolean tryParseInt(String str, ValueHolder value) { + try { + value.v = Integer.parseInt(str); + return true; + } catch (Exception e) { + return false; + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/WebExceptionRetryPolicy.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/WebExceptionRetryPolicy.java new file mode 100644 index 0000000000000..f48f280481c06 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/WebExceptionRetryPolicy.java @@ -0,0 +1,79 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.internal.directconnectivity.WebExceptionUtility; +import org.apache.commons.lang3.time.StopWatch; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Mono; + +import java.time.Duration; +import java.util.concurrent.TimeUnit; + +public class WebExceptionRetryPolicy implements IRetryPolicy { + private final static Logger logger = LoggerFactory.getLogger(WebExceptionRetryPolicy.class); + + // total wait time in seconds to retry. should be max of primary reconfigrations/replication wait duration etc + private final static int waitTimeInSeconds = 30; + private final static int initialBackoffSeconds = 1; + private final static int backoffMultiplier = 2; + + private StopWatch durationTimer = new StopWatch(); + private int attemptCount = 1; + // Don't penalise first retry with delay. 
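+    // Backoff sketch with hypothetical timings (assuming each attempt stays retriable and the
+    // 30-second budget is not exhausted): attempt 1 retries immediately, attempt 2 waits 1s,
+    // attempt 3 waits 2s, attempt 4 waits 4s, and so on, each delay capped by whatever remains
+    // of waitTimeInSeconds.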
+ private int currentBackoffSeconds = WebExceptionRetryPolicy.initialBackoffSeconds; + + public WebExceptionRetryPolicy() { + durationTimer.start(); + } + + + @Override + public Mono shouldRetry(Exception exception) { + Duration backoffTime = Duration.ofSeconds(0); + + if (!WebExceptionUtility.isWebExceptionRetriable(exception)) { + // Have caller propagate original exception. + this.durationTimer.stop(); + return Mono.just(ShouldRetryResult.noRetry()); + } + + // Don't penalise first retry with delay. + if (attemptCount++ > 1) { + int remainingSeconds = WebExceptionRetryPolicy.waitTimeInSeconds - Math.toIntExact(this.durationTimer.getTime(TimeUnit.SECONDS)); + if (remainingSeconds <= 0) { + this.durationTimer.stop(); + return Mono.just(ShouldRetryResult.noRetry()); + } + + backoffTime = Duration.ofSeconds(Math.min(this.currentBackoffSeconds, remainingSeconds)); + this.currentBackoffSeconds *= WebExceptionRetryPolicy.backoffMultiplier; + } + + logger.warn("Received retriable web exception, will retry", exception); + + return Mono.just(ShouldRetryResult.retryAfter(backoffTime)); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/caches/AsyncCache.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/caches/AsyncCache.java new file mode 100644 index 0000000000000..668ae4428b981 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/caches/AsyncCache.java @@ -0,0 +1,157 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos.internal.caches; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.util.concurrent.Callable; +import java.util.concurrent.ConcurrentHashMap; + +public class AsyncCache { + + private final Logger logger = LoggerFactory.getLogger(AsyncCache.class); + private final ConcurrentHashMap> values = new ConcurrentHashMap<>(); + + private final IEqualityComparer equalityComparer; + + public AsyncCache(IEqualityComparer equalityComparer) { + this.equalityComparer = equalityComparer; + } + + public AsyncCache() { + this((value1, value2) -> { + if (value1 == value2) + return true; + if (value1 == null || value2 == null) + return false; + return value1.equals(value2); + }); + } + + public void set(TKey key, TValue value) { + logger.debug("set cache[{}]={}", key, value); + values.put(key, new AsyncLazy<>(value)); + } + + /** + * Gets value corresponding to key + * + *

+ * If another initialization function is already running, a new initialization function will not be started;
+ * the result will be the result of the currently running initialization function.
+ *
+ * If the previous initialization function completed successfully, the value it returned will be returned unless
+ * it is equal to obsoleteValue, in which case a new initialization function will be started.
+ *
+ * If the previous initialization function failed, a new one will be launched.
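+ * A minimal usage sketch (hypothetical names; loadCollectionFromBackend() is an assumed helper
+ * that returns a Mono of the value to cache):
+ * <pre>
+ * AsyncCache<String, DocumentCollection> cache = new AsyncCache<>();
+ * Mono<DocumentCollection> collection = cache.getAsync(
+ *     "dbs/testdb/colls/testcoll",        // key
+ *     null,                               // no obsolete value, so any cached entry is acceptable
+ *     () -> loadCollectionFromBackend()); // runs only when the key is missing, the cached value is
+ *                                         // obsolete, or the previous attempt failed
+ * </pre>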

+ * + * @param key Key for which to get a value. + * @param obsoleteValue Value which is obsolete and needs to be refreshed. + * @param singleValueInitFunc Initialization function. + * @return Cached value or value returned by initialization function. + */ + public Mono getAsync( + TKey key, + TValue obsoleteValue, + Callable> singleValueInitFunc) { + + AsyncLazy initialLazyValue = values.get(key); + if (initialLazyValue != null) { + + logger.debug("cache[{}] exists", key); + return initialLazyValue.single().flux().flatMap(value -> { + + if (!equalityComparer.areEqual(value, obsoleteValue)) { + logger.debug("Returning cache[{}] as it is different from obsoleteValue", key); + return Flux.just(value); + } + + logger.debug("cache[{}] result value is obsolete ({}), computing new value", key, obsoleteValue); + AsyncLazy asyncLazy = new AsyncLazy<>(singleValueInitFunc); + AsyncLazy actualValue = values.merge(key, asyncLazy, + (lazyValue1, lazyValue2) -> lazyValue1 == initialLazyValue ? lazyValue2 : lazyValue1); + return actualValue.single().flux(); + + }, err -> { + + logger.debug("cache[{}] resulted in error {}, computing new value", key, err); + AsyncLazy asyncLazy = new AsyncLazy<>(singleValueInitFunc); + AsyncLazy resultAsyncLazy = values.merge(key, asyncLazy, + (lazyValue1, lazyValu2) -> lazyValue1 == initialLazyValue ? lazyValu2 : lazyValue1); + return resultAsyncLazy.single().flux(); + + }, Flux::empty).single(); + } + + logger.debug("cache[{}] doesn't exist, computing new value", key); + AsyncLazy asyncLazy = new AsyncLazy<>(singleValueInitFunc); + AsyncLazy resultAsyncLazy = values.merge(key, asyncLazy, + (lazyValue1, lazyValu2) -> lazyValue1 == initialLazyValue ? lazyValu2 : lazyValue1); + return resultAsyncLazy.single(); + } + + public void remove(TKey key) { + values.remove(key); + } + + /** + * Remove value from cache and return it if present + * @param key + * @return Value if present, default value if not present + */ + public Mono removeAsync(TKey key) { + AsyncLazy lazy = values.remove(key); + return lazy.single(); + // TODO: .Net returns default value on failure of single why? + } + + public void clear() { + this.values.clear(); + } + + /** + * Forces refresh of the cached item if it is not being refreshed at the moment. + * @param key + * @param singleValueInitFunc + */ + public void refresh( + TKey key, + Callable> singleValueInitFunc) { + logger.debug("refreshing cache[{}]", key); + AsyncLazy initialLazyValue = values.get(key); + if (initialLazyValue != null && (initialLazyValue.isSucceeded() || initialLazyValue.isFaulted())) { + AsyncLazy newLazyValue = new AsyncLazy<>(singleValueInitFunc); + + // UPDATE the new task in the cache, + values.merge(key, newLazyValue, + (lazyValue1, lazyValu2) -> lazyValue1 == initialLazyValue ? 
lazyValu2 : lazyValue1); + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/caches/AsyncLazy.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/caches/AsyncLazy.java new file mode 100644 index 0000000000000..234c7b25b982e --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/caches/AsyncLazy.java @@ -0,0 +1,76 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.caches; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Mono; + +import java.util.concurrent.Callable; + +class AsyncLazy { + + private final static Logger logger = LoggerFactory.getLogger(AsyncLazy.class); + + private final Mono single; + + private volatile boolean succeeded; + private volatile boolean failed; + + public AsyncLazy(Callable> func) { + this(Mono.defer(() -> { + logger.debug("using Function> {}", func); + try { + return func.call(); + } catch (Exception e) { + return Mono.error(e); + } + })); + } + + public AsyncLazy(TValue value) { + this.single = Mono.just(value); + this.succeeded = true; + this.failed = false; + } + + private AsyncLazy(Mono single) { + logger.debug("constructor"); + this.single = single + .doOnSuccess(v -> this.succeeded = true) + .doOnError(e -> this.failed = true) + .cache(); + } + + public Mono single() { + return single; + } + + public boolean isSucceeded() { + return succeeded; + } + + public boolean isFaulted() { + return failed; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/caches/IEqualityComparer.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/caches/IEqualityComparer.java new file mode 100644 index 0000000000000..3b77b00d96bbc --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/caches/IEqualityComparer.java @@ -0,0 +1,27 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + 
* + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.caches; + +interface IEqualityComparer { + boolean areEqual(TValue v1, TValue v2); +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/caches/IPartitionKeyRangeCache.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/caches/IPartitionKeyRangeCache.java new file mode 100644 index 0000000000000..07d99280dff9e --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/caches/IPartitionKeyRangeCache.java @@ -0,0 +1,50 @@ +/** + * The MIT License (MIT) + * Copyright (c) 2017 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos.internal.caches; + +import com.azure.data.cosmos.internal.ICollectionRoutingMapCache; +import com.azure.data.cosmos.internal.IRoutingMapProvider; +import com.azure.data.cosmos.internal.PartitionKeyRange; +import com.azure.data.cosmos.internal.routing.CollectionRoutingMap; +import com.azure.data.cosmos.internal.routing.Range; +import reactor.core.publisher.Mono; + +import java.util.List; +import java.util.Map; + +/** + * + */ +public interface IPartitionKeyRangeCache extends IRoutingMapProvider, ICollectionRoutingMapCache { + + Mono tryLookupAsync(String collectionRid, CollectionRoutingMap previousValue, Map properties); + + Mono> tryGetOverlappingRangesAsync(String collectionRid, Range range, boolean forceRefresh, + Map properties); + + Mono tryGetPartitionKeyRangeByIdAsync(String collectionResourceId, String partitionKeyRangeId, boolean forceRefresh, + Map properties); + + Mono tryGetRangeByPartitionKeyRangeId(String collectionRid, String partitionKeyRangeId, Map properties); + +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/caches/RxClientCollectionCache.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/caches/RxClientCollectionCache.java new file mode 100644 index 0000000000000..565d218954242 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/caches/RxClientCollectionCache.java @@ -0,0 +1,121 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos.internal.caches; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.internal.DocumentCollection; +import com.azure.data.cosmos.internal.ISessionContainer; +import com.azure.data.cosmos.internal.AuthorizationTokenType; +import com.azure.data.cosmos.internal.ClearingSessionContainerClientRetryPolicy; +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.IAuthorizationTokenProvider; +import com.azure.data.cosmos.internal.IDocumentClientRetryPolicy; +import com.azure.data.cosmos.internal.IRetryPolicyFactory; +import com.azure.data.cosmos.internal.ObservableHelper; +import com.azure.data.cosmos.internal.OperationType; +import com.azure.data.cosmos.internal.PathsHelper; +import com.azure.data.cosmos.internal.ResourceType; +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import com.azure.data.cosmos.internal.RxDocumentServiceResponse; +import com.azure.data.cosmos.internal.RxStoreModel; +import com.azure.data.cosmos.internal.Utils; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.io.UnsupportedEncodingException; +import java.net.URLEncoder; +import java.util.HashMap; +import java.util.Map; + +/** + * Caches collection information. + * While this class is public, but it is not part of our published public APIs. + * This is meant to be internally used only by our sdk. + */ +public class RxClientCollectionCache extends RxCollectionCache { + + private RxStoreModel storeModel; + private final IAuthorizationTokenProvider tokenProvider; + private final IRetryPolicyFactory retryPolicy; + private final ISessionContainer sessionContainer; + + public RxClientCollectionCache(ISessionContainer sessionContainer, + RxStoreModel storeModel, + IAuthorizationTokenProvider tokenProvider, + IRetryPolicyFactory retryPolicy) { + this.storeModel = storeModel; + this.tokenProvider = tokenProvider; + this.retryPolicy = retryPolicy; + this.sessionContainer = sessionContainer; + } + + protected Mono getByRidAsync(String collectionRid, Map properties) { + IDocumentClientRetryPolicy retryPolicyInstance = new ClearingSessionContainerClientRetryPolicy(this.sessionContainer, this.retryPolicy.getRequestPolicy()); + return ObservableHelper.inlineIfPossible( + () -> this.readCollectionAsync(PathsHelper.generatePath(ResourceType.DocumentCollection, collectionRid, false), retryPolicyInstance, properties) + , retryPolicyInstance); + } + + protected Mono getByNameAsync(String resourceAddress, Map properties) { + IDocumentClientRetryPolicy retryPolicyInstance = new ClearingSessionContainerClientRetryPolicy(this.sessionContainer, this.retryPolicy.getRequestPolicy()); + return ObservableHelper.inlineIfPossible( + () -> this.readCollectionAsync(resourceAddress, retryPolicyInstance, properties), + retryPolicyInstance); + } + + private Mono readCollectionAsync(String collectionLink, IDocumentClientRetryPolicy retryPolicyInstance, Map properties) { + + String path = Utils.joinPath(collectionLink, null); + RxDocumentServiceRequest request = RxDocumentServiceRequest.create( + OperationType.Read, + ResourceType.DocumentCollection, + path, + new HashMap<>()); + + request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123()); + + String resourceName = request.getResourceAddress(); + String authorizationToken = tokenProvider.getUserAuthorizationToken( + resourceName, + request.getResourceType(), + HttpConstants.HttpMethods.GET, + request.getHeaders(), + 
AuthorizationTokenType.PrimaryMasterKey, + properties); + + try { + authorizationToken = URLEncoder.encode(authorizationToken, "UTF-8"); + } catch (UnsupportedEncodingException e) { + return Mono.error(new IllegalStateException("Failed to encode authtoken.", e)); + } + request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorizationToken); + + if (retryPolicyInstance != null){ + retryPolicyInstance.onBeforeSendRequest(request); + } + + Flux responseObs = this.storeModel.processMessage(request); + return responseObs.map(response -> BridgeInternal.toResourceResponse(response, DocumentCollection.class) + .getResource()).single(); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/caches/RxCollectionCache.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/caches/RxCollectionCache.java new file mode 100644 index 0000000000000..1a3d510a81915 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/caches/RxCollectionCache.java @@ -0,0 +1,209 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.caches; + +import com.azure.data.cosmos.internal.DocumentCollection; +import com.azure.data.cosmos.InvalidPartitionException; +import com.azure.data.cosmos.NotFoundException; +import com.azure.data.cosmos.internal.PathsHelper; +import com.azure.data.cosmos.internal.RMResources; +import com.azure.data.cosmos.internal.ResourceId; +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import com.azure.data.cosmos.internal.routing.PartitionKeyRangeIdentity; +import org.apache.commons.lang3.StringUtils; +import reactor.core.publisher.Mono; + +import java.util.Map; + +/** + * While this class is public, but it is not part of our published public APIs. + * This is meant to be internally used only by our sdk. + */ +public abstract class RxCollectionCache { + + private final AsyncCache collectionInfoByNameCache; + private final AsyncCache collectionInfoByIdCache; + + protected RxCollectionCache() { + this.collectionInfoByNameCache = new AsyncCache<>(new CollectionRidComparer()); + this.collectionInfoByIdCache = new AsyncCache<>(new CollectionRidComparer()); + } + + /** + * Resolves a request to a collection in a sticky manner. + * Unless request.ForceNameCacheRefresh is equal to true, it will return the same collection. + * @param request Request to resolve. 
+ * @return an instance of Single<DocumentCollection> + */ + public Mono resolveCollectionAsync( + RxDocumentServiceRequest request) { + // Mono Void to represent only terminal events specifically complete and error + Mono init = null; + if (request.getIsNameBased()) { + if (request.isForceNameCacheRefresh()) { + Mono mono = this.refreshAsync(request); + init = mono.then(Mono.fromRunnable(() -> request.setForceNameCacheRefresh(false))); + } + + Mono collectionInfoObs = this.resolveByPartitionKeyRangeIdentityAsync( + request.getPartitionKeyRangeIdentity(), request.properties); + + if (init != null) { + collectionInfoObs = init.then(collectionInfoObs); + } + + return collectionInfoObs.flatMap(Mono::just).switchIfEmpty(Mono.defer(() -> { + if (request.requestContext.resolvedCollectionRid == null) { + + Mono collectionInfoRes = this.resolveByNameAsync(request.getResourceAddress(), request.properties); + + return collectionInfoRes.flatMap(collection -> { + // TODO: how to async log this? + // logger.debug( + // "Mapped resourceName {} to resourceId {}.", + // request.getResourceAddress(), + // collectionInfo.resourceId()); + + request.setResourceId(collection.resourceId()); + request.requestContext.resolvedCollectionRid = collection.resourceId(); + return Mono.just(collection); + + }); + } else { + return this.resolveByRidAsync(request.requestContext.resolvedCollectionRid, request.properties); + } + })); + } else { + return resolveByPartitionKeyRangeIdentityAsync(request.getPartitionKeyRangeIdentity(),request.properties) + .flatMap(Mono::just).switchIfEmpty(this.resolveByRidAsync(request.getResourceAddress(), request.properties)); + } + } + + /** + * This method is only used in retry policy as it doesn't have request handy. + * @param resourceAddress + */ + public void refresh(String resourceAddress, Map properties) { + if (PathsHelper.isNameBased(resourceAddress)) { + String resourceFullName = PathsHelper.getCollectionPath(resourceAddress); + + this.collectionInfoByNameCache.refresh( + resourceFullName, + () -> { + Mono collectionObs = this.getByNameAsync(resourceFullName, properties); + return collectionObs.doOnSuccess(collection -> this.collectionInfoByIdCache.set(collection.resourceId(), collection)); + }); + } + } + + protected abstract Mono getByRidAsync(String collectionRid, Map properties); + + protected abstract Mono getByNameAsync(String resourceAddress, Map properties); + + private Mono resolveByPartitionKeyRangeIdentityAsync(PartitionKeyRangeIdentity partitionKeyRangeIdentity, Map properties) { + // if request is targeted at specific partition using x-ms-documentd-partitionkeyrangeid header, + // which contains value ",", then resolve to collection rid in this header. + if (partitionKeyRangeIdentity != null && partitionKeyRangeIdentity.getCollectionRid() != null) { + return this.resolveByRidAsync(partitionKeyRangeIdentity.getCollectionRid(), properties) + .onErrorResume(e -> { + if (e instanceof NotFoundException) { + // This is signal to the upper logic either to refresh + // collection cache and retry. 
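+                            // (Assumed rationale, added as a descriptive note: the collection rid carried in the
+                            // partition-key-range identity can be stale, e.g. after the collection was deleted and
+                            // recreated under the same name, which is why NOT_FOUND is surfaced here as
+                            // InvalidPartitionException rather than propagated unchanged.)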
+ return Mono.error(new InvalidPartitionException(RMResources.InvalidDocumentCollection)); + } + return Mono.error(e); + + }); + } + return Mono.empty(); + } + + private Mono resolveByRidAsync( + String resourceId, Map properties) { + + ResourceId resourceIdParsed = ResourceId.parse(resourceId); + String collectionResourceId = resourceIdParsed.getDocumentCollectionId().toString(); + + return this.collectionInfoByIdCache.getAsync( + collectionResourceId, + null, + () -> this.getByRidAsync(collectionResourceId, properties)); + } + + private Mono resolveByNameAsync( + String resourceAddress, Map properties) { + + String resourceFullName = PathsHelper.getCollectionPath(resourceAddress); + + return this.collectionInfoByNameCache.getAsync( + resourceFullName, + null, + () -> { + Mono collectionObs = this.getByNameAsync(resourceFullName, properties); + return collectionObs.doOnSuccess(collection -> this.collectionInfoByIdCache.set(collection.resourceId(), collection)); + }); + } + + private Mono refreshAsync(RxDocumentServiceRequest request) { + // TODO System.Diagnostics.Debug.Assert(request.IsNameBased); + + String resourceFullName = PathsHelper.getCollectionPath(request.getResourceAddress()); + Mono mono; + + if (request.requestContext.resolvedCollectionRid != null) { + // Here we will issue backend call only if cache wasn't already refreshed (if whatever is there corresponds to previously resolved collection rid). + DocumentCollection obsoleteValue = new DocumentCollection(); + obsoleteValue.resourceId(request.requestContext.resolvedCollectionRid); + + mono = this.collectionInfoByNameCache.getAsync( + resourceFullName, + obsoleteValue, + () -> { + Mono collectionObs = this.getByNameAsync(resourceFullName, request.properties); + return collectionObs.doOnSuccess(collection -> { + this.collectionInfoByIdCache.set(collection.resourceId(), collection); + }); + }).then(); + } else { + // In case of ForceRefresh directive coming from client, there will be no ResolvedCollectionRid, so we + // need to refresh unconditionally. 
+ mono = Mono.fromRunnable(() -> this.refresh(request.getResourceAddress(), request.properties)); + } + + return mono.doOnSuccess(aVoid -> request.requestContext.resolvedCollectionRid = null); + } + + private class CollectionRidComparer implements IEqualityComparer { + public boolean areEqual(DocumentCollection left, DocumentCollection right) { + if (left == null && right == null) { + return true; + } + + if ((left == null) ^ (right == null)) { + return false; + } + + return StringUtils.equals(left.resourceId(), right.resourceId()); + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/caches/RxPartitionKeyRangeCache.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/caches/RxPartitionKeyRangeCache.java new file mode 100644 index 0000000000000..c52fdd85ce0f5 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/caches/RxPartitionKeyRangeCache.java @@ -0,0 +1,230 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.caches; + +import com.azure.data.cosmos.internal.AsyncDocumentClient; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.internal.DocumentCollection; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.NotFoundException; +import com.azure.data.cosmos.internal.Exceptions; +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.OperationType; +import com.azure.data.cosmos.internal.PartitionKeyRange; +import com.azure.data.cosmos.internal.ResourceType; +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import com.azure.data.cosmos.internal.Utils; +import com.azure.data.cosmos.internal.routing.CollectionRoutingMap; +import com.azure.data.cosmos.internal.routing.IServerIdentity; +import com.azure.data.cosmos.internal.routing.InMemoryCollectionRoutingMap; +import com.azure.data.cosmos.internal.routing.Range; +import org.apache.commons.collections4.CollectionUtils; +import org.apache.commons.lang3.tuple.ImmutablePair; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +/** + * While this class is public, but it is not part of our published public APIs. 
+ * This is meant to be internally used only by our sdk. + **/ +public class RxPartitionKeyRangeCache implements IPartitionKeyRangeCache { + private final Logger logger = LoggerFactory.getLogger(RxPartitionKeyRangeCache.class); + + private final AsyncCache routingMapCache; + private final AsyncDocumentClient client; + private final RxCollectionCache collectionCache; + + public RxPartitionKeyRangeCache(AsyncDocumentClient client, RxCollectionCache collectionCache) { + this.routingMapCache = new AsyncCache<>(); + this.client = client; + this.collectionCache = collectionCache; + } + + /* (non-Javadoc) + * @see IPartitionKeyRangeCache#tryLookupAsync(java.lang.STRING, com.azure.data.cosmos.internal.routing.CollectionRoutingMap) + */ + @Override + public Mono tryLookupAsync(String collectionRid, CollectionRoutingMap previousValue, Map properties) { + return routingMapCache.getAsync( + collectionRid, + previousValue, + () -> getRoutingMapForCollectionAsync(collectionRid, previousValue, properties)) + .onErrorResume(err -> { + logger.debug("tryLookupAsync on collectionRid {} encountered failure", collectionRid, err); + CosmosClientException dce = Utils.as(err, CosmosClientException.class); + if (dce != null && Exceptions.isStatusCode(dce, HttpConstants.StatusCodes.NOTFOUND)) { + return Mono.empty(); + } + + return Mono.error(err); + }); + } + + @Override + public Mono tryLookupAsync(String collectionRid, CollectionRoutingMap previousValue, boolean forceRefreshCollectionRoutingMap, + Map properties) { + return tryLookupAsync(collectionRid, previousValue, properties); + } + + /* (non-Javadoc) + * @see IPartitionKeyRangeCache#tryGetOverlappingRangesAsync(java.lang.STRING, com.azure.data.cosmos.internal.routing.RANGE, boolean) + */ + @Override + public Mono> tryGetOverlappingRangesAsync(String collectionRid, Range range, boolean forceRefresh, + Map properties) { + + Mono routingMapObs = tryLookupAsync(collectionRid, null, properties); + + return routingMapObs.flatMap(routingMap -> { + if (forceRefresh) { + logger.debug("tryGetOverlappingRangesAsync with forceRefresh on collectionRid {}", collectionRid); + return tryLookupAsync(collectionRid, routingMap, properties); + } + + return Mono.just(routingMap); + }).switchIfEmpty(Mono.empty()).map(routingMap -> routingMap.getOverlappingRanges(range)).switchIfEmpty(Mono.defer(() -> { + logger.debug("Routing Map Null for collection: {} for range: {}, forceRefresh:{}", collectionRid, range.toString(), forceRefresh); + return Mono.empty(); + })); + } + + /* (non-Javadoc) + * @see IPartitionKeyRangeCache#tryGetPartitionKeyRangeByIdAsync(java.lang.STRING, java.lang.STRING, boolean) + */ + @Override + public Mono tryGetPartitionKeyRangeByIdAsync(String collectionResourceId, String partitionKeyRangeId, + boolean forceRefresh, Map properties) { + + Mono routingMapObs = tryLookupAsync(collectionResourceId, null, properties); + + return routingMapObs.flatMap(routingMap -> { + if (forceRefresh && routingMap != null) { + return tryLookupAsync(collectionResourceId, routingMap, properties); + } + return Mono.justOrEmpty(routingMap); + + }).switchIfEmpty(Mono.defer(Mono::empty)).map(routingMap -> routingMap.getRangeByPartitionKeyRangeId(partitionKeyRangeId)).switchIfEmpty(Mono.defer(() -> { + logger.debug("Routing Map Null for collection: {}, PartitionKeyRangeId: {}, forceRefresh:{}", collectionResourceId, partitionKeyRangeId, forceRefresh); + return null; + })); + } + + /* (non-Javadoc) + * @see IPartitionKeyRangeCache#tryGetRangeByPartitionKeyRangeId(java.lang.STRING, 
java.lang.STRING) + */ + @Override + public Mono tryGetRangeByPartitionKeyRangeId(String collectionRid, String partitionKeyRangeId, Map properties) { + Mono routingMapObs = routingMapCache.getAsync( + collectionRid, + null, + () -> getRoutingMapForCollectionAsync(collectionRid, null, properties)); + + return routingMapObs.map(routingMap -> routingMap.getRangeByPartitionKeyRangeId(partitionKeyRangeId)) + .onErrorResume(err -> { + CosmosClientException dce = Utils.as(err, CosmosClientException.class); + logger.debug("tryGetRangeByPartitionKeyRangeId on collectionRid {} and partitionKeyRangeId {} encountered failure", + collectionRid, partitionKeyRangeId, err); + + if (dce != null && Exceptions.isStatusCode(dce, HttpConstants.StatusCodes.NOTFOUND)) { + return Mono.empty(); + } + + return Mono.error(dce); + }); + } + + private Mono getRoutingMapForCollectionAsync( + String collectionRid, + CollectionRoutingMap previousRoutingMap, + Map properties) { + + // TODO: NOTE: main java code doesn't do anything in regard to the previous routing map + // .Net code instead of using DocumentClient controls sending request and receiving requests here + + // here we stick to what main java sdk does, investigate later. + + Mono> rangesObs = getPartitionKeyRange(collectionRid, false, properties); + + return rangesObs.flatMap(ranges -> { + + List> rangesTuples = + ranges.stream().map(range -> new ImmutablePair<>(range, (IServerIdentity) null)).collect(Collectors.toList()); + + + CollectionRoutingMap routingMap; + if (previousRoutingMap == null) + { + // Splits could have happened during change feed query and we might have a mix of gone and new ranges. + Set goneRanges = new HashSet<>(ranges.stream().flatMap(range -> CollectionUtils.emptyIfNull(range.getParents()).stream()).collect(Collectors.toSet())); + + routingMap = InMemoryCollectionRoutingMap.tryCreateCompleteRoutingMap( + rangesTuples.stream().filter(tuple -> !goneRanges.contains(tuple.left.id())).collect(Collectors.toList()), + collectionRid); + } + else + { + routingMap = previousRoutingMap.tryCombine(rangesTuples); + } + + if (routingMap == null) + { + // RANGE information either doesn't exist or is not complete. 
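+                // (Descriptive note, based on an assumption about the routing-map helpers: a null map here
+                // typically means the returned ranges do not yet cover the whole partition key space, for
+                // example when only one child of a recent split has appeared in the feed, so the lookup is
+                // failed with NOT_FOUND and can be retried once the topology settles.)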
+ return Mono.error(new NotFoundException(String.format("GetRoutingMapForCollectionAsync(collectionRid: {%s}), RANGE information either doesn't exist or is not complete.", collectionRid))); + } + + return Mono.just(routingMap); + }); + } + + private Mono> getPartitionKeyRange(String collectionRid, boolean forceRefresh, Map properties) { + RxDocumentServiceRequest request = RxDocumentServiceRequest.create( + OperationType.ReadFeed, + collectionRid, + ResourceType.PartitionKeyRange, + null + ); //this request doesn't actually go to server + + request.requestContext.resolvedCollectionRid = collectionRid; + Mono collectionObs = collectionCache.resolveCollectionAsync(request); + + return collectionObs.flatMap(coll -> { + + FeedOptions feedOptions = new FeedOptions(); + if (properties != null) { + feedOptions.properties(properties); + } + return client.readPartitionKeyRanges(coll.selfLink(), feedOptions) + // maxConcurrent = 1 to makes it in the right order + .flatMap(p -> Flux.fromIterable(p.results()), 1).collectList(); + }); + } +} + diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/Bootstrapper.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/Bootstrapper.java new file mode 100644 index 0000000000000..1ebd65c2160bf --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/Bootstrapper.java @@ -0,0 +1,37 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed; + +import reactor.core.publisher.Mono; + +/** + * Bootstrapping interface. + */ +public interface Bootstrapper { + /** + * It initializes the bootstrapping. + * + * @return a deferred computation of this call. 
+ */ + Mono initialize(); +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/CancellationToken.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/CancellationToken.java new file mode 100644 index 0000000000000..cf5ff3eddd2b4 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/CancellationToken.java @@ -0,0 +1,41 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed; + +/** + * Propagates notification that operations should be canceled.. + */ +public class CancellationToken { + private final CancellationTokenSource tokenSource; + + public CancellationToken(CancellationTokenSource source) { + this.tokenSource = source; + } + + /** + * @return true if the cancellation was requested from the source. + */ + public boolean isCancellationRequested() { + return tokenSource.isCancellationRequested(); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/CancellationTokenSource.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/CancellationTokenSource.java new file mode 100644 index 0000000000000..7cb363ca3a96a --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/CancellationTokenSource.java @@ -0,0 +1,61 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed; + +import java.io.Closeable; +import java.io.IOException; + +/** + * Signals to a {@link CancellationToken} that it should be canceled.. + */ +public class CancellationTokenSource implements Closeable { + + private volatile boolean tokenSourceClosed; + private volatile boolean cancellationRequested; + + public CancellationTokenSource() { + this.tokenSourceClosed = false; + this.cancellationRequested = false; + } + + public synchronized boolean isCancellationRequested() { + if (tokenSourceClosed) { + throw new IllegalStateException("Object already closed"); + } + + return this.cancellationRequested; + } + + public CancellationToken getToken() { + return new CancellationToken(this); + } + + public synchronized void cancel() { + this.cancellationRequested = true; + } + + @Override + public synchronized void close() throws IOException { + if (tokenSourceClosed) return; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/ChangeFeedContextClient.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/ChangeFeedContextClient.java new file mode 100644 index 0000000000000..f715a440fcb61 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/ChangeFeedContextClient.java @@ -0,0 +1,164 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
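A minimal sketch of how the `CancellationTokenSource` / `CancellationToken` pair above is meant to be wired together: the owner of the source requests cancellation, and the worker polls the token cooperatively. The worker loop itself is hypothetical.

```java
import com.azure.data.cosmos.internal.changefeed.CancellationToken;
import com.azure.data.cosmos.internal.changefeed.CancellationTokenSource;

// Hypothetical usage: a background worker polls the token and stops once cancel() is called.
public final class CancellationSketch {
    public static void main(String[] args) throws InterruptedException {
        CancellationTokenSource source = new CancellationTokenSource();
        CancellationToken token = source.getToken();

        Thread worker = new Thread(() -> {
            while (!token.isCancellationRequested()) {
                // process one unit of work, then re-check the token
            }
        });
        worker.start();

        Thread.sleep(100); // let the worker spin briefly
        source.cancel();   // cooperative shutdown request
        worker.join();
    }
}
```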
+ */ +package com.azure.data.cosmos.internal.changefeed; + +import com.azure.data.cosmos.ChangeFeedOptions; +import com.azure.data.cosmos.CosmosContainer; +import com.azure.data.cosmos.CosmosContainerProperties; +import com.azure.data.cosmos.CosmosContainerRequestOptions; +import com.azure.data.cosmos.CosmosContainerResponse; +import com.azure.data.cosmos.CosmosDatabase; +import com.azure.data.cosmos.CosmosDatabaseRequestOptions; +import com.azure.data.cosmos.CosmosDatabaseResponse; +import com.azure.data.cosmos.CosmosItem; +import com.azure.data.cosmos.CosmosItemProperties; +import com.azure.data.cosmos.CosmosItemRequestOptions; +import com.azure.data.cosmos.CosmosItemResponse; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.SqlQuerySpec; +import com.azure.data.cosmos.internal.PartitionKeyRange; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.net.URI; + +/** + * The interface that captures the APIs required to handle change feed processing logic. + */ +public interface ChangeFeedContextClient { + /** + * Reads the feed (sequence) of {@link PartitionKeyRange} for a database account from the Azure Cosmos DB service as an asynchronous operation. + * + * @param partitionKeyRangesOrCollectionLink the link of the resources to be read, or owner collection link, SelfLink or AltLink. E.g. /dbs/db_rid/colls/coll_rid/pkranges. + * @param feedOptions the options for the request; it can be set as null. + * @return an an {@link Flux} containing one or several feed response pages of the obtained items or an error. + */ + Flux> readPartitionKeyRangeFeed(String partitionKeyRangesOrCollectionLink, FeedOptions feedOptions); + + /** + * Method to create a change feed query for documents. + * + * @param collectionLink Specifies the collection to read documents from. + * @param feedOptions The options for processing the query results feed. + * @return an {@link Flux} containing one or several feed response pages of the obtained items or an error. + */ + Flux> createDocumentChangeFeedQuery(CosmosContainer collectionLink, ChangeFeedOptions feedOptions); + + /** + * Reads a database. + * + * @param database a reference to the database. + * @param options the {@link CosmosContainerRequestOptions} for this request; it can be set as null. + * @return an {@link Mono} containing the single cosmos database response with the read database or an error. + */ + Mono readDatabase(CosmosDatabase database, CosmosDatabaseRequestOptions options); + + /** + * Reads a {@link CosmosContainer}. + * + * @param containerLink a reference to the container. + * @param options the {@link CosmosContainerRequestOptions} for this request; it can be set as null. + * @return an {@link Mono} containing the single cosmos container response with the read container or an error. + */ + Mono readContainer(CosmosContainer containerLink, CosmosContainerRequestOptions options); + + /** + * Creates a {@link CosmosItem}. + * + * @param containerLink the reference to the parent container. + * @param document the document represented as a POJO or Document object. + * @param options the request options. + * @param disableAutomaticIdGeneration the flag for disabling automatic id generation. + * @return an {@link Mono} containing the single resource response with the created cosmos item or an error. 
+ */ + Mono createItem(CosmosContainer containerLink, Object document, CosmosItemRequestOptions options, + boolean disableAutomaticIdGeneration); + + /** + * DELETE a {@link CosmosItem}. + * + * @param itemLink the item reference. + * @param options the request options. + * @return an {@link Mono} containing the cosmos item resource response with the deleted item or an error. + */ + Mono deleteItem(CosmosItem itemLink, CosmosItemRequestOptions options); + + /** + * Replaces a {@link CosmosItem}. + * + * @param itemLink the item reference. + * @param document the document represented as a POJO or Document object. + * @param options the request options. + * @return an {@link Mono} containing the cosmos item resource response with the replaced item or an error. + */ + Mono replaceItem(CosmosItem itemLink, Object document, CosmosItemRequestOptions options); + + /** + * Reads a {@link CosmosItem} + * + * @param itemLink the item reference. + * @param options the request options. + * @return an {@link Mono} containing the cosmos item resource response with the read item or an error. + */ + Mono readItem(CosmosItem itemLink, CosmosItemRequestOptions options); + + /** + * Query for items in a document container. + * + * @param containerLink the reference to the parent container. + * @param querySpec the SQL query specification. + * @param options the feed options. + * @return an {@link Flux} containing one or several feed response pages of the obtained items or an error. + */ + Flux> queryItems(CosmosContainer containerLink, SqlQuerySpec querySpec, FeedOptions options); + + /** + * @return the Cosmos client's service endpoint. + */ + URI getServiceEndpoint(); + + /** + * Reads and returns the container properties. + * + * @param containerLink a reference to the container. + * @param options the {@link CosmosContainerRequestOptions} for this request; it can be set as null. + * @return an {@link Mono} containing the read container properties. + */ + Mono readContainerSettings(CosmosContainer containerLink, CosmosContainerRequestOptions options); + + /** + * @return the Cosmos container client. + */ + CosmosContainer getContainerClient(); + + /** + * @return the Cosmos database client. + */ + CosmosDatabase getDatabaseClient(); + + /** + * Closes the document client instance and cleans up the resources. + */ + void close(); +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/ChangeFeedObserver.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/ChangeFeedObserver.java new file mode 100644 index 0000000000000..b360c22b4c44d --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/ChangeFeedObserver.java @@ -0,0 +1,55 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
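As a rough usage sketch for the `ChangeFeedContextClient` contract defined above, assuming some implementation and pre-built `ChangeFeedOptions` are supplied by the caller (both hypothetical here), subscribing to a change feed query looks roughly like this:

```java
import com.azure.data.cosmos.ChangeFeedOptions;
import com.azure.data.cosmos.CosmosContainer;
import com.azure.data.cosmos.internal.changefeed.ChangeFeedContextClient;

// Hypothetical caller: subscribe to one change feed query and log the size of each page.
public final class ChangeFeedQuerySketch {
    public static void subscribeOnce(ChangeFeedContextClient client,
                                     CosmosContainer monitoredContainer,
                                     ChangeFeedOptions options) {
        client.createDocumentChangeFeedQuery(monitoredContainer, options)
            .subscribe(
                page -> System.out.println("received " + page.results().size() + " changed documents"),
                error -> System.err.println("change feed query failed: " + error));
    }
}
```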
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed; + +import com.azure.data.cosmos.CosmosItemProperties; + +import java.util.List; + +/** + * The interface used to deliver change events to document feed observers. + */ +public interface ChangeFeedObserver { + /** + * This is called when change feed observer is opened. + * + * @param context the context specifying partition for this observer, etc. + */ + void open(ChangeFeedObserverContext context); + + /** + * This is called when change feed observer is closed. + * + * @param context the context specifying partition for this observer, etc. + * @param reason the reason the observer is closed. + */ + void close(ChangeFeedObserverContext context, ChangeFeedObserverCloseReason reason); + + /** + * This is called when document changes are available on change feed. + * + * @param context the context specifying partition for this observer, etc. + * @param docs the documents changed. + */ + void processChanges(ChangeFeedObserverContext context, List docs); +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/ChangeFeedObserverCloseReason.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/ChangeFeedObserverCloseReason.java new file mode 100644 index 0000000000000..4b2ef51483b51 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/ChangeFeedObserverCloseReason.java @@ -0,0 +1,58 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed; + +/** + * The reason for the {@link ChangeFeedObserver} to close. + */ +public enum ChangeFeedObserverCloseReason { + /** + * UNKNOWN failure. This should never be sent to observers. + */ + UNKNOWN, + + /** + * The ChangeFeedEventProcessor is shutting down. + */ + SHUTDOWN, + + /** + * The resource, such as database or collection was removed. 
+ */ + RESOURCE_GONE, + + /** + * Lease was lost due to expiration or load-balancing. + */ + LEASE_LOST, + + /** + * ChangeFeedObserver threw an exception. + */ + OBSERVER_ERROR, + + /** + * The lease is gone. This can be due to partition split. + */ + LEASE_GONE, +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/ChangeFeedObserverContext.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/ChangeFeedObserverContext.java new file mode 100644 index 0000000000000..15d18034a8e2a --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/ChangeFeedObserverContext.java @@ -0,0 +1,58 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed; + +import com.azure.data.cosmos.CosmosItemProperties; +import com.azure.data.cosmos.FeedResponse; +import reactor.core.publisher.Mono; + +/** + * Represents the context passed to {@link ChangeFeedObserver} events. + */ +public interface ChangeFeedObserverContext { + + /** + * Gets the id of the partition for the current event. + * + * @return the id of the partition for the current event. + */ + String getPartitionKeyRangeId(); + + /** + * Gets the response from the underlying call. + * + * @return the response from the underlying call. + */ + FeedResponse getFeedResponse(); + + /** + * Checkpoints progress of a stream. This method is valid only if manual checkpoint was configured. + *

+ * Client may accept multiple change feed batches to process in parallel. + * Once first N document processing was finished the client can call checkpoint on the last completed batches in the row. + * In case of automatic checkpointing this is method throws. + * + * @return a representation of the deferred computation of this call. + */ + Mono checkpoint(); +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/ChangeFeedObserverFactory.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/ChangeFeedObserverFactory.java new file mode 100644 index 0000000000000..b87929bd4a1cc --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/ChangeFeedObserverFactory.java @@ -0,0 +1,35 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed; + +/** + * Factory class used to create instance(s) of {@link ChangeFeedObserver}. + */ +public interface ChangeFeedObserverFactory { + /** + * Creates an instance of a {@link ChangeFeedObserver}. + * + * @return an instance of a {@link ChangeFeedObserver}. + */ + ChangeFeedObserver createObserver(); +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/CheckpointFrequency.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/CheckpointFrequency.java new file mode 100644 index 0000000000000..e6ab9a3e83eee --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/CheckpointFrequency.java @@ -0,0 +1,113 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
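To make the observer contract above concrete, here is a hypothetical `ChangeFeedObserver` implementation that only logs; the generic type of the batch is assumed from the javadoc (documents arrive as `List<CosmosItemProperties>`).

```java
import java.util.List;

import com.azure.data.cosmos.CosmosItemProperties;
import com.azure.data.cosmos.internal.changefeed.ChangeFeedObserver;
import com.azure.data.cosmos.internal.changefeed.ChangeFeedObserverCloseReason;
import com.azure.data.cosmos.internal.changefeed.ChangeFeedObserverContext;

// Hypothetical observer: logs lifecycle events and the size of each delivered batch.
public class LoggingChangeFeedObserver implements ChangeFeedObserver {
    @Override
    public void open(ChangeFeedObserverContext context) {
        System.out.println("observer opened for range " + context.getPartitionKeyRangeId());
    }

    @Override
    public void close(ChangeFeedObserverContext context, ChangeFeedObserverCloseReason reason) {
        System.out.println("observer closed for range " + context.getPartitionKeyRangeId()
            + ", reason: " + reason);
    }

    @Override
    public void processChanges(ChangeFeedObserverContext context, List<CosmosItemProperties> docs) {
        System.out.println("processing " + docs.size() + " changed documents");
    }
}
```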
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed; + +import java.time.Duration; + +/** + * Specifies the frequency of lease event. The event will trigger when either of conditions is satisfied. + */ +public class CheckpointFrequency { + private boolean explicitCheckpoint; + private int processedDocumentCount; + private Duration timeInterval; + + public CheckpointFrequency() { + this.explicitCheckpoint = false; + // DEFAULT to always checkpoint after processing each feed batch. + processedDocumentCount = 0; + timeInterval = null; + } + + /** + * Gets a value indicating whether explicit check-pointing is enabled. + *

+ * By default false. Setting to true means changefeed host will never checkpoint and client code needs to explicitly + * checkpoint via {@link PartitionCheckpointer} + * + * @return a value indicating whether explicit check-pointing is enabled. + */ + public boolean isExplicitCheckpoint() { + return explicitCheckpoint; + } + + /** + * Gets the value that specifies to checkpoint every specified number of docs. + * + * @return the value that specifies to checkpoint every specified number of docs. + */ + public int getProcessedDocumentCount() { + return this.processedDocumentCount; + } + + /** + * Gets the value that specifies to checkpoint every specified time interval. + * + * @return the value that specifies to checkpoint every specified time interval. + */ + public Duration getTimeInterval() { + return this.timeInterval; + } + + /** + * Sets a value indicating explicit check-pointing is enabled. + * + * @return current {@link CheckpointFrequency}. + */ + public CheckpointFrequency withExplicitCheckpoint() { + this.explicitCheckpoint = true; + return this; + } + + /** + * Sets a value indicating explicit checkpointing is disabled. + * + * @return current {@link CheckpointFrequency}. + */ + public CheckpointFrequency withoutExplicitCheckpoint() { + this.explicitCheckpoint = false; + return this; + } + + /** + * Sets the value that specifies to checkpoint every specified number of docs. + * + * @param processedDocumentCount the value that specifies to checkpoint every specified number of docs. + * @return current {@link CheckpointFrequency}. + */ + public CheckpointFrequency withProcessedDocumentCount(int processedDocumentCount) { + this.processedDocumentCount = processedDocumentCount; + return this; + } + + /** + * Sets the value that specifies to checkpoint every specified time interval. + * + * @param timeInterval the value that specifies to checkpoint every specified time interval. + * @return current {@link CheckpointFrequency}. + */ + public CheckpointFrequency withTimeInterval(Duration timeInterval) { + this.timeInterval = timeInterval; + return this; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/HealthMonitor.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/HealthMonitor.java new file mode 100644 index 0000000000000..f5c99caeea8d6 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/HealthMonitor.java @@ -0,0 +1,38 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
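A short sketch of configuring the `CheckpointFrequency` class above through its fluent setters, for example to checkpoint after every 100 processed documents or every 30 seconds, whichever condition is hit first:

```java
import java.time.Duration;

import com.azure.data.cosmos.internal.changefeed.CheckpointFrequency;

// Sketch: checkpoint after 100 processed documents or after 30 seconds, whichever happens first.
public final class CheckpointFrequencySketch {
    public static CheckpointFrequency every100DocsOr30Seconds() {
        return new CheckpointFrequency()
            .withProcessedDocumentCount(100)
            .withTimeInterval(Duration.ofSeconds(30));
    }
}
```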
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed; + +import reactor.core.publisher.Mono; + +/** + * A strategy for handling the situation when the change feed processor is not able to acquire lease due to unknown reasons. + */ +public interface HealthMonitor { + /** + * A logic to handle that exceptional situation. + * + * @param record the monitoring record. + * @return a representation of the deferred computation of this call. + */ + Mono inspect(HealthMonitoringRecord record); +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/HealthMonitoringRecord.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/HealthMonitoringRecord.java new file mode 100644 index 0000000000000..c0aadd7660feb --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/HealthMonitoringRecord.java @@ -0,0 +1,92 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed; + +/** + * A record used in the health monitoring. + */ +public class HealthMonitoringRecord { + public final HealthSeverity severity; + public final MonitoredOperation operation; + public final Lease lease; + public final Throwable throwable; + + /** + * Initializes a new instance of the {@link HealthMonitoringRecord} class. + * + * @param severity the health severity level. + * @param operation the operation. + * @param lease the lease. + * @param throwable the exception. + */ + public HealthMonitoringRecord(HealthSeverity severity, MonitoredOperation operation, Lease lease, Throwable throwable) { + if (lease == null) throw new IllegalArgumentException("lease"); + this.severity = severity; + this.operation = operation; + this.lease = lease; + this.throwable = throwable; + } + + /** + * @return the severity of this monitoring record. + */ + public HealthSeverity getSeverity() { + return this.severity; + } + + /** + * The health severity level. + */ + public enum HealthSeverity { + /** + * Critical level. + */ + CRITICAL(10), + + /** + * Error level. + */ + ERROR(20), + + /** + * Information level. 
+ */ + INFORMATIONAL(30); + + public final int value; + + HealthSeverity(int value){ + this.value = value; + } + } + + /** + * The health monitoring phase. + */ + public enum MonitoredOperation { + /** + * A phase when the instance tries to acquire the lease. + */ + ACQUIRE_LEASE, + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/Lease.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/Lease.java new file mode 100644 index 0000000000000..178dd354080d4 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/Lease.java @@ -0,0 +1,142 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed; + +import com.azure.data.cosmos.ChangeFeedProcessor; + +import java.time.ZonedDateTime; +import java.util.Map; + +/** + * Represents a lease that is persisted as a document in the lease collection. + *
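A hypothetical `HealthMonitor` implementation for the interface above; the `Mono<Void>` parameterization of `inspect` is assumed from the javadoc wording.

```java
import com.azure.data.cosmos.internal.changefeed.HealthMonitor;
import com.azure.data.cosmos.internal.changefeed.HealthMonitoringRecord;
import reactor.core.publisher.Mono;

// Hypothetical monitor: log every record that is more severe than INFORMATIONAL.
public class LoggingHealthMonitor implements HealthMonitor {
    @Override
    public Mono<Void> inspect(HealthMonitoringRecord record) {
        return Mono.fromRunnable(() -> {
            if (record.getSeverity() != HealthMonitoringRecord.HealthSeverity.INFORMATIONAL) {
                System.err.println("health issue during " + record.operation
                    + " on lease " + record.lease.getLeaseToken()
                    + ": " + record.throwable);
            }
        });
    }
}
```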

+ * Leases are used to: + * Keep track of the {@link ChangeFeedProcessor} progress for a particular Partition Key RANGE. + * Distribute load between different instances of {@link ChangeFeedProcessor}. + * Ensure reliable recovery for cases when an instance of {@link ChangeFeedProcessor} gets disconnected, hangs or crashes. + */ +public interface Lease { + /** + * Gets the partition associated with the lease. + * + * @return the partition associated with the lease. + */ + String getLeaseToken(); + + /** + * Gets the host name owner of the lease. + * + *

+ * The Owner keeps track which {@link ChangeFeedProcessor} is currently processing that Partition Key RANGE. + * + * @return the host name owner of the lease. + */ + String getOwner(); + + /** + * Gets the timestamp of the lease. + * + * @return the timestamp of the lease. + */ + String getTimestamp(); + + /** + * Gets the continuation token used to determine the last processed point of the Change Feed. + * + * @return the continuation token used to determine the last processed point of the Change Feed. + */ + String getContinuationToken(); + + /** + * Sets the continuation token used to determine the last processed point of the Change Feed. + * + * + * @param continuationToken the continuation token used to determine the last processed point of the Change Feed. + */ + void setContinuationToken(String continuationToken); + + /** + * Gets the lease ID. + * + * @return the lease ID. + */ + String getId(); + + /** + * Gets the concurrency token. + * + * @return the concurrency token. + */ + String getConcurrencyToken(); + + /** + * Gets the custom lease item which can be managed from {@link PartitionLoadBalancingStrategy}. + * + * @return the custom lease item. + */ + Map getProperties(); + + /** + * Sets the host name owner of the lease. + * + *

+ * The Owner keeps track which {@link ChangeFeedProcessor} is currently processing that Partition Key RANGE. + * + * @param owner the host name owner of the lease. + */ + void setOwner(String owner); + + /** + * Sets the timestamp of the lease. + * + *

+ * The timestamp is used to determine lease expiration. + * + * @param timestamp the timestamp of the lease. + */ + void setTimestamp(ZonedDateTime timestamp); + + /** + * Sets the lease ID. + * + * + * @param id the lease ID. + */ + void setId(String id); + + /** + * Sets the concurrency token. + * + * + * @param concurrencyToken the concurrency token. + */ + void setConcurrencyToken(String concurrencyToken); + + /** + * Sets the custom lease item which can be managed from {@link PartitionLoadBalancingStrategy}. + * + * + * @param properties the custom lease item. + */ + void setProperties(Map properties); +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/LeaseCheckpointer.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/LeaseCheckpointer.java new file mode 100644 index 0000000000000..da413ca2bdefd --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/LeaseCheckpointer.java @@ -0,0 +1,41 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed; + +import reactor.core.publisher.Mono; + +/** + * Interface for check-pointing the lease. + */ +public interface LeaseCheckpointer { + /** + * Check-points the lease. + *

+ * Throws LeaseLostException if other host acquired the lease or lease was deleted. + * + * @param lease the lease to renew. + * @param continuationToken the continuation token. + * @return the updated renewed lease. + */ + Mono checkpoint(Lease lease, String continuationToken); +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/LeaseContainer.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/LeaseContainer.java new file mode 100644 index 0000000000000..be77084431c3d --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/LeaseContainer.java @@ -0,0 +1,40 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed; + +import reactor.core.publisher.Flux; + +/** + * Represents operations to get leases from lease store.. + */ +public interface LeaseContainer { + /** + * @return all leases. + */ + Flux getAllLeases(); + + /** + * @return all leases owned by the current host. + */ + Flux getOwnedLeases(); +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/LeaseManager.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/LeaseManager.java new file mode 100644 index 0000000000000..342213d20d075 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/LeaseManager.java @@ -0,0 +1,84 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed; + +import com.azure.data.cosmos.internal.changefeed.exceptions.LeaseLostException; +import reactor.core.publisher.Mono; + +/** + * It defines a way to perform operations with {@link Lease}. + */ +public interface LeaseManager { + /** + * Checks whether the lease exists and creates it if it does not exist. + * + * @param leaseToken the lease token to work with. + * @param continuationToken the continuation token if it exists. + * @return the lease. + */ + Mono createLeaseIfNotExist(String leaseToken, String continuationToken); + + /** + * Deletes the lease. + * + * @param lease the lease to delete. + * @return a deferred computation of this call. + */ + Mono delete(Lease lease); + + /** + * Acquires ownership of the lease. + * It can throw {@link LeaseLostException} if other host acquired concurrently the lease. + * + * @param lease the lease to acquire. + * @return the updated lease. + */ + Mono acquire(Lease lease); + + /** + * It releases ownership of the lease. + * It can throw {@link LeaseLostException} if other host acquired the lease. + * + * @param lease the lease to acquire. + * @return a deferred computation of this call. + */ + Mono release(Lease lease); + + /** + * Renew the lease; leases are periodically renewed to prevent expiration. + * It can throw {@link LeaseLostException} if other host acquired the lease. + * + * @param lease the lease to renew. + * @return the updated lease. + */ + Mono renew(Lease lease); + + /** + * REPLACE item from the specified lease. + * It can throw {@link LeaseLostException} if other host acquired the lease. + * + * @param leaseToUpdatePropertiesFrom the new item. + * @return updated lease. + */ + Mono updateProperties(Lease leaseToUpdatePropertiesFrom); +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/LeaseRenewer.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/LeaseRenewer.java new file mode 100644 index 0000000000000..07795e34f8131 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/LeaseRenewer.java @@ -0,0 +1,43 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
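As a usage sketch for the `LeaseManager` contract above (generic parameterizations such as `Mono<Lease>` are assumed from the javadoc), a host would typically ensure the lease exists, acquire it, and eventually release it:

```java
import com.azure.data.cosmos.internal.changefeed.Lease;
import com.azure.data.cosmos.internal.changefeed.LeaseManager;
import reactor.core.publisher.Mono;

// Hypothetical flow: create-if-missing, acquire ownership, then release the same lease.
public final class LeaseManagerSketch {
    public static Mono<Void> acquireThenRelease(LeaseManager leaseManager, String leaseToken) {
        return leaseManager.createLeaseIfNotExist(leaseToken, null)
            .flatMap(leaseManager::acquire)   // may fail with LeaseLostException if another host wins
            .flatMap(leaseManager::release);
    }
}
```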
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed; + +import reactor.core.publisher.Mono; + +/** + * Interface for the lease renewer. + */ +public interface LeaseRenewer { + /** + * Starts the lease renewer. + * + * @param cancellationToken the token used for canceling the workload. + * @return a deferred operation of this call. + */ + Mono run(CancellationToken cancellationToken); + + /** + * @return the inner exception if any, otherwise null. + */ + RuntimeException getResultException(); +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/LeaseStore.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/LeaseStore.java new file mode 100644 index 0000000000000..b443127de135b --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/LeaseStore.java @@ -0,0 +1,61 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed; + +import reactor.core.publisher.Mono; + +import java.time.Duration; + +/** + * Represents the lease store container to deal with initialization/cleanup of leases + * for particular monitoring collection and lease container prefix. + */ +public interface LeaseStore { + + /** + * @return true if the lease store is initialized. + */ + Mono isInitialized(); + + /** + * Mark the store as initialized. + * + * @return a deferred computation of this operation call. + */ + Mono markInitialized(); + + /** + * Places a lock on the lease store for initialization. Only one process may own the store for the lock time. + * + * @param lockExpirationTime the time for the lock to expire. + * @return true if the lock was acquired, false otherwise. + */ + Mono acquireInitializationLock(Duration lockExpirationTime); + + /** + * Releases the lock one the lease store for initialization. + * + * @return true if the lock was acquired and was released, false if the lock was not acquired. 
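The initialization-lock methods on `LeaseStore` above are meant to be combined so that only one host bootstraps the lease store. A hedged sketch follows; the return types such as `Mono<Boolean>` are assumptions taken from the javadoc.

```java
import java.time.Duration;

import com.azure.data.cosmos.internal.changefeed.LeaseStore;
import reactor.core.publisher.Mono;

// Hypothetical bootstrap step: try to take the init lock, mark the store initialized,
// then release the lock; emits true only if this host performed the initialization.
public final class LeaseStoreInitSketch {
    public static Mono<Boolean> tryInitialize(LeaseStore store) {
        return store.acquireInitializationLock(Duration.ofSeconds(30))
            .flatMap(locked -> {
                if (!locked) {
                    return Mono.just(false); // another host is initializing the store
                }
                return store.markInitialized()
                    .thenReturn(true)
                    .flatMap(done -> store.releaseInitializationLock().thenReturn(done));
            });
    }
}
```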
+ */ + Mono releaseInitializationLock(); +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/LeaseStoreManager.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/LeaseStoreManager.java new file mode 100644 index 0000000000000..75f06ef593fd2 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/LeaseStoreManager.java @@ -0,0 +1,153 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed; + +import com.azure.data.cosmos.CosmosContainer; +import com.azure.data.cosmos.internal.changefeed.implementation.LeaseStoreManagerImpl; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.time.Duration; + +/** + * Defines an interface for operations with {@link Lease}. + */ +public interface LeaseStoreManager extends LeaseContainer, LeaseManager, LeaseStore, LeaseCheckpointer +{ + /** + * Provides flexible way to build lease manager constructor parameters. + * For the actual creation of lease manager instance, delegates to lease manager factory. + */ + interface LeaseStoreManagerBuilderDefinition { + LeaseStoreManagerBuilderDefinition leaseContextClient(ChangeFeedContextClient leaseContextClient); + + LeaseStoreManagerBuilderDefinition leasePrefix(String leasePrefix); + + LeaseStoreManagerBuilderDefinition leaseCollectionLink(CosmosContainer leaseCollectionLink); + + LeaseStoreManagerBuilderDefinition requestOptionsFactory(RequestOptionsFactory requestOptionsFactory); + + LeaseStoreManagerBuilderDefinition hostName(String hostName); + + Mono build(); + } + + static LeaseStoreManagerBuilderDefinition Builder() { + return new LeaseStoreManagerImpl(); + } + + /** + * @return List of all leases. + */ + Flux getAllLeases(); + + /** + * @return all leases owned by the current host. + */ + Flux getOwnedLeases(); + + /** + * Checks whether the lease exists and creates it if it does not exist. + * + * @param leaseToken the partition to work on. + * @param continuationToken the continuation token if it exists. + * @return the lease. + */ + Mono createLeaseIfNotExist(String leaseToken, String continuationToken); + + /** + * DELETE the lease. + * + * @param lease the lease to remove. + * @return a representation of the deferred computation of this call. + */ + Mono delete(Lease lease); + + /** + * Acquire ownership of the lease. 
+ * + * @param lease the Lease to acquire. + * @return the updated acquired lease. + */ + Mono acquire(Lease lease); + + /** + * Release ownership of the lease. + * + * @param lease the lease to acquire. + * @return a representation of the deferred computation of this call. + */ + Mono release(Lease lease); + + /** + * Renew the lease. Leases are periodically renewed to prevent expiration. + * + * @param lease the Lease to renew. + * @return the updated renewed lease. + */ + Mono renew(Lease lease); + + /** + * REPLACE item from the specified lease. + * + * @param leaseToUpdatePropertiesFrom the Lease containing new item. + * @return the updated lease. + */ + Mono updateProperties(Lease leaseToUpdatePropertiesFrom); + + /** + * Checkpoint the lease. + * + * @param lease the Lease to renew. + * @param continuationToken the continuation token. + * @return the updated renewed lease. + */ + Mono checkpoint(Lease lease, String continuationToken); + + /** + * @return true if the lease store is initialized. + */ + Mono isInitialized(); + + /** + * Mark the store as initialized. + * + * @return true if marked as initialized. + */ + Mono markInitialized(); + + /** + * Places a lock on the lease store for initialization. Only one process may own the store for the lock time. + * + * @param lockExpirationTime the time for the lock to expire. + * @return true if the lock was acquired, false otherwise. + */ + Mono acquireInitializationLock(Duration lockExpirationTime); + + /** + * Releases the lock one the lease store for initialization. + * + * @return true if the lock was acquired and was relesed, false if the lock was not acquired. + */ + Mono releaseInitializationLock(); +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/LeaseStoreManagerSettings.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/LeaseStoreManagerSettings.java new file mode 100644 index 0000000000000..8c892d4d4d440 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/LeaseStoreManagerSettings.java @@ -0,0 +1,63 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed; + +import com.azure.data.cosmos.CosmosContainer; + +/** + * Captures LeaseStoreManager properties. 
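Putting the `LeaseStoreManager` builder definition above together, constructing a lease store manager would look roughly like this; every argument value is a placeholder and the `Mono<LeaseStoreManager>` result type of `build()` is assumed.

```java
import com.azure.data.cosmos.CosmosContainer;
import com.azure.data.cosmos.internal.changefeed.ChangeFeedContextClient;
import com.azure.data.cosmos.internal.changefeed.LeaseStoreManager;
import com.azure.data.cosmos.internal.changefeed.RequestOptionsFactory;
import reactor.core.publisher.Mono;

// Hypothetical wiring: the context client, lease container and options factory come from elsewhere.
public final class LeaseStoreManagerBuilderSketch {
    public static Mono<LeaseStoreManager> build(ChangeFeedContextClient contextClient,
                                                CosmosContainer leaseContainer,
                                                RequestOptionsFactory optionsFactory) {
        return LeaseStoreManager.Builder()
            .leaseContextClient(contextClient)
            .leasePrefix("host-prefix")          // placeholder prefix
            .leaseCollectionLink(leaseContainer)
            .requestOptionsFactory(optionsFactory)
            .hostName("host-1")                  // placeholder host name
            .build();
    }
}
```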
+ */ +public class LeaseStoreManagerSettings { + String containerNamePrefix; + + CosmosContainer leaseCollectionLink; + + String hostName; + + public String getContainerNamePrefix() { + return this.containerNamePrefix; + } + + public LeaseStoreManagerSettings withContainerNamePrefix(String containerNamePrefix) { + this.containerNamePrefix = containerNamePrefix; + return this; + } + + public CosmosContainer getLeaseCollectionLink() { + return this.leaseCollectionLink; + } + + public LeaseStoreManagerSettings withLeaseCollectionLink(CosmosContainer collectionLink) { + this.leaseCollectionLink = collectionLink; + return this; + } + + public String getHostName() { + return this.hostName; + } + + public LeaseStoreManagerSettings withHostName(String hostName) { + this.hostName = hostName; + return this; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/PartitionCheckpointer.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/PartitionCheckpointer.java new file mode 100644 index 0000000000000..62abcd75e7d16 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/PartitionCheckpointer.java @@ -0,0 +1,38 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed; + +import reactor.core.publisher.Mono; + +/** + * Checkpoint the given partition up to the given continuation token. + */ +public interface PartitionCheckpointer { + /** + * Checkpoints the given partition up to the given continuation token. + * + * @param сontinuationToken the continuation token. + * @return a deferred operation of this call. 
+ */ + Mono checkpointPartition(String сontinuationToken); +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/PartitionController.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/PartitionController.java new file mode 100644 index 0000000000000..8e30828ad016b --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/PartitionController.java @@ -0,0 +1,51 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed; + +import reactor.core.publisher.Mono; + +/** + * Interface for the partition controller. + */ +public interface PartitionController { + /** + * Add or update lease item. + * + * @return a representation of the deferred computation of this call. + */ + Mono addOrUpdateLease(Lease lease); + + /** + * Initialize and start the partition controller thread. + * + * @return a representation of the deferred computation of this call. + */ + Mono initialize(); + + /** + * Shutdown partition controller thread. + * + * @return a representation of the deferred computation of this call. + */ + Mono shutdown(); +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/PartitionLoadBalancer.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/PartitionLoadBalancer.java new file mode 100644 index 0000000000000..4d580d67bbbe5 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/PartitionLoadBalancer.java @@ -0,0 +1,44 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed; + +import reactor.core.publisher.Mono; + +/** + * Interface for a partition load balancer. + */ +public interface PartitionLoadBalancer { + /** + * Starts the load balancer. + * + * @return a representation of the deferred computation of this call. + */ + Mono start(); + + /** + * Stops the load balancer. + * + * @return a representation of the deferred computation of this call. + */ + Mono stop(); +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/PartitionLoadBalancingStrategy.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/PartitionLoadBalancingStrategy.java new file mode 100644 index 0000000000000..b8ad4f05fec68 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/PartitionLoadBalancingStrategy.java @@ -0,0 +1,97 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed; + +import com.azure.data.cosmos.ChangeFeedProcessorOptions; + +import java.util.List; + +/** + * A strategy defines which leases should be taken by the current host in a certain moment. + *
+ * It can set new {@link Lease} properties() for all returned leases if needed, including currently owned leases.
+ * Example
+ * <pre>
+ * {@code
+ *  public class CustomStrategy implements PartitionLoadBalancingStrategy
+ *  {
+ *      private String hostName;
+ *      private String hostVersion;
+ *      private Duration leaseExpirationInterval;
+ *
+ *      private static final String VERSION_PROPERTY_KEY = "version";
+ *
+ *      public List<Lease> selectLeasesToTake(List<Lease> allLeases)
+ *      {
+ *          List<Lease> takenLeases = this.findLeasesToTake(allLeases);
+ *          for (Lease lease : takenLeases)
+ *          {
+ *              lease.getProperties().put(VERSION_PROPERTY_KEY, this.hostVersion);
+ *          }
+ *
+ *          return takenLeases;
+ *      }
+ *
+ *      private List<Lease> findLeasesToTake(List<Lease> allLeases)
+ *      {
+ *          List<Lease> takenLeases = new ArrayList<>();
+ *          for (Lease lease : allLeases)
+ *          {
+ *              if (lease.getOwner() == null || lease.getOwner().isEmpty() || this.isExpired(lease))
+ *              {
+ *                  takenLeases.add(lease);
+ *              }
+ *
+ *              if (!this.hostName.equals(lease.getOwner()))
+ *              {
+ *                  String ownerVersion = lease.getProperties().get(VERSION_PROPERTY_KEY);
+ *                  if (ownerVersion != null && ownerVersion.compareTo(this.hostVersion) < 0)
+ *                  {
+ *                      takenLeases.add(lease);
+ *                  }
+ *
+ *                  // more logic for leases owned by other hosts
+ *              }
+ *          }
+ *
+ *          return takenLeases;
+ *      }
+ *
+ *      private boolean isExpired(Lease lease)
+ *      {
+ *          return ZonedDateTime.parse(lease.getTimestamp())
+ *              .plus(this.leaseExpirationInterval)
+ *              .isBefore(ZonedDateTime.now(ZoneOffset.UTC));
+ *      }
+ *  }
+ * }
+ * </pre>
+ * + */ +public interface PartitionLoadBalancingStrategy { + /** + * Select leases that should be taken for processing. + * This method will be called periodically with {@link ChangeFeedProcessorOptions} leaseAcquireInterval(). + + * @param allLeases ALL leases. + * @return Leases that should be taken for processing by this host. + */ + List selectLeasesToTake(List allLeases); +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/PartitionManager.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/PartitionManager.java new file mode 100644 index 0000000000000..a82f07e70b2bc --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/PartitionManager.java @@ -0,0 +1,44 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed; + +import reactor.core.publisher.Mono; + +/** + * Interface PartitionManager. + */ +public interface PartitionManager { + /** + * starts the partition manager. + * + * @return a representation of the deferred computation of this call. + */ + Mono start(); + + /** + * Stops the partition manager. + * + * @return a representation of the deferred computation of this call. + */ + Mono stop(); +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/PartitionProcessor.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/PartitionProcessor.java new file mode 100644 index 0000000000000..5f0a7b54ca584 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/PartitionProcessor.java @@ -0,0 +1,49 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed; + +import reactor.core.publisher.Mono; + +/** + * Provides an API to run continious processing on a single partition of some resource. + *
+ * Created by {@link PartitionProcessorFactory}.create() after some lease is acquired by the current host. + * Processing can perform the following tasks in a loop: + * 1. READ some data from the resource partition. + * 2. Handle possible problems with the read. + * 3. Pass the obtained data to an observer by calling {@link ChangeFeedObserver}.processChangesAsync{} with the context {@link ChangeFeedObserverContext}. + */ +public interface PartitionProcessor { + /** + * Perform partition processing. + * + * @param cancellationToken the cancellation token. + * @return a representation of the deferred computation of this call. + */ + Mono run(CancellationToken cancellationToken); + + /** + * @return the inner exception if any, otherwise null. + */ + RuntimeException getResultException(); +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/PartitionProcessorFactory.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/PartitionProcessorFactory.java new file mode 100644 index 0000000000000..23f1e357e2e22 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/PartitionProcessorFactory.java @@ -0,0 +1,37 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed; + +/** + * Factory class used to create instance(s) of {@link PartitionProcessor}. + */ +public interface PartitionProcessorFactory { + /** + * Creates an instance of a {@link PartitionProcessor}. + * + * @param lease the lease to be used for partition processing. + * @param changeFeedObserver the observer instace to be used. + * @return an instance of {@link PartitionProcessor}. 
+ */ + PartitionProcessor create(Lease lease, ChangeFeedObserver changeFeedObserver); +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/PartitionSupervisor.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/PartitionSupervisor.java new file mode 100644 index 0000000000000..49a934d8bdf4d --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/PartitionSupervisor.java @@ -0,0 +1,43 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed; + +import reactor.core.publisher.Mono; + +/** + * Interface for the partition supervisor. + */ +public interface PartitionSupervisor { + /** + * Runs the task. + * + * @param cancellationToken the cancellation token. + * @return a deferred operation of this call. + */ + Mono run(CancellationToken cancellationToken); + + /** + * @return the inner exception if any, otherwise null. + */ + RuntimeException getResultException(); +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/PartitionSupervisorFactory.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/PartitionSupervisorFactory.java new file mode 100644 index 0000000000000..3dfffb012078c --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/PartitionSupervisorFactory.java @@ -0,0 +1,35 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
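The loop the PartitionProcessor javadoc describes (read a page, handle read problems, hand the documents to the observer) can be sketched roughly as below. This is illustrative only; the SDK's actual implementation is not shown in this excerpt, and the sketch assumes the CancellationToken type in this package exposes an isCancellationRequested() flag.

    import com.azure.data.cosmos.internal.changefeed.CancellationToken;
    import com.azure.data.cosmos.internal.changefeed.ChangeFeedObserver;
    import com.azure.data.cosmos.internal.changefeed.PartitionProcessor;
    import reactor.core.publisher.Mono;

    class PartitionProcessorSketch implements PartitionProcessor {
        private final ChangeFeedObserver observer;
        private volatile RuntimeException resultException;

        PartitionProcessorSketch(ChangeFeedObserver observer) {
            this.observer = observer;
        }

        @Override
        public Mono run(CancellationToken cancellationToken) {
            return Mono.fromRunnable(() -> {
                // Assumed API: CancellationToken.isCancellationRequested().
                while (!cancellationToken.isCancellationRequested()) {
                    try {
                        // 1. Read a page from the partition's change feed (omitted here).
                        // 2. Handle read problems (throttling, splits) as needed.
                        // 3. Hand the documents to the observer, e.g.
                        //    this.observer.processChanges(context, docs);
                    } catch (RuntimeException ex) {
                        // Surface the failure through getResultException() and stop the loop.
                        this.resultException = ex;
                        break;
                    }
                }
            });
        }

        @Override
        public RuntimeException getResultException() {
            return this.resultException;
        }
    }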
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed; + +/** + * Interface for the partition supervisor factory. + */ +public interface PartitionSupervisorFactory { + /** + * + * @param lease the lease. + * @return an instance of {@link PartitionSupervisor}. + */ + PartitionSupervisor create(Lease lease); +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/PartitionSynchronizer.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/PartitionSynchronizer.java new file mode 100644 index 0000000000000..fdd24569726ea --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/PartitionSynchronizer.java @@ -0,0 +1,46 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed; + +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +/** + * READ DocDB partitions and create leases if they do not exist. + */ +public interface PartitionSynchronizer { + /** + * Creates missing leases. + * + * @return a deferred computation of this operation. + */ + Mono createMissingLeases(); + + /** + * Handles partition slip. + * + * @param lease the lease. + * @return the split partition documents. 
+ */ + Flux splitPartition(Lease lease); +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/ProcessorSettings.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/ProcessorSettings.java new file mode 100644 index 0000000000000..9a9463f5c72cf --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/ProcessorSettings.java @@ -0,0 +1,116 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed; + +import com.azure.data.cosmos.CosmosContainer; + +import java.time.Duration; +import java.time.OffsetDateTime; + +/** + * Implementation for the partition processor properties. 
+ */ +public class ProcessorSettings { + private CosmosContainer collectionSelfLink; + private String partitionKeyRangeId; + private Integer maxItemCount; + private Duration feedPollDelay; + private String startContinuation; + private OffsetDateTime startTime; +// private STRING sessionToken; + + public CosmosContainer getCollectionSelfLink() { + return this.collectionSelfLink; + } + + public ProcessorSettings withCollectionLink(CosmosContainer collectionLink) { + this.collectionSelfLink = collectionLink; + return this; + } + + public String getPartitionKeyRangeId() { + return this.partitionKeyRangeId; + } + + public ProcessorSettings withPartitionKeyRangeId(String partitionKeyRangeId) { + this.partitionKeyRangeId = partitionKeyRangeId; + return this; + } + + public int getMaxItemCount() { + return this.maxItemCount; + } + + public ProcessorSettings withMaxItemCount(int maxItemCount) { + this.maxItemCount = maxItemCount; + return this; + } + + public Duration getFeedPollDelay() { + return this.feedPollDelay; + } + + public ProcessorSettings withFeedPollDelay(Duration feedPollDelay) { + this.feedPollDelay = feedPollDelay; + return this; + } + + public String getStartContinuation() { + return this.startContinuation; + } + + public ProcessorSettings withStartContinuation(String startContinuation) { + this.startContinuation = startContinuation; + return this; + } + + private boolean startFromBeginning; + + public boolean isStartFromBeginning() { + return this.startFromBeginning; + } + + public ProcessorSettings withStartFromBeginning(boolean startFromBeginning) { + this.startFromBeginning = startFromBeginning; + return this; + } + + public OffsetDateTime getStartTime() { + return this.startTime; + } + + public ProcessorSettings withStartTime(OffsetDateTime startTime) { + this.startTime = startTime; + return this; + } + + // This is not currently supported in Java implementation. +// public STRING sessionToken() { +// return this.sessionToken; +// } +// +// public ProcessorSettings sessionToken(STRING sessionToken) { +// this.sessionToken = sessionToken; +// return this; +// } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/RemainingPartitionWork.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/RemainingPartitionWork.java new file mode 100644 index 0000000000000..ec348e16d1911 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/RemainingPartitionWork.java @@ -0,0 +1,38 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
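The fluent withX() setters on ProcessorSettings are meant to be chained; a hypothetical configuration might look like the following (the container reference and the concrete values are placeholders, not defaults from this change).

    import com.azure.data.cosmos.CosmosContainer;
    import com.azure.data.cosmos.internal.changefeed.ProcessorSettings;

    import java.time.Duration;

    class ProcessorSettingsSketch {
        static ProcessorSettings forPartition(CosmosContainer feedContainer, String partitionKeyRangeId) {
            return new ProcessorSettings()
                .withCollectionLink(feedContainer)            // container whose change feed is read
                .withPartitionKeyRangeId(partitionKeyRangeId) // partition this processor owns
                .withMaxItemCount(100)                        // page size per change feed request
                .withFeedPollDelay(Duration.ofSeconds(5))     // delay between empty polls
                .withStartFromBeginning(true);                // read the feed from its start
        }
    }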
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed; + +/** + * Interface for remaining partition work. + */ +public interface RemainingPartitionWork { + /** + * @return the partition key range ID for which the remaining work is calculated. + */ + String getPartitionKeyRangeId(); + + /** + * @return the ammount of documents remaining to be processed. + */ + long getRemainingWork(); +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/RemainingWorkEstimator.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/RemainingWorkEstimator.java new file mode 100644 index 0000000000000..95ceb66be6517 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/RemainingWorkEstimator.java @@ -0,0 +1,46 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed; + +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +/** + * Used to estimate the pending work remaining to be read in the Change Feed. Calculates the sum of pending work + * based on the difference between the latest status of the feed and the status of each existing lease. + */ +public interface RemainingWorkEstimator { + /** + * Calculates an estimate of the pending work remaining to be read in the Change Feed in amount of documents in the whole collection. + * + * @return an estimation of pending work in amount of documents. + */ + Mono estimatedRemainingWork(); + + /** + * Calculates an estimate of the pending work remaining to be read in the Change Feed in amount of documents per partition. + * + * @return an estimation of pending work in amount of documents per partitions. 
+ */ + Flux estimatedRemainingWorkPerPartition(); +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/RequestOptionsFactory.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/RequestOptionsFactory.java new file mode 100644 index 0000000000000..7db485a8a3351 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/RequestOptionsFactory.java @@ -0,0 +1,36 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed; + +import com.azure.data.cosmos.CosmosItemRequestOptions; +import com.azure.data.cosmos.FeedOptions; + +/** + * Defines request options for lease requests to use with {@link LeaseStoreManager}. + */ +public interface RequestOptionsFactory { + + CosmosItemRequestOptions createRequestOptions(Lease lease); + + FeedOptions createFeedOptions(); +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/ServiceItemLease.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/ServiceItemLease.java new file mode 100644 index 0000000000000..3cd2f125039b1 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/ServiceItemLease.java @@ -0,0 +1,244 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
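As a small, hypothetical monitoring sketch built on RemainingWorkEstimator: the interface above is declared with raw Mono/Flux in this excerpt, and how the estimator instance is obtained is assumed rather than shown.

    import com.azure.data.cosmos.internal.changefeed.RemainingWorkEstimator;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class ChangeFeedLagMonitor {
        private static final Logger LOGGER = LoggerFactory.getLogger(ChangeFeedLagMonitor.class);

        static void logEstimatedLag(RemainingWorkEstimator estimator) {
            // Total count of documents not yet processed across all leases.
            Object pendingDocumentCount = estimator.estimatedRemainingWork().block();
            LOGGER.info("Estimated remaining change feed work: {} documents", pendingDocumentCount);
        }
    }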
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed; + +import com.azure.data.cosmos.CosmosItemProperties; +import com.azure.data.cosmos.internal.Document; +import com.azure.data.cosmos.internal.Constants; +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonProperty; + +import java.time.ZoneId; +import java.time.ZonedDateTime; +import java.util.Date; +import java.util.HashMap; +import java.util.Map; + +//import com.azure.data.cosmos.internal.changefeed.internal.Constants; + +/** + * Document service lease. + */ +public class ServiceItemLease implements Lease { + private static final ZonedDateTime UNIX_START_TIME = ZonedDateTime.parse("1970-01-01T00:00:00.0Z[UTC]"); + + // TODO: add JSON annotations and rename the item. + private String id; + private String _etag; + private String LeaseToken; + private String Owner; + private String ContinuationToken; + + private Map properties; + private String timestamp; // ExplicitTimestamp + private String _ts; + + public ServiceItemLease() { + ZonedDateTime currentTime = ZonedDateTime.now(ZoneId.of("UTC")); + this.timestamp = currentTime.toString(); + this._ts = String.valueOf(currentTime.getSecond()); + this.properties = new HashMap<>(); + } + + public ServiceItemLease(ServiceItemLease other) + { + this.id = other.id; + this._etag = other._etag; + this.LeaseToken = other.LeaseToken; + this.Owner = other.Owner; + this.ContinuationToken = other.ContinuationToken; + this.properties = other.properties; + this.timestamp = other.timestamp; + this._ts = other._ts; + } + + @Override + public String getId() { + return this.id; + } + + public ServiceItemLease withId(String id) { + this.id = id; + return this; + } + + @JsonIgnore + public String getEtag() { + return this._etag; + } + + public ServiceItemLease withEtag(String etag) { + this._etag = etag; + return this; + } + + @JsonProperty("LeaseToken") + public String getLeaseToken() { + return this.LeaseToken; + } + + public ServiceItemLease withLeaseToken(String leaseToken) { + this.LeaseToken = leaseToken; + return this; + } + + @JsonProperty("Owner") + @Override + public String getOwner() { + return this.Owner; + } + + public ServiceItemLease withOwner(String owner) { + this.Owner = owner; + return this; + } + + @JsonProperty("ContinuationToken") + @Override + public String getContinuationToken() { + return this.ContinuationToken; + } + + @Override + public void setContinuationToken(String continuationToken) { + this.withContinuationToken(continuationToken); + } + + public ServiceItemLease withContinuationToken(String continuationToken) { + this.ContinuationToken = continuationToken; + return this; + } + + @Override + public Map getProperties() { + return this.properties; + } + + @Override + public void setOwner(String owner) { + this.withOwner(owner); + } + + @Override + public void setTimestamp(ZonedDateTime timestamp) { + this.withTimestamp(timestamp); + } + + public void setTimestamp(Date date) { + this.withTimestamp(date.toInstant().atZone(ZoneId.systemDefault())); + } + + public void setTimestamp(Date date, ZoneId zoneId) { + this.withTimestamp(date.toInstant().atZone(zoneId)); + } + + @Override + public void setId(String id) { + this.withId(id); + } + + @Override + public void 
setConcurrencyToken(String concurrencyToken) { + this.withEtag(concurrencyToken); + } + + public ServiceItemLease withConcurrencyToken(String concurrencyToken) { + return this.withEtag(concurrencyToken); + } + + @Override + public void setProperties(Map properties) { + this.withProperties(properties); + } + + public ServiceItemLease withProperties(Map properties) { + this.properties = properties; + return this; + } + + @JsonIgnore + public String getTs() { + return this._ts; + } + + public ServiceItemLease withTs(String ts) { + this._ts = ts; + return this; + } + + @JsonProperty("timestamp") + @Override + public String getTimestamp() { + if (this.timestamp == null) { + return UNIX_START_TIME.plusSeconds(Long.parseLong(this.getTs())).toString(); + } + return this.timestamp; + } + + public ServiceItemLease withTimestamp(ZonedDateTime timestamp) { + this.timestamp = timestamp.toString(); + return this; + } + + @JsonIgnore + public String getExplicitTimestamp() { + return this.timestamp; + } + + @JsonIgnore + @Override + public String getConcurrencyToken() { + return this.getEtag(); + } + + public static ServiceItemLease fromDocument(Document document) { + return new ServiceItemLease() + .withId(document.id()) + .withEtag(document.etag()) + .withTs(document.getString(Constants.Properties.LAST_MODIFIED)) + .withOwner(document.getString("Owner")) + .withLeaseToken(document.getString("LeaseToken")) + .withContinuationToken(document.getString("ContinuationToken")); + } + + public static ServiceItemLease fromDocument(CosmosItemProperties document) { + return new ServiceItemLease() + .withId(document.id()) + .withEtag(document.etag()) + .withTs(document.getString(Constants.Properties.LAST_MODIFIED)) + .withOwner(document.getString("Owner")) + .withLeaseToken(document.getString("LeaseToken")) + .withContinuationToken(document.getString("ContinuationToken")); + } + + @Override + public String toString() { + return String.format( + "%s Owner='%s' Continuation=%s Timestamp(local)=%s Timestamp(server)=%s", + this.getId(), + this.getOwner(), + this.getContinuationToken(), + this.getTimestamp(), + UNIX_START_TIME.plusSeconds(Long.parseLong(this.getTs()))); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/ServiceItemLeaseUpdater.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/ServiceItemLeaseUpdater.java new file mode 100644 index 0000000000000..92f6da3d26818 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/ServiceItemLeaseUpdater.java @@ -0,0 +1,36 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
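To illustrate how a stored lease document is turned back into a ServiceItemLease and inspected (fetching the CosmosItemProperties itself is assumed, not shown):

    import com.azure.data.cosmos.CosmosItemProperties;
    import com.azure.data.cosmos.internal.changefeed.ServiceItemLease;

    class LeaseReadSketch {
        static String describe(CosmosItemProperties storedLeaseDocument) {
            ServiceItemLease lease = ServiceItemLease.fromDocument(storedLeaseDocument);
            // Owner, continuation token and timestamp drive load balancing and checkpointing.
            return lease.getId() + " owned by " + lease.getOwner()
                + " at continuation " + lease.getContinuationToken()
                + " (timestamp " + lease.getTimestamp() + ")";
        }
    }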
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed; + +import com.azure.data.cosmos.CosmosItem; +import com.azure.data.cosmos.CosmosItemRequestOptions; +import reactor.core.publisher.Mono; + +import java.util.function.Function; + +/** + * Interface for service lease updater. + */ +public interface ServiceItemLeaseUpdater { + Mono updateLease(Lease cachedLease, CosmosItem itemLink, CosmosItemRequestOptions requestOptions, Function updateLease); +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/exceptions/LeaseLostException.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/exceptions/LeaseLostException.java new file mode 100644 index 0000000000000..126e27bfdaceb --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/exceptions/LeaseLostException.java @@ -0,0 +1,109 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed.exceptions; + +import com.azure.data.cosmos.internal.changefeed.Lease; + +/** + * Exception occurred when lease is lost, that would typically happen when it is taken by another host. + * Other cases: communication failure, number of retries reached, lease not found. + */ +public class LeaseLostException extends RuntimeException { + private static final String DEFAULT_MESSAGE = "The lease was lost."; + + private Lease lease; + private boolean isGone; + + /** + * Initializes a new instance of the @link LeaseLostException} class. + */ + public LeaseLostException() + { + } + + /** + * Initializes a new instance of the @link LeaseLostException} class using the specified lease. + * + * @param lease an instance of a lost lease. + */ + public LeaseLostException(Lease lease) + { + super(DEFAULT_MESSAGE); + this.lease = lease; + } + + /** + * Initializes a new instance of the @link LeaseLostException} class using error message. + * + * @param message the exception error message. 
+ */ + public LeaseLostException(String message) + { + super(message); + } + + /** + * Initializes a new instance of the @link LeaseLostException} class using error message and inner exception. + * + * @param message the exception error message. + * @param innerException the inner exception. + * + */ + public LeaseLostException(String message, Exception innerException) + { + super(message, innerException.getCause()); + } + + /** + * Initializes a new instance of the @link LeaseLostException} class using the specified lease, inner exception, + * and a flag indicating whether lease is gone.. + * + * @param lease an instance of a lost lease. + * @param innerException the inner exception. + * @param isGone true if lease doesn't exist. + */ + public LeaseLostException(Lease lease, Exception innerException, boolean isGone) + { + super(DEFAULT_MESSAGE, innerException.getCause()); + this.lease = lease; + this.isGone = isGone; + } + + /** + * Gets the lost lease. + * + * @return the lost lease. + */ + public Lease getLease() { + return this.lease; + } + + /** + * Gets a value indicating whether lease doesn't exist. + * + * @return true if lease is gone. + */ + public boolean isGone() { + return this.isGone; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/exceptions/ObserverException.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/exceptions/ObserverException.java new file mode 100644 index 0000000000000..fd052ec2b4845 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/exceptions/ObserverException.java @@ -0,0 +1,39 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed.exceptions; + +/** + * Exception occurred when an operation in a ChangeFeedObserver is running and throws by user code. + */ +public class ObserverException extends RuntimeException { + private static final String DefaultMessage = "Exception has been thrown by the Observer."; + + /** + * Initializes a new instance of the {@link ObserverException} class using the specified internal exception. + * + * @param originalException {@link Exception} thrown by the user code. 
+ */ + public ObserverException(Exception originalException) { + super(DefaultMessage, originalException.getCause()); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/exceptions/PartitionException.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/exceptions/PartitionException.java new file mode 100644 index 0000000000000..5a4f543a83884 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/exceptions/PartitionException.java @@ -0,0 +1,62 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed.exceptions; + +/** + * General exception occurred during partition processing. + */ +public class PartitionException extends RuntimeException { + private String lastContinuation; + + /** + * Initializes a new instance of the {@link PartitionException} class using error message and last continuation token. + * @param message the exception error message. + * @param lastContinuation the request continuation token. + */ + public PartitionException(String message, String lastContinuation) { + super(message); + this.lastContinuation = lastContinuation; + } + + /** + * Initializes a new instance of the {@link PartitionException} class using error message, the last continuation + * token and the inner exception. + * + * @param message the exception error message. + * @param lastContinuation the request continuation token. + * @param innerException the inner exception. + */ + public PartitionException(String message, String lastContinuation, Exception innerException) { + super(message, innerException.getCause()); + this.lastContinuation = lastContinuation; + } + + /** + * Gets the value of request continuation token. + * + * @return the value of request continuation token. 
+ */ + public String getLastContinuation() { + return this.lastContinuation; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/exceptions/PartitionNotFoundException.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/exceptions/PartitionNotFoundException.java new file mode 100644 index 0000000000000..9205bfffc5e81 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/exceptions/PartitionNotFoundException.java @@ -0,0 +1,50 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed.exceptions; + +/** + * Exception occurred when partition wasn't found. + */ +public class PartitionNotFoundException extends PartitionException { + + /** + * Initializes a new instance of the {@link PartitionNotFoundException} class using error message and last continuation token. + * @param message the exception error message. + * @param lastContinuation the request continuation token. + */ + public PartitionNotFoundException(String message, String lastContinuation) { + super(message, lastContinuation); + } + + /** + * Initializes a new instance of the {@link PartitionNotFoundException} class using error message, the last continuation + * token and the inner exception. + * + * @param message the exception error message. + * @param lastContinuation the request continuation token. + * @param innerException the inner exception. 
+ */ + public PartitionNotFoundException(String message, String lastContinuation, Exception innerException) { + super(message, lastContinuation, innerException); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/exceptions/PartitionSplitException.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/exceptions/PartitionSplitException.java new file mode 100644 index 0000000000000..9b9d5be037bd1 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/exceptions/PartitionSplitException.java @@ -0,0 +1,49 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed.exceptions; + +/** + * Exception occurred during partition split. + */ +public class PartitionSplitException extends PartitionException { + /** + * Initializes a new instance of the {@link PartitionSplitException} class using error message and last continuation token. + * @param message the exception error message. + * @param lastContinuation the request continuation token. + */ + public PartitionSplitException(String message, String lastContinuation) { + super(message, lastContinuation); + } + + /** + * Initializes a new instance of the {@link PartitionSplitException} class using error message, the last continuation + * token and the inner exception. + * + * @param message the exception error message. + * @param lastContinuation the request continuation token. + * @param innerException the inner exception. 
+ */ + public PartitionSplitException(String message, String lastContinuation, Exception innerException) { + super(message, lastContinuation, innerException); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/exceptions/TaskCancelledException.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/exceptions/TaskCancelledException.java new file mode 100644 index 0000000000000..affa262512545 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/exceptions/TaskCancelledException.java @@ -0,0 +1,37 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed.exceptions; + +/** + * Exception occurred when an operation in a ChangeFeedObserver was canceled. + */ +public class TaskCancelledException extends RuntimeException { + private static final String DefaultMessage = "Operations were canceled."; + + /** + * Initializes a new instance of the {@link TaskCancelledException} class. + */ + public TaskCancelledException() { + super(DefaultMessage); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/AutoCheckpointer.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/AutoCheckpointer.java new file mode 100644 index 0000000000000..ebe5b71241160 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/AutoCheckpointer.java @@ -0,0 +1,93 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
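A hedged sketch of how a caller might tell the exception types above apart; the handlePartitionGone and handleSplit hooks are hypothetical and not part of this change.

    import com.azure.data.cosmos.internal.changefeed.exceptions.LeaseLostException;
    import com.azure.data.cosmos.internal.changefeed.exceptions.PartitionNotFoundException;
    import com.azure.data.cosmos.internal.changefeed.exceptions.PartitionSplitException;

    class PartitionErrorHandlingSketch {
        void onProcessingError(RuntimeException error) {
            if (error instanceof PartitionNotFoundException) {
                // The partition no longer exists; its lease can be dropped.
                handlePartitionGone(((PartitionNotFoundException) error).getLastContinuation());
            } else if (error instanceof PartitionSplitException) {
                // The partition split; child partitions need leases starting from this token.
                handleSplit(((PartitionSplitException) error).getLastContinuation());
            } else if (error instanceof LeaseLostException) {
                // Another host took the lease; stop processing this partition quietly.
            } else {
                throw error;
            }
        }

        private void handlePartitionGone(String lastContinuation) { /* hypothetical hook */ }

        private void handleSplit(String lastContinuation) { /* hypothetical hook */ }
    }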
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed.implementation; + +import com.azure.data.cosmos.CosmosItemProperties; +import com.azure.data.cosmos.internal.changefeed.ChangeFeedObserver; +import com.azure.data.cosmos.internal.changefeed.ChangeFeedObserverCloseReason; +import com.azure.data.cosmos.internal.changefeed.ChangeFeedObserverContext; +import com.azure.data.cosmos.internal.changefeed.CheckpointFrequency; + +import java.time.Duration; +import java.time.ZoneId; +import java.time.ZonedDateTime; +import java.util.List; + +/** + * Auto check-pointer implementation for {@link ChangeFeedObserver}. + */ +class AutoCheckpointer implements ChangeFeedObserver { + private final CheckpointFrequency checkpointFrequency; + private final ChangeFeedObserver observer; + private int processedDocCount; + private ZonedDateTime lastCheckpointTime; + + public AutoCheckpointer(CheckpointFrequency checkpointFrequency, ChangeFeedObserver observer) { + if (checkpointFrequency == null) throw new IllegalArgumentException("checkpointFrequency"); + if (observer == null) throw new IllegalArgumentException("observer"); + + this.checkpointFrequency = checkpointFrequency; + this.observer = observer; + this.lastCheckpointTime = ZonedDateTime.now(ZoneId.of("UTC")); + } + + @Override + public void open(ChangeFeedObserverContext context) { + this.observer.open(context); + } + + @Override + public void close(ChangeFeedObserverContext context, ChangeFeedObserverCloseReason reason) { + this.observer.close(context, reason); + } + + @Override + public void processChanges(ChangeFeedObserverContext context, List docs) { + this.observer.processChanges(context, docs); + this.processedDocCount ++; + + if (this.isCheckpointNeeded()) { + context.checkpoint().block(); + this.processedDocCount = 0; + this.lastCheckpointTime = ZonedDateTime.now(ZoneId.of("UTC")); + } + } + + private boolean isCheckpointNeeded() { + if (this.checkpointFrequency.getProcessedDocumentCount() == 0 && this.checkpointFrequency.getTimeInterval() == null) { + return true; + } + + if (this.processedDocCount >= this.checkpointFrequency.getProcessedDocumentCount()) { + return true; + } + + Duration delta = Duration.between(this.lastCheckpointTime, ZonedDateTime.now(ZoneId.of("UTC"))); + + if (delta.compareTo(this.checkpointFrequency.getTimeInterval()) >= 0) { + return true; + } + + return false; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/BootstrapperImpl.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/BootstrapperImpl.java new file mode 100644 index 0000000000000..701071019dead --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/BootstrapperImpl.java @@ -0,0 +1,96 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the 
Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed.implementation; + +import com.azure.data.cosmos.internal.changefeed.Bootstrapper; +import com.azure.data.cosmos.internal.changefeed.LeaseStore; +import com.azure.data.cosmos.internal.changefeed.PartitionSynchronizer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Mono; + +import java.time.Duration; + +/** + * Implementation for the bootstrapping interface. + */ +class BootstrapperImpl implements Bootstrapper { + private final Logger logger = LoggerFactory.getLogger(BootstrapperImpl.class); + private final PartitionSynchronizer synchronizer; + private final LeaseStore leaseStore; + private final Duration lockTime; + private final Duration sleepTime; + + public BootstrapperImpl(PartitionSynchronizer synchronizer, LeaseStore leaseStore, Duration lockTime, Duration sleepTime) + { + if (synchronizer == null) throw new IllegalArgumentException("synchronizer"); + if (leaseStore == null) throw new IllegalArgumentException("leaseStore"); + if (lockTime == null || lockTime.isNegative() || lockTime.isZero()) throw new IllegalArgumentException("lockTime should be non-null and positive"); + if (sleepTime == null || sleepTime.isNegative() || sleepTime.isZero()) throw new IllegalArgumentException("sleepTime should be non-null and positive"); + + this.synchronizer = synchronizer; + this.leaseStore = leaseStore; + this.lockTime = lockTime; + this.sleepTime = sleepTime; + } + + @Override + public Mono initialize() { + BootstrapperImpl self = this; + + return Mono.fromRunnable( () -> { + while (true) { + boolean initialized = self.leaseStore.isInitialized().block(); + + if (initialized) break; + + boolean isLockAcquired = self.leaseStore.acquireInitializationLock(self.lockTime).block(); + + try { + if (!isLockAcquired) { + logger.info("Another instance is initializing the store"); + try { + Thread.sleep(self.sleepTime.toMillis()); + } catch (InterruptedException ex) { + logger.warn("Unexpected exception caught", ex); + } + continue; + } + + logger.info("Initializing the store"); + self.synchronizer.createMissingLeases().block(); + self.leaseStore.markInitialized().block(); + + } catch (RuntimeException ex) { + break; + } finally { + if (isLockAcquired) { + self.leaseStore.releaseInitializationLock().block(); + } + } + } + + logger.info("The store is initialized"); + }); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/ChangeFeedContextClientImpl.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/ChangeFeedContextClientImpl.java new file mode 100644 index 0000000000000..9d98c6da6fadb --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/ChangeFeedContextClientImpl.java @@ 
-0,0 +1,176 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed.implementation; + +import com.azure.data.cosmos.internal.AsyncDocumentClient; +import com.azure.data.cosmos.ChangeFeedOptions; +import com.azure.data.cosmos.CosmosContainer; +import com.azure.data.cosmos.CosmosContainerProperties; +import com.azure.data.cosmos.CosmosContainerRequestOptions; +import com.azure.data.cosmos.CosmosContainerResponse; +import com.azure.data.cosmos.CosmosDatabase; +import com.azure.data.cosmos.CosmosDatabaseRequestOptions; +import com.azure.data.cosmos.CosmosDatabaseResponse; +import com.azure.data.cosmos.CosmosItem; +import com.azure.data.cosmos.CosmosItemProperties; +import com.azure.data.cosmos.CosmosItemRequestOptions; +import com.azure.data.cosmos.CosmosItemResponse; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.SqlQuerySpec; +import com.azure.data.cosmos.internal.PartitionKeyRange; +import com.azure.data.cosmos.internal.changefeed.ChangeFeedContextClient; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; +import reactor.core.scheduler.Scheduler; +import reactor.core.scheduler.Schedulers; + +import java.net.URI; + +import static com.azure.data.cosmos.CosmosBridgeInternal.getContextClient; + +/** + * Implementation for ChangeFeedDocumentClient. + */ +public class ChangeFeedContextClientImpl implements ChangeFeedContextClient { + private final AsyncDocumentClient documentClient; + private final CosmosContainer cosmosContainer; + private Scheduler rxScheduler; + + /** + * Initializes a new instance of the {@link ChangeFeedContextClient} interface. + * @param cosmosContainer existing client. + */ + public ChangeFeedContextClientImpl(CosmosContainer cosmosContainer) + { + if (cosmosContainer == null) { + throw new IllegalArgumentException("cosmosContainer"); + } + + this.cosmosContainer = cosmosContainer; + this.documentClient = getContextClient(cosmosContainer); + this.rxScheduler = Schedulers.elastic(); + } + + /** + * Initializes a new instance of the {@link ChangeFeedContextClient} interface. + * @param cosmosContainer existing client. + * @param rxScheduler the RX Java scheduler to observe on. 
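ChangeFeedContextClientImpl defaults to Schedulers.elastic() and, in the methods that follow, republishes every client call on that scheduler via publishOn, so downstream change feed processing runs off the caller's thread. A minimal, self-contained Reactor illustration of that pattern (not SDK code, just the operator usage):

```java
import reactor.core.publisher.Flux;
import reactor.core.scheduler.Schedulers;

public class PublishOnSketch {
    public static void main(String[] args) {
        Flux.range(1, 3)
            // Everything downstream of publishOn is delivered on the elastic scheduler,
            // mirroring how ChangeFeedContextClientImpl hands results to the processor.
            .publishOn(Schedulers.elastic())
            .map(i -> "item-" + i + " on " + Thread.currentThread().getName())
            .doOnNext(System.out::println)
            .blockLast();
    }
}
```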
+ */ + public ChangeFeedContextClientImpl(CosmosContainer cosmosContainer, Scheduler rxScheduler) + { + if (cosmosContainer == null) { + throw new IllegalArgumentException("cosmosContainer"); + } + + this.cosmosContainer = cosmosContainer; + this.documentClient = getContextClient(cosmosContainer); + this.rxScheduler = rxScheduler; + + } + + @Override + public Flux> readPartitionKeyRangeFeed(String partitionKeyRangesOrCollectionLink, FeedOptions feedOptions) { + return this.documentClient.readPartitionKeyRanges(partitionKeyRangesOrCollectionLink, feedOptions) + .publishOn(this.rxScheduler); + } + + @Override + public Flux> createDocumentChangeFeedQuery(CosmosContainer collectionLink, ChangeFeedOptions feedOptions) { + return collectionLink.queryChangeFeedItems(feedOptions) + .publishOn(this.rxScheduler); + } + + @Override + public Mono readDatabase(CosmosDatabase database, CosmosDatabaseRequestOptions options) { + return database.read() + .publishOn(this.rxScheduler); + } + + @Override + public Mono readContainer(CosmosContainer containerLink, CosmosContainerRequestOptions options) { + return containerLink.read(options) + .publishOn(this.rxScheduler); + } + + @Override + public Mono createItem(CosmosContainer containerLink, Object document, CosmosItemRequestOptions options, boolean disableAutomaticIdGeneration) { + if (options != null) { + return containerLink.createItem(document, options) + .publishOn(this.rxScheduler); + } else { + return containerLink.createItem(document) + .publishOn(this.rxScheduler); + } + } + + @Override + public Mono deleteItem(CosmosItem itemLink, CosmosItemRequestOptions options) { + return itemLink.delete(options) + .publishOn(this.rxScheduler); + } + + @Override + public Mono replaceItem(CosmosItem itemLink, Object document, CosmosItemRequestOptions options) { + return itemLink.replace(document, options) + .publishOn(this.rxScheduler); + } + + @Override + public Mono readItem(CosmosItem itemLink, CosmosItemRequestOptions options) { + return itemLink.read(options) + .publishOn(this.rxScheduler); + } + + @Override + public Flux> queryItems(CosmosContainer containerLink, SqlQuerySpec querySpec, FeedOptions options) { + return containerLink.queryItems(querySpec, options) + .publishOn(this.rxScheduler); + } + + @Override + public URI getServiceEndpoint() { + return documentClient.getServiceEndpoint(); + } + + @Override + public Mono readContainerSettings(CosmosContainer containerLink, CosmosContainerRequestOptions options) { + return containerLink.read(options) + .map(cosmosContainerResponse -> cosmosContainerResponse.properties()); + } + + @Override + public CosmosContainer getContainerClient() { + return this.cosmosContainer; + } + + @Override + public CosmosDatabase getDatabaseClient() { + return this.cosmosContainer.getDatabase(); + } + + @Override + public void close() { + + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/ChangeFeedHelper.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/ChangeFeedHelper.java new file mode 100644 index 0000000000000..dee963a1b3754 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/ChangeFeedHelper.java @@ -0,0 +1,182 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software 
without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed.implementation; + +import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import java.util.Map; + +import static com.azure.data.cosmos.internal.changefeed.implementation.ChangeFeedHelper.Paths.COLLECTIONS_PATH_SEGMENT; +import static com.azure.data.cosmos.internal.changefeed.implementation.ChangeFeedHelper.Paths.DATABASES_ROOT; +import static com.azure.data.cosmos.internal.changefeed.implementation.ChangeFeedHelper.Paths.DOCUMENTS_PATH_SEGMENT; + +/** + * Implement static methods used for various simple transformations and tasks. + */ +class ChangeFeedHelper { + private static final String DEFAULT_USER_AGENT_SUFFIX = "changefeed-2.2.6"; + + public static final int HTTP_STATUS_CODE_NOT_FOUND = 404; + public static final int HTTP_STATUS_CODE_CONFLICT = 409; + public static final int HTTP_STATUS_CODE_GONE = 410; + public static final int HTTP_STATUS_CODE_PRECONDITION_FAILED = 412; + public static final int HTTP_STATUS_CODE_TOO_MANY_REQUESTS = 429; + public static final int HTTP_STATUS_CODE_INTERNAL_SERVER_ERROR = 500; + + public static String getDatabaseLink(String databaseName) { + return String.format("/dbs/%s", databaseName); + } + + public static String getCollectionLink(String databaseName, String collectionName) { + return String.format("/dbs/%s/colls/%s", databaseName, collectionName); + } + + public static class UriFactory { + /** + * A database link in the format of "dbs/{0}/". + * + * @param databaseId the database ID. + * @return a database link in the format of "dbs/{0}/". + */ + public static String createDatabaseUri(String databaseId) { + String path = String.format("%s/%s/", DATABASES_ROOT, databaseId); + + return getUrlPath(path); + } + + /** + * A collection link in the format of "dbs/{0}/colls/{1}/". + * + * @param databaseId the database ID. + * @param collectionId the collection ID. + * @return a collection link in the format of "dbs/{0}/colls/{1}/". + */ + public static String createDocumentCollectionUri(String databaseId, String collectionId) { + String path = String.format("%s/%s/%s/%s/",DATABASES_ROOT, databaseId, + COLLECTIONS_PATH_SEGMENT, collectionId); + + return getUrlPath(path); + } + + /** + * A document link in the format of "dbs/{0}/colls/{1}/docs/{2}/". + * + * @param databaseId the database ID. + * @param collectionId the collection ID. + * @param documentId the document ID. + * @return a document link in the format of "dbs/{0}/colls/{1}/docs/{2}/". 
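The UriFactory helpers above format a link and then normalize it through getUrlPath (shown just below), which routes the path through a throwaway http URI purely so that reserved characters get percent-encoded before the leading slash is stripped. A small standalone sketch of that trick, using only java.net and illustrative resource ids:

```java
import java.net.MalformedURLException;
import java.net.URI;
import java.net.URISyntaxException;

public class UriPathSketch {
    public static void main(String[] args) throws URISyntaxException, MalformedURLException {
        // Hypothetical database and collection ids.
        String path = String.format("/%s/%s/%s/%s/", "dbs", "my db", "colls", "orders");

        // The scheme and host are throwaway; only the encoded path is kept.
        URI uri = new URI("http", "localhost", path, null);

        // Prints "dbs/my%20db/colls/orders/" - the space in the id is percent-encoded
        // and the leading "/" is dropped, matching the "dbs/{0}/colls/{1}/" link format.
        System.out.println(uri.toURL().getPath().substring(1));
    }
}
```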
+ */ + public static String createDocumentUri(String databaseId, String collectionId, String documentId) { + String path = String.format("%s/%s/%s/%s/%s/%s/",DATABASES_ROOT, databaseId, + COLLECTIONS_PATH_SEGMENT, collectionId, DOCUMENTS_PATH_SEGMENT, documentId); + + return getUrlPath(path); + } + + public static String getUrlPath(String path) { + try { + URI uri = new URI( + "http", + "localhost", + path, + null + ); + + URL url = uri.toURL(); + + return url.getPath().substring(1); + } catch (URISyntaxException | MalformedURLException uriEx) {return null;} + } + } + + /** + * Copied from com.azure.data.cosmos.internal.Paths. + */ + public static class Paths { + static final String ROOT = "/"; + + public static final String DATABASES_PATH_SEGMENT = "dbs"; + public static final String DATABASES_ROOT = ROOT + DATABASES_PATH_SEGMENT; + + public static final String USERS_PATH_SEGMENT = "users"; + public static final String PERMISSIONS_PATH_SEGMENT = "permissions"; + public static final String COLLECTIONS_PATH_SEGMENT = "colls"; + public static final String STORED_PROCEDURES_PATH_SEGMENT = "sprocs"; + public static final String TRIGGERS_PATH_SEGMENT = "triggers"; + public static final String USER_DEFINED_FUNCTIONS_PATH_SEGMENT = "udfs"; + public static final String CONFLICTS_PATH_SEGMENT = "conflicts"; + public static final String DOCUMENTS_PATH_SEGMENT = "docs"; + public static final String ATTACHMENTS_PATH_SEGMENT = "attachments"; + + // /offers + public static final String OFFERS_PATH_SEGMENT = "offers"; + public static final String OFFERS_ROOT = ROOT + OFFERS_PATH_SEGMENT + "/"; + + public static final String ADDRESS_PATH_SEGMENT = "addresses"; + public static final String PARTITIONS_PATH_SEGMENT = "partitions"; + public static final String DATABASE_ACCOUNT_PATH_SEGMENT = "databaseaccount"; + public static final String TOPOLOGY_PATH_SEGMENT = "topology"; + public static final String MEDIA_PATH_SEGMENT = "media"; + public static final String MEDIA_ROOT = ROOT + MEDIA_PATH_SEGMENT; + public static final String SCHEMAS_PATH_SEGMENT = "schemas"; + public static final String PARTITION_KEY_RANGES_PATH_SEGMENT = "pkranges"; + + public static final String USER_DEFINED_TYPES_PATH_SEGMENT = "udts"; + + public static final String RID_RANGE_PATH_SEGMENT = "ridranges"; + } + + public static class KeyValuePair implements Map.Entry + { + private K key; + private V value; + + public KeyValuePair(K key, V value) + { + this.key = key; + this.value = value; + } + + public K getKey() + { + return this.key; + } + + public V getValue() + { + return this.value; + } + + public K setKey(K key) + { + return this.key = key; + } + + public V setValue(V value) + { + return this.value = value; + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/ChangeFeedObserverContextImpl.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/ChangeFeedObserverContextImpl.java new file mode 100644 index 0000000000000..4454a1a493d54 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/ChangeFeedObserverContextImpl.java @@ -0,0 +1,86 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, 
publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed.implementation; + + +import com.azure.data.cosmos.CosmosItemProperties; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.internal.changefeed.ChangeFeedObserverContext; +import com.azure.data.cosmos.internal.changefeed.PartitionCheckpointer; +import reactor.core.publisher.Mono; + +/** + * Implementation for ChangeFeedObserverContext. + */ +class ChangeFeedObserverContextImpl implements ChangeFeedObserverContext { + private final PartitionCheckpointer checkpointer; + private final String partitionKeyRangeId; + private final FeedResponse feedResponse; + private String responseContinuation; + + public ChangeFeedObserverContextImpl(String leaseToken) { + this.partitionKeyRangeId = leaseToken; + this.checkpointer = null; + this.feedResponse = null; + } + + public ChangeFeedObserverContextImpl(String leaseToken, FeedResponse feedResponse, PartitionCheckpointer checkpointer) + { + this.partitionKeyRangeId = leaseToken; + this.feedResponse = feedResponse; + this.checkpointer = checkpointer; + } + + /** + * Checkpoints progress of a stream. This method is valid only if manual checkpoint was configured. + *

+ * Client may accept multiple change feed batches to process in parallel. + * Once first N document processing was finished, the client can call checkpoint on the last completed batches in the row. + * In case of automatic checkpointing this method throws. + * + * @return a deferred computation of this call. + */ + @Override + public Mono checkpoint() { + this.responseContinuation = this.feedResponse.continuationToken(); + + return this.checkpointer.checkpointPartition(this.responseContinuation); + } + + /** + * @return the id of the partition for the current event. + */ + @Override + public String getPartitionKeyRangeId() { + return this.partitionKeyRangeId; + } + + /** + * @return the response from the underlying call. + */ + @Override + public FeedResponse getFeedResponse() { + return this.feedResponse; + } + +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/ChangeFeedObserverFactoryImpl.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/ChangeFeedObserverFactoryImpl.java new file mode 100644 index 0000000000000..6c218c4caed60 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/ChangeFeedObserverFactoryImpl.java @@ -0,0 +1,47 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed.implementation; + +import com.azure.data.cosmos.internal.changefeed.ChangeFeedObserver; +import com.azure.data.cosmos.internal.changefeed.ChangeFeedObserverFactory; +import com.azure.data.cosmos.internal.changefeed.exceptions.ObserverException; + +/** + * DEFAULT implementation for {@link ChangeFeedObserverFactory}.
+ */ +public class ChangeFeedObserverFactoryImpl implements ChangeFeedObserverFactory { + private final Class observerType; + + public ChangeFeedObserverFactoryImpl(Class observerType) { + this.observerType = observerType; + } + + @Override + public ChangeFeedObserver createObserver() { + try { + return (ChangeFeedObserver) observerType.newInstance(); + } catch (IllegalAccessException | InstantiationException ex) { + throw new ObserverException(ex); + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/ChangeFeedProcessorBuilderImpl.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/ChangeFeedProcessorBuilderImpl.java new file mode 100644 index 0000000000000..3d979e59c64bc --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/ChangeFeedProcessorBuilderImpl.java @@ -0,0 +1,471 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
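ChangeFeedObserverFactoryImpl above instantiates the registered observer class reflectively, so any class passed to observer(...) needs a public no-argument constructor. A hedged sketch of such an observer, echoing the SampleObserverImpl name mentioned in the builder javadoc below; the List element type is assumed to be CosmosItemProperties, matching the imports used elsewhere in this change:

```java
import com.azure.data.cosmos.CosmosItemProperties;
import com.azure.data.cosmos.internal.changefeed.ChangeFeedObserver;
import com.azure.data.cosmos.internal.changefeed.ChangeFeedObserverCloseReason;
import com.azure.data.cosmos.internal.changefeed.ChangeFeedObserverContext;

import java.util.List;

public class SampleObserverImpl implements ChangeFeedObserver {
    // Implicit public no-arg constructor: required because the factory calls newInstance().

    @Override
    public void open(ChangeFeedObserverContext context) {
        // called before changes start being delivered for a lease
    }

    @Override
    public void close(ChangeFeedObserverContext context, ChangeFeedObserverCloseReason reason) {
        // called when delivery for this lease stops (for example on shutdown or lease loss)
    }

    @Override
    public void processChanges(ChangeFeedObserverContext context, List<CosmosItemProperties> docs) {
        System.out.println("processed a batch of " + docs.size() + " documents");
    }
}
```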
+ */ +package com.azure.data.cosmos.internal.changefeed.implementation; + +import com.azure.data.cosmos.ChangeFeedProcessor; +import com.azure.data.cosmos.ChangeFeedProcessorOptions; +import com.azure.data.cosmos.CosmosContainer; +import com.azure.data.cosmos.CosmosItemProperties; +import com.azure.data.cosmos.internal.changefeed.Bootstrapper; +import com.azure.data.cosmos.internal.changefeed.ChangeFeedContextClient; +import com.azure.data.cosmos.internal.changefeed.ChangeFeedObserver; +import com.azure.data.cosmos.internal.changefeed.ChangeFeedObserverFactory; +import com.azure.data.cosmos.internal.changefeed.CheckpointFrequency; +import com.azure.data.cosmos.internal.changefeed.HealthMonitor; +import com.azure.data.cosmos.internal.changefeed.LeaseStoreManager; +import com.azure.data.cosmos.internal.changefeed.PartitionController; +import com.azure.data.cosmos.internal.changefeed.PartitionLoadBalancer; +import com.azure.data.cosmos.internal.changefeed.PartitionLoadBalancingStrategy; +import com.azure.data.cosmos.internal.changefeed.PartitionManager; +import com.azure.data.cosmos.internal.changefeed.PartitionProcessor; +import com.azure.data.cosmos.internal.changefeed.PartitionProcessorFactory; +import com.azure.data.cosmos.internal.changefeed.PartitionSupervisorFactory; +import com.azure.data.cosmos.internal.changefeed.RequestOptionsFactory; +import reactor.core.publisher.Mono; +import reactor.core.scheduler.Schedulers; + +import java.net.URI; +import java.time.Duration; +import java.util.List; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.function.Consumer; + +/** + * Helper class to build {@link ChangeFeedProcessor} instances + * as logical representation of the Azure Cosmos DB database service. + * + *

+ * <pre>
+ * {@code
+ *  ChangeFeedProcessor.Builder()
+ *     .hostName(hostName)
+ *     .feedContainer(feedContainer)
+ *     .leaseContainer(leaseContainer)
+ *     .handleChanges(docs -> {
+ *         // Implementation for handling and processing CosmosItemProperties list goes here
+ *      })
+ *     .observer(SampleObserverImpl.class)
+ *     .build();
+ * }
+ * </pre>
+ */ +public class ChangeFeedProcessorBuilderImpl implements ChangeFeedProcessor.BuilderDefinition, ChangeFeedProcessor, AutoCloseable { + private static final long DefaultUnhealthinessDuration = Duration.ofMinutes(15).toMillis(); + private final Duration sleepTime = Duration.ofSeconds(15); + private final Duration lockTime = Duration.ofSeconds(30); + private static final int DefaultQueryPartitionsMaxBatchSize = 100; + + private int queryPartitionsMaxBatchSize = DefaultQueryPartitionsMaxBatchSize; + private int degreeOfParallelism = 25; // default + + + private String hostName; + private ChangeFeedContextClient feedContextClient; + private ChangeFeedProcessorOptions changeFeedProcessorOptions; + private ChangeFeedObserverFactory observerFactory; + private String databaseResourceId; + private String collectionResourceId; + private ChangeFeedContextClient leaseContextClient; + private PartitionLoadBalancingStrategy loadBalancingStrategy; + private PartitionProcessorFactory partitionProcessorFactory; + private LeaseStoreManager leaseStoreManager; + private HealthMonitor healthMonitor; + private PartitionManager partitionManager; + + private ExecutorService executorService; + + /** + * Start listening for changes asynchronously. + * + * @return a representation of the deferred computation of this call. + */ + @Override + public Mono start() { + return partitionManager.start(); + } + + /** + * Stops listening for changes asynchronously. + * + * @return a representation of the deferred computation of this call. + */ + @Override + public Mono stop() { + return partitionManager.stop(); + } + + /** + * Sets the host name. + * + * @param hostName the name to be used for the host. When using multiple hosts, each host must have a unique name. + * @return current Builder. + */ + @Override + public ChangeFeedProcessorBuilderImpl hostName(String hostName) { + this.hostName = hostName; + return this; + } + + /** + * Sets and existing {@link CosmosContainer} to be used to read from the monitored collection. + * + * @param feedDocumentClient the instance of {@link CosmosContainer} to be used. + * @return current Builder. + */ + @Override + public ChangeFeedProcessorBuilderImpl feedContainer(CosmosContainer feedDocumentClient) { + if (feedDocumentClient == null) { + throw new IllegalArgumentException("feedContextClient"); + } + + this.feedContextClient = new ChangeFeedContextClientImpl(feedDocumentClient); + return this; + } + + /** + * Sets the {@link ChangeFeedProcessorOptions} to be used. + * + * @param changeFeedProcessorOptions the change feed processor options to use. + * @return current Builder. + */ + @Override + public ChangeFeedProcessorBuilderImpl options(ChangeFeedProcessorOptions changeFeedProcessorOptions) { + if (changeFeedProcessorOptions == null) { + throw new IllegalArgumentException("changeFeedProcessorOptions"); + } + + this.changeFeedProcessorOptions = changeFeedProcessorOptions; + + return this; + } + + /** + * Sets the {@link ChangeFeedObserverFactory} to be used to generate {@link ChangeFeedObserver} + * + * @param observerFactory The instance of {@link ChangeFeedObserverFactory} to use. + * @return current Builder. 
+ */ + public ChangeFeedProcessorBuilderImpl observerFactory(ChangeFeedObserverFactory observerFactory) { + if (observerFactory == null) { + throw new IllegalArgumentException("observerFactory"); + } + + this.observerFactory = observerFactory; + return this; + } + + /** + * Sets an existing {@link ChangeFeedObserver} type to be used by a {@link ChangeFeedObserverFactory} to process changes. + * @param type the type of {@link ChangeFeedObserver} to be used. + * @return current Builder. + */ + public ChangeFeedProcessorBuilderImpl observer(Class type) { + if (type == null) { + throw new IllegalArgumentException("type"); + } + + this.observerFactory = new ChangeFeedObserverFactoryImpl(type); + + return this; + } + + @Override + public ChangeFeedProcessorBuilderImpl handleChanges(Consumer> consumer) { + return this.observerFactory(new DefaultObserverFactory(consumer)); + } + + /** + * Sets the database resource ID of the monitored collection. + * + * @param databaseResourceId the database resource ID of the monitored collection. + * @return current Builder. + */ + public ChangeFeedProcessorBuilderImpl withDatabaseResourceId(String databaseResourceId) { + this.databaseResourceId = databaseResourceId; + return this; + } + + /** + * Sets the collection resource ID of the monitored collection. + * @param collectionResourceId the collection resource ID of the monitored collection. + * @return current Builder. + */ + public ChangeFeedProcessorBuilderImpl withCollectionResourceId(String collectionResourceId) { + this.collectionResourceId = collectionResourceId; + return this; + } + + /** + * Sets an existing {@link CosmosContainer} to be used to read from the leases collection. + * + * @param leaseDocumentClient the instance of {@link CosmosContainer} to use. + * @return current Builder. + */ + @Override + public ChangeFeedProcessorBuilderImpl leaseContainer(CosmosContainer leaseDocumentClient) { + if (leaseDocumentClient == null) { + throw new IllegalArgumentException("leaseContextClient"); + } + + this.leaseContextClient = new ChangeFeedContextClientImpl(leaseDocumentClient); + return this; + } + + /** + * Sets the {@link PartitionLoadBalancingStrategy} to be used for partition load balancing. + * + * @param loadBalancingStrategy the {@link PartitionLoadBalancingStrategy} to be used for partition load balancing. + * @return current Builder. + */ + public ChangeFeedProcessorBuilderImpl withPartitionLoadBalancingStrategy(PartitionLoadBalancingStrategy loadBalancingStrategy) { + if (loadBalancingStrategy == null) { + throw new IllegalArgumentException("loadBalancingStrategy"); + } + + this.loadBalancingStrategy = loadBalancingStrategy; + return this; + } + + /** + * Sets the {@link PartitionProcessorFactory} to be used to create {@link PartitionProcessor} for partition processing. + * + * @param partitionProcessorFactory the instance of {@link PartitionProcessorFactory} to use. + * @return current Builder. + */ + public ChangeFeedProcessorBuilderImpl withPartitionProcessorFactory(PartitionProcessorFactory partitionProcessorFactory) { + if (partitionProcessorFactory == null) { + throw new IllegalArgumentException("partitionProcessorFactory"); + } + + this.partitionProcessorFactory = partitionProcessorFactory; + return this; + } + + /** + * Sets the {@link LeaseStoreManager} to be used to manage leases. + * + * @param leaseStoreManager the instance of {@link LeaseStoreManager} to use. + * @return current Builder. 
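Putting the builder surface together, a hedged end-to-end sketch of wiring a processor; the container instances, host name, and options values are placeholders, the entry point mirrors the ChangeFeedProcessor.Builder() call from the class javadoc above, and build()/start() appear further down in this class:

```java
import com.azure.data.cosmos.ChangeFeedProcessor;
import com.azure.data.cosmos.ChangeFeedProcessorOptions;
import com.azure.data.cosmos.CosmosContainer;

public class ProcessorWiringSketch {
    public static ChangeFeedProcessor create(CosmosContainer feedContainer, CosmosContainer leaseContainer) {
        ChangeFeedProcessor processor = ChangeFeedProcessor.Builder()
            .hostName("worker-1")              // must be unique per host instance
            .feedContainer(feedContainer)      // the monitored collection
            .leaseContainer(leaseContainer)    // the lease collection, partitioned by /id
            .options(new ChangeFeedProcessorOptions())
            .handleChanges(docs ->
                System.out.println("processed a batch of " + docs.size() + " documents"))
            .build();

        // start()/stop() return a Mono; subscribing kicks off lease acquisition and processing.
        processor.start().subscribe();
        return processor;
    }
}
```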
+ */ + public ChangeFeedProcessorBuilderImpl withLeaseStoreManager(LeaseStoreManager leaseStoreManager) { + if (leaseStoreManager == null) { + throw new IllegalArgumentException("leaseStoreManager"); + } + + this.leaseStoreManager = leaseStoreManager; + return this; + } + + /** + * Sets the {@link HealthMonitor} to be used to monitor unhealthiness situation. + * + * @param healthMonitor The instance of {@link HealthMonitor} to use. + * @return current Builder. + */ + public ChangeFeedProcessorBuilderImpl withHealthMonitor(HealthMonitor healthMonitor) { + if (healthMonitor == null) { + throw new IllegalArgumentException("healthMonitor"); + } + + this.healthMonitor = healthMonitor; + return this; + } + + /** + * Builds a new instance of the {@link ChangeFeedProcessor} with the specified configuration asynchronously. + * + * @return an instance of {@link ChangeFeedProcessor}. + */ + @Override + public ChangeFeedProcessor build() { + ChangeFeedProcessorBuilderImpl self = this; + + if (this.hostName == null) + { + throw new IllegalArgumentException("Host name was not specified"); + } + + if (this.observerFactory == null) + { + throw new IllegalArgumentException("Observer was not specified"); + } + + if (this.executorService == null) { + this.executorService = Executors.newCachedThreadPool(); + } + + // TBD: Move this initialization code as part of the start() call. + return this.initializeCollectionPropertiesForBuild() + .then(self.getLeaseStoreManager().flatMap(leaseStoreManager -> self.buildPartitionManager(leaseStoreManager))) + .map(partitionManager1 -> { + self.partitionManager = partitionManager1; + return self; + }).block(); + } + + public ChangeFeedProcessorBuilderImpl() { + this.queryPartitionsMaxBatchSize = DefaultQueryPartitionsMaxBatchSize; + this.degreeOfParallelism = 25; // default + } + + public ChangeFeedProcessorBuilderImpl(PartitionManager partitionManager) { + this.partitionManager = partitionManager; + } + + private Mono initializeCollectionPropertiesForBuild() { + ChangeFeedProcessorBuilderImpl self = this; + + if (this.changeFeedProcessorOptions == null) { + this.changeFeedProcessorOptions = new ChangeFeedProcessorOptions(); + } + + return this.feedContextClient + .readDatabase(this.feedContextClient.getDatabaseClient(), null) + .map( databaseResourceResponse -> { + self.databaseResourceId = databaseResourceResponse.database().id(); + return self.databaseResourceId; + }) + .flatMap( id -> self.feedContextClient + .readContainer(self.feedContextClient.getContainerClient(), null) + .map(documentCollectionResourceResponse -> { + self.collectionResourceId = documentCollectionResourceResponse.container().id(); + return self.collectionResourceId; + })) + .then(); + } + + private Mono getLeaseStoreManager() { + ChangeFeedProcessorBuilderImpl self = this; + + if (this.leaseStoreManager == null) { + + return this.leaseContextClient.readContainerSettings(this.leaseContextClient.getContainerClient(), null) + .flatMap( collectionSettings -> { + boolean isPartitioned = + collectionSettings.partitionKeyDefinition() != null && + collectionSettings.partitionKeyDefinition().paths() != null && + collectionSettings.partitionKeyDefinition().paths().size() > 0; + if (!isPartitioned || (collectionSettings.partitionKeyDefinition().paths().size() != 1 || !collectionSettings.partitionKeyDefinition().paths().get(0).equals("/id"))) { +// throw new IllegalArgumentException("The lease collection, if partitioned, must have partition key equal to id."); + return Mono.error(new 
IllegalArgumentException("The lease collection must have partition key equal to id.")); + } + + RequestOptionsFactory requestOptionsFactory = new PartitionedByIdCollectionRequestOptionsFactory(); + + String leasePrefix = self.getLeasePrefix(); + + return LeaseStoreManager.Builder() + .leasePrefix(leasePrefix) + .leaseCollectionLink(self.leaseContextClient.getContainerClient()) + .leaseContextClient(self.leaseContextClient) + .requestOptionsFactory(requestOptionsFactory) + .hostName(self.hostName) + .build() + .map(manager -> { + self.leaseStoreManager = manager; + return self.leaseStoreManager; + }); + }); + } + + return Mono.just(this.leaseStoreManager); + } + + private String getLeasePrefix() { + String optionsPrefix = this.changeFeedProcessorOptions.leasePrefix(); + + if (optionsPrefix == null) { + optionsPrefix = ""; + } + + URI uri = this.feedContextClient.getServiceEndpoint(); + + return String.format( + "%s%s_%s_%s", + optionsPrefix, + uri.getHost(), + this.databaseResourceId, + this.collectionResourceId); + } + + private Mono buildPartitionManager(LeaseStoreManager leaseStoreManager) { + ChangeFeedProcessorBuilderImpl self = this; + + CheckpointerObserverFactory factory = new CheckpointerObserverFactory(this.observerFactory, new CheckpointFrequency()); + + PartitionSynchronizerImpl synchronizer = new PartitionSynchronizerImpl( + this.feedContextClient, + this.feedContextClient.getContainerClient(), + leaseStoreManager, + leaseStoreManager, + this.degreeOfParallelism, + this.queryPartitionsMaxBatchSize + ); + + Bootstrapper bootstrapper = new BootstrapperImpl(synchronizer, leaseStoreManager, this.lockTime, this.sleepTime); + PartitionSupervisorFactory partitionSupervisorFactory = new PartitionSupervisorFactoryImpl( + factory, + leaseStoreManager, + this.partitionProcessorFactory != null ? 
this.partitionProcessorFactory : new PartitionProcessorFactoryImpl( + this.feedContextClient, + this.changeFeedProcessorOptions, + leaseStoreManager, + this.feedContextClient.getContainerClient()), + this.changeFeedProcessorOptions, + executorService + ); + + if (this.loadBalancingStrategy == null) { + this.loadBalancingStrategy = new EqualPartitionsBalancingStrategy( + this.hostName, + this.changeFeedProcessorOptions.minScaleCount(), + this.changeFeedProcessorOptions.maxScaleCount(), + this.changeFeedProcessorOptions.leaseExpirationInterval()); + } + + PartitionController partitionController = new PartitionControllerImpl(leaseStoreManager, leaseStoreManager, partitionSupervisorFactory, synchronizer, executorService); + + if (this.healthMonitor == null) { + this.healthMonitor = new TraceHealthMonitor(); + } + + PartitionController partitionController2 = new HealthMonitoringPartitionControllerDecorator(partitionController, this.healthMonitor); + + PartitionLoadBalancer partitionLoadBalancer = new PartitionLoadBalancerImpl( + partitionController2, + leaseStoreManager, + this.loadBalancingStrategy, + this.changeFeedProcessorOptions.leaseAcquireInterval(), + this.executorService + ); + + PartitionManager partitionManager = new PartitionManagerImpl(bootstrapper, partitionController, partitionLoadBalancer); + + return Mono.just(partitionManager); + } + + @Override + public void close() { + this.stop().subscribeOn(Schedulers.elastic()).subscribe(); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/CheckpointerObserverFactory.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/CheckpointerObserverFactory.java new file mode 100644 index 0000000000000..8d982b76afbf0 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/CheckpointerObserverFactory.java @@ -0,0 +1,61 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed.implementation; + +import com.azure.data.cosmos.internal.changefeed.ChangeFeedObserver; +import com.azure.data.cosmos.internal.changefeed.ChangeFeedObserverFactory; +import com.azure.data.cosmos.internal.changefeed.CheckpointFrequency; + +/** + * Factory class used to create instance(s) of {@link ChangeFeedObserver}. 
+ */ +class CheckpointerObserverFactory implements ChangeFeedObserverFactory { + private final ChangeFeedObserverFactory observerFactory; + private final CheckpointFrequency checkpointFrequency; + + /** + * Initializes a new instance of the {@link CheckpointerObserverFactory} class. + * + * @param observerFactory the instance of observer factory. + * @param checkpointFrequency the frequency of lease events. + */ + public CheckpointerObserverFactory(ChangeFeedObserverFactory observerFactory, CheckpointFrequency checkpointFrequency) + { + if (observerFactory == null) throw new IllegalArgumentException("observerFactory"); + if (checkpointFrequency == null) throw new IllegalArgumentException("checkpointFrequency"); + + this.observerFactory = observerFactory; + this.checkpointFrequency = checkpointFrequency; + } + + /** + * @return a new instance of {@link ChangeFeedObserver}. + */ + @Override + public ChangeFeedObserver createObserver() { + ChangeFeedObserver observer = new ObserverExceptionWrappingChangeFeedObserverDecorator(this.observerFactory.createObserver()); + if (this.checkpointFrequency.isExplicitCheckpoint()) return observer; + + return new AutoCheckpointer(this.checkpointFrequency, observer); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/Constants.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/Constants.java new file mode 100644 index 0000000000000..8c1ba053b79f2 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/Constants.java @@ -0,0 +1,216 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed.implementation; + + +/** + * Used internally. Constants in the Azure Cosmos DB database service Java SDK.
+ */ +final class Constants { + + public static final class Quota { + // Quota Strings + public static final String DATABASE = "databases"; + public static final String COLLECTION = "collections"; + public static final String USER = "users"; + public static final String PERMISSION = "permissions"; + public static final String COLLECTION_SIZE = "collectionSize"; + public static final String DOCUMENTS_SIZE = "documentsSize"; + public static final String STORED_PROCEDURE = "storedProcedures"; + public static final String TRIGGER = "triggers"; + public static final String USER_DEFINED_FUNCTION = "functions"; + public static final String DELIMITER_CHARS = "=|;"; + public static final String DOCUMENTS_COUNT = "documentsCount"; + } + + public static final class Properties { + public static final String ID = "id"; + public static final String R_ID = "_rid"; + public static final String SELF_LINK = "_self"; + public static final String LAST_MODIFIED = "_ts"; + public static final String COUNT = "_count"; + public static final String E_TAG = "_etag"; + public static final String AGGREGATE = "_aggregate"; + + public static final String CONSISTENCY_POLICY = "consistencyPolicy"; + public static final String DEFAULT_CONSISTENCY_LEVEL = "defaultConsistencyLevel"; + public static final String MAX_STALENESS_PREFIX = "maxStalenessPrefix"; + public static final String MAX_STALENESS_INTERVAL_IN_SECONDS = "maxIntervalInSeconds"; + public static final String PARENTS = "parents"; + + public static final String DATABASES_LINK = "_dbs"; + public static final String COLLECTIONS_LINK = "_colls"; + public static final String USERS_LINK = "_users"; + public static final String PERMISSIONS_LINK = "_permissions"; + public static final String ATTACHMENTS_LINK = "_attachments"; + public static final String STORED_PROCEDURES_LINK = "_sprocs"; + public static final String TRIGGERS_LINK = "_triggers"; + public static final String USER_DEFINED_FUNCTIONS_LINK = "_udfs"; + public static final String CONFLICTS_LINK = "_conflicts"; + public static final String DOCUMENTS_LINK = "_docs"; + public static final String RESOURCE_LINK = "resource"; + public static final String MEDIA_LINK = "media"; + + public static final String PERMISSION_MODE = "permissionMode"; + public static final String RESOURCE_KEY = "key"; + public static final String TOKEN = "_token"; + public static final String SQL_API_TYPE = "0x10"; + + // Scripting + public static final String BODY = "body"; + public static final String TRIGGER_TYPE = "triggerType"; + public static final String TRIGGER_OPERATION = "triggerOperation"; + + public static final String MAX_SIZE = "maxSize"; + public static final String CURRENT_USAGE = "currentUsage"; + + public static final String CONTENT = "content"; + + public static final String CONTENT_TYPE = "contentType"; + + // ErrorResource. + public static final String CODE = "code"; + public static final String MESSAGE = "message"; + public static final String ERROR_DETAILS = "errorDetails"; + public static final String ADDITIONAL_ERROR_INFO = "additionalErrorInfo"; + + // PartitionInfo. 
+ public static final String RESOURCE_TYPE = "resourceType"; + public static final String SERVICE_INDEX = "serviceIndex"; + public static final String PARTITION_INDEX = "partitionIndex"; + + public static final String ADDRESS_LINK = "addresses"; + public static final String USER_REPLICATION_POLICY = "userReplicationPolicy"; + public static final String USER_CONSISTENCY_POLICY = "userConsistencyPolicy"; + public static final String SYSTEM_REPLICATION_POLICY = "systemReplicationPolicy"; + public static final String READ_POLICY = "readPolicy"; + public static final String QUERY_ENGINE_CONFIGURATION = "queryEngineConfiguration"; + + //ReplicationPolicy + public static final String REPLICATION_POLICY = "replicationPolicy"; + public static final String ASYNC_REPLICATION = "asyncReplication"; + public static final String MAX_REPLICA_SET_SIZE = "maxReplicasetSize"; + public static final String MIN_REPLICA_SET_SIZE = "minReplicaSetSize"; + + //Indexing Policy. + public static final String INDEXING_POLICY = "indexingPolicy"; + public static final String AUTOMATIC = "automatic"; + public static final String STRING_PRECISION = "StringPrecision"; + public static final String NUMERIC_PRECISION = "NumericPrecision"; + public static final String MAX_PATH_DEPTH = "maxPathDepth"; + public static final String INDEXING_MODE = "indexingMode"; + public static final String INDEX_TYPE = "IndexType"; + public static final String INDEX_KIND = "kind"; + public static final String DATA_TYPE = "dataType"; + public static final String PRECISION = "precision"; + + public static final String PATHS = "paths"; + public static final String PATH = "path"; + public static final String INCLUDED_PATHS = "includedPaths"; + public static final String EXCLUDED_PATHS = "excludedPaths"; + public static final String INDEXES = "indexes"; + public static final String COMPOSITE_INDEXES = "compositeIndexes"; + public static final String ORDER = "order"; + public static final String SPATIAL_INDEXES = "spatialIndexes"; + public static final String TYPES = "types"; + + // Unique index. + public static final String UNIQUE_KEY_POLICY = "uniqueKeyPolicy"; + public static final String UNIQUE_KEYS = "uniqueKeys"; + + // Conflict. 
+ public static final String CONFLICT = "conflict"; + public static final String OPERATION_TYPE = "operationType"; + public static final String SOURCE_RESOURCE_ID = "resourceId"; + + // Offer resource + public static final String OFFER_TYPE = "offerType"; + public static final String OFFER_VERSION = "offerVersion"; + public static final String OFFER_CONTENT = "content"; + public static final String OFFER_THROUGHPUT = "offerThroughput"; + public static final String OFFER_VERSION_V1 = "V1"; + public static final String OFFER_VERSION_V2 = "V2"; + public static final String OFFER_RESOURCE_ID = "offerResourceId"; + + // PartitionKey + public static final String PARTITION_KEY = "partitionKey"; + public static final String PARTITION_KEY_PATHS = "paths"; + public static final String PARTITION_KIND = "kind"; + public static final String PARTITION_KEY_DEFINITION_VERSION = "version"; + + public static final String RESOURCE_PARTITION_KEY = "resourcePartitionKey"; + public static final String PARTITION_KEY_RANGE_ID = "partitionKeyRangeId"; + public static final String MIN_INCLUSIVE_EFFECTIVE_PARTITION_KEY = "minInclusiveEffectivePartitionKey"; + public static final String MAX_EXCLUSIVE_EFFECTIVE_PARTITION_KEY = "maxExclusiveEffectivePartitionKey"; + + // AddressResource + public static final String IS_PRIMARY = "isPrimary"; + public static final String PROTOCOL = "protocol"; + public static final String LOGICAL_URI = "logicalUri"; + public static final String PHYISCAL_URI = "physcialUri"; + + // Time-to-Live + public static final String TTL = "ttl"; + public static final String DEFAULT_TTL = "defaultTtl"; + + // Global DB account item + public static final String Name = "name"; + public static final String WRITABLE_LOCATIONS = "writableLocations"; + public static final String READABLE_LOCATIONS = "readableLocations"; + public static final String DATABASE_ACCOUNT_ENDPOINT = "databaseAccountEndpoint"; + + //Authorization + public static final String MASTER_TOKEN = "master"; + public static final String RESOURCE_TOKEN = "resource"; + public static final String TOKEN_VERSION = "1.0"; + public static final String AUTH_SCHEMA_TYPE = "type"; + public static final String AUTH_VERSION = "ver"; + public static final String AUTH_SIGNATURE = "sig"; + public static final String READ_PERMISSION_MODE = "read"; + public static final String ALL_PERMISSION_MODE = "all"; + public static final String PATH_SEPARATOR = "/"; + + public static final int DEFAULT_MAX_PAGE_SIZE = 100; + public static final String ENABLE_MULTIPLE_WRITE_LOCATIONS = "enableMultipleWriteLocations"; + + // Conflict resolution policy + public static final String CONFLICT_RESOLUTION_POLICY = "conflictResolutionPolicy"; + public static final String MODE = "mode"; + public static final String CONFLICT_RESOLUTION_PATH = "conflictResolutionPath"; + public static final String CONFLICT_RESOLUTION_PROCEDURE = "conflictResolutionProcedure"; + + //Handler names for RXNetty httpClient + public static final String SSL_HANDLER_NAME = "ssl-handler"; + public static final String SSL_COMPLETION_HANDLER_NAME = "ssl-completion-handler"; + public static final String HTTP_PROXY_HANDLER_NAME = "http-proxy-handler"; + public static final String LOGGING_HANDLER_NAME = "logging-handler"; + } + + public static final class PartitionedQueryExecutionInfo { + public static final int VERSION_1 = 1; + } + + public static final class QueryExecutionContext { + public static final String INCREMENTAL_FEED_HEADER_VALUE = "Incremental feed"; + } +} diff --git 
a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/DefaultObserver.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/DefaultObserver.java new file mode 100644 index 0000000000000..7b2f609a46279 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/DefaultObserver.java @@ -0,0 +1,59 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed.implementation; + +import com.azure.data.cosmos.CosmosItemProperties; +import com.azure.data.cosmos.internal.changefeed.ChangeFeedObserver; +import com.azure.data.cosmos.internal.changefeed.ChangeFeedObserverCloseReason; +import com.azure.data.cosmos.internal.changefeed.ChangeFeedObserverContext; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.List; +import java.util.function.Consumer; + +class DefaultObserver implements ChangeFeedObserver { + private final Logger log = LoggerFactory.getLogger(DefaultObserver.class); + private Consumer> consumer; + + public DefaultObserver(Consumer> consumer) { + this.consumer = consumer; + } + + @Override + public void open(ChangeFeedObserverContext context) { + log.info("Open processing from thread {}", Thread.currentThread().getId()); + } + + @Override + public void close(ChangeFeedObserverContext context, ChangeFeedObserverCloseReason reason) { + log.info("Close processing from thread {}", Thread.currentThread().getId()); + } + + @Override + public void processChanges(ChangeFeedObserverContext context, List docs) { + log.info("Start processing from thread {}", Thread.currentThread().getId()); + consumer.accept(docs); + log.info("Done processing from thread {}", Thread.currentThread().getId()); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/DefaultObserverFactory.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/DefaultObserverFactory.java new file mode 100644 index 0000000000000..73b70cf4fd0d9 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/DefaultObserverFactory.java @@ -0,0 +1,47 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this 
software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed.implementation; + +import com.azure.data.cosmos.CosmosItemProperties; +import com.azure.data.cosmos.internal.changefeed.ChangeFeedObserver; +import com.azure.data.cosmos.internal.changefeed.ChangeFeedObserverFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.List; +import java.util.function.Consumer; + +class DefaultObserverFactory implements ChangeFeedObserverFactory { + private final Logger log = LoggerFactory.getLogger(DefaultObserverFactory.class); + + private Consumer> consumer; + + public DefaultObserverFactory(Consumer> consumer) { + this.consumer = consumer; + } + + @Override + public ChangeFeedObserver createObserver() { + return new DefaultObserver(consumer); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/DocumentServiceLeaseStore.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/DocumentServiceLeaseStore.java new file mode 100644 index 0000000000000..6ba222f2311e0 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/DocumentServiceLeaseStore.java @@ -0,0 +1,187 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
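DefaultObserver and DefaultObserverFactory exist so that calling code can supply a plain batch consumer instead of implementing ChangeFeedObserver itself: the factory captures the consumer once and creates one observer per partition, and each observer simply forwards every batch to that consumer. A minimal wiring sketch, assuming it sits in the same ...changefeed.implementation package (both classes are package-private) and that the consumer's element type is List<CosmosItemProperties>, as the constructor above implies; handleBatch is a hypothetical application callback:

    package com.azure.data.cosmos.internal.changefeed.implementation;

    import com.azure.data.cosmos.CosmosItemProperties;
    import com.azure.data.cosmos.internal.changefeed.ChangeFeedObserver;

    import java.util.List;
    import java.util.function.Consumer;

    class DefaultObserverWiringSketch {
        // Hypothetical application callback; only the shape matters here.
        static void handleBatch(List<CosmosItemProperties> docs) {
            docs.forEach(doc -> System.out.println("changed item: " + doc.id()));
        }

        static ChangeFeedObserver newObserver() {
            // The factory holds the consumer; every partition gets its own observer
            // that delegates to the same lambda.
            Consumer<List<CosmosItemProperties>> consumer = DefaultObserverWiringSketch::handleBatch;
            return new DefaultObserverFactory(consumer).createObserver();
        }
    }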
+ */ +package com.azure.data.cosmos.internal.changefeed.implementation; + +import com.azure.data.cosmos.AccessCondition; +import com.azure.data.cosmos.AccessConditionType; +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.CosmosContainer; +import com.azure.data.cosmos.CosmosItem; +import com.azure.data.cosmos.CosmosItemProperties; +import com.azure.data.cosmos.CosmosItemRequestOptions; +import com.azure.data.cosmos.internal.changefeed.ChangeFeedContextClient; +import com.azure.data.cosmos.internal.changefeed.LeaseStore; +import com.azure.data.cosmos.internal.changefeed.RequestOptionsFactory; +import com.azure.data.cosmos.internal.changefeed.ServiceItemLease; +import reactor.core.publisher.Mono; + +import java.time.Duration; + +/** + * Implementation for LeaseStore. + */ +class DocumentServiceLeaseStore implements LeaseStore { + private ChangeFeedContextClient client; + private String containerNamePrefix; + private CosmosContainer leaseCollectionLink; + private RequestOptionsFactory requestOptionsFactory; + private String lockETag; + + // TODO: rename to LeaseStoreImpl + public DocumentServiceLeaseStore( + ChangeFeedContextClient client, + String containerNamePrefix, + CosmosContainer leaseCollectionLink, + RequestOptionsFactory requestOptionsFactory) { + + this.client = client; + this.containerNamePrefix = containerNamePrefix; + this.leaseCollectionLink = leaseCollectionLink; + this.requestOptionsFactory = requestOptionsFactory; + } + + @Override + public Mono isInitialized() { + String markerDocId = this.getStoreMarkerName(); + + CosmosItemProperties doc = new CosmosItemProperties(); + doc.id(markerDocId); + + CosmosItemRequestOptions requestOptions = this.requestOptionsFactory.createRequestOptions( + ServiceItemLease.fromDocument(doc)); + + CosmosItem docItem = this.client.getContainerClient().getItem(markerDocId, "/id"); + return this.client.readItem(docItem, requestOptions) + .flatMap(documentResourceResponse -> Mono.just(documentResourceResponse.item() != null)) + .onErrorResume(throwable -> { + if (throwable instanceof CosmosClientException) { + CosmosClientException e = (CosmosClientException) throwable; + if (e.statusCode() == ChangeFeedHelper.HTTP_STATUS_CODE_NOT_FOUND) { + return Mono.just(false); + } + } + return Mono.error(throwable); + }); + } + + @Override + public Mono markInitialized() { + String markerDocId = this.getStoreMarkerName(); + CosmosItemProperties containerDocument = new CosmosItemProperties(); + containerDocument.id(markerDocId); + + return this.client.createItem(this.leaseCollectionLink, containerDocument, null, false) + .map( item -> true) + .onErrorResume(throwable -> { + if (throwable instanceof CosmosClientException) { + CosmosClientException e = (CosmosClientException) throwable; + if (e.statusCode() == ChangeFeedHelper.HTTP_STATUS_CODE_CONFLICT) { + return Mono.just(true); + } + } + return Mono.just(false); + }); + } + + @Override + public Mono acquireInitializationLock(Duration lockExpirationTime) { + String lockId = this.getStoreLockName(); + CosmosItemProperties containerDocument = new CosmosItemProperties(); + containerDocument.id(lockId); + BridgeInternal.setProperty(containerDocument, com.azure.data.cosmos.internal.Constants.Properties.TTL, Long.valueOf(lockExpirationTime.getSeconds()).intValue()); + + DocumentServiceLeaseStore self = this; + + return this.client.createItem(this.leaseCollectionLink, containerDocument, null, false) + .map(documentResourceResponse -> { + 
if (documentResourceResponse.item() != null) { + self.lockETag = documentResourceResponse.properties().etag(); + return true; + } else { + return false; + } + }) + .onErrorResume(throwable -> { + if (throwable instanceof CosmosClientException) { + CosmosClientException e = (CosmosClientException) throwable; + if (e.statusCode() == ChangeFeedHelper.HTTP_STATUS_CODE_CONFLICT) { + return Mono.just(false); + } + } + return Mono.error(throwable); + }); + } + + @Override + public Mono releaseInitializationLock() { + String lockId = this.getStoreLockName(); + CosmosItemProperties doc = new CosmosItemProperties(); + doc.id(lockId); + + CosmosItemRequestOptions requestOptions = this.requestOptionsFactory.createRequestOptions( + ServiceItemLease.fromDocument(doc)); + + if (requestOptions == null) { + requestOptions = new CosmosItemRequestOptions(); + } + + AccessCondition accessCondition = new AccessCondition(); + accessCondition.type(AccessConditionType.IF_MATCH); + accessCondition.condition(this.lockETag); + requestOptions.accessCondition(accessCondition); + DocumentServiceLeaseStore self = this; + + CosmosItem docItem = this.client.getContainerClient().getItem(lockId, "/id"); + return this.client.deleteItem(docItem, requestOptions) + .map(documentResourceResponse -> { + if (documentResourceResponse.item() != null) { + self.lockETag = null; + return true; + } else { + return false; + } + }) + .onErrorResume(throwable -> { + if (throwable instanceof CosmosClientException) { + CosmosClientException e = (CosmosClientException) throwable; + if (e.statusCode() == ChangeFeedHelper.HTTP_STATUS_CODE_CONFLICT) { + return Mono.just(false); + } + } + + return Mono.error(throwable); + }); + } + + private String getStoreMarkerName() + { + return this.containerNamePrefix + ".info"; + } + + private String getStoreLockName() + { + return this.containerNamePrefix + ".lock"; + } + +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/DocumentServiceLeaseUpdaterImpl.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/DocumentServiceLeaseUpdaterImpl.java new file mode 100644 index 0000000000000..eee04010a4c19 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/DocumentServiceLeaseUpdaterImpl.java @@ -0,0 +1,204 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
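DocumentServiceLeaseStore implements the lease container's one-time bootstrap protocol: a marker document (containerNamePrefix + ".info") records that initialization already happened, and a lock document (containerNamePrefix + ".lock") with a TTL serializes hosts that start at the same time. A sketch of how a caller could drive that protocol through the LeaseStore interface, assuming the Mono<Boolean> return types the implementation above implies; the 30-second lock duration and the doInitialization() step are placeholders:

    import com.azure.data.cosmos.internal.changefeed.LeaseStore;
    import reactor.core.publisher.Mono;

    import java.time.Duration;

    final class LeaseStoreBootstrapSketch {
        static Mono<Void> ensureInitialized(LeaseStore leaseStore) {
            Mono<Void> bootstrap = leaseStore.acquireInitializationLock(Duration.ofSeconds(30))
                .flatMap(locked -> locked
                    ? doInitialization()                                // placeholder for creating the initial leases
                        .then(leaseStore.markInitialized())             // write the ".info" marker
                        .then(leaseStore.releaseInitializationLock())   // delete the ".lock" document
                        .then()
                    : Mono.<Void>empty());                              // another host holds the lock; let it finish

            return leaseStore.isInitialized()
                .flatMap(initialized -> initialized ? Mono.<Void>empty() : bootstrap);
        }

        private static Mono<Void> doInitialization() {
            return Mono.empty();                                        // illustrative no-op
        }
    }

Releasing the lock only on success keeps the sketch short; a production version would also release it on failure, and the lock document's TTL bounds how long a crashed host can block the others.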
+ */ +package com.azure.data.cosmos.internal.changefeed.implementation; + +import com.azure.data.cosmos.AccessCondition; +import com.azure.data.cosmos.AccessConditionType; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.CosmosItem; +import com.azure.data.cosmos.CosmosItemProperties; +import com.azure.data.cosmos.CosmosItemRequestOptions; +import com.azure.data.cosmos.internal.changefeed.ChangeFeedContextClient; +import com.azure.data.cosmos.internal.changefeed.Lease; +import com.azure.data.cosmos.internal.changefeed.ServiceItemLease; +import com.azure.data.cosmos.internal.changefeed.ServiceItemLeaseUpdater; +import com.azure.data.cosmos.internal.changefeed.exceptions.LeaseLostException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.Exceptions; +import reactor.core.publisher.Mono; + +import java.time.ZoneId; +import java.time.ZonedDateTime; +import java.util.function.Function; + +import static com.azure.data.cosmos.internal.changefeed.implementation.ChangeFeedHelper.HTTP_STATUS_CODE_CONFLICT; +import static com.azure.data.cosmos.internal.changefeed.implementation.ChangeFeedHelper.HTTP_STATUS_CODE_NOT_FOUND; +import static com.azure.data.cosmos.internal.changefeed.implementation.ChangeFeedHelper.HTTP_STATUS_CODE_PRECONDITION_FAILED; + +/** + * Implementation for service lease updater interface. + */ +class DocumentServiceLeaseUpdaterImpl implements ServiceItemLeaseUpdater { + private final Logger logger = LoggerFactory.getLogger(DocumentServiceLeaseUpdaterImpl.class); + private final int RETRY_COUNT_ON_CONFLICT = 5; + private final ChangeFeedContextClient client; + + public DocumentServiceLeaseUpdaterImpl(ChangeFeedContextClient client) { + if (client == null) { + throw new IllegalArgumentException("client"); + } + + this.client = client; + } + + @Override + public Mono updateLease(Lease cachedLease, CosmosItem itemLink, CosmosItemRequestOptions requestOptions, Function updateLease) { + DocumentServiceLeaseUpdaterImpl self = this; + Lease arrayLease[] = {cachedLease}; + arrayLease[0] = updateLease.apply(cachedLease); + + if (arrayLease[0] == null) { + return Mono.empty(); + } + + arrayLease[0].setTimestamp(ZonedDateTime.now(ZoneId.of("UTC"))); + + return self.tryReplaceLease(arrayLease[0], itemLink) + .map(leaseDocument -> { + arrayLease[0] = ServiceItemLease.fromDocument(leaseDocument); + return arrayLease[0]; + }) + .hasElement() + .flatMap(hasItems -> { + if (hasItems) { + return Mono.just(arrayLease[0]); + } + // Partition lease update conflict. Reading the current version of lease. 
+ return this.client.readItem(itemLink, requestOptions) + .onErrorResume(throwable -> { + if (throwable instanceof CosmosClientException) { + CosmosClientException ex = (CosmosClientException) throwable; + if (ex.statusCode() == HTTP_STATUS_CODE_NOT_FOUND) { + // Partition lease no longer exists + throw Exceptions.propagate(new LeaseLostException(arrayLease[0])); + } + } + return Mono.error(throwable); + }) + .map(cosmosItemResponse -> { + CosmosItemProperties document = cosmosItemResponse.properties(); + ServiceItemLease serverLease = ServiceItemLease.fromDocument(document); + logger.info( + "Partition {} update failed because the lease with token '{}' was updated by host '{}' with token '{}'.", + arrayLease[0].getLeaseToken(), + arrayLease[0].getConcurrencyToken(), + serverLease.getOwner(), + serverLease.getConcurrencyToken()); + arrayLease[0] = serverLease; + + throw Exceptions.propagate(new RuntimeException("")); + }); + }) + .retry(RETRY_COUNT_ON_CONFLICT, throwable -> { + if (throwable instanceof RuntimeException) { + return throwable instanceof LeaseLostException; + } + return false; + }); + +// Lease lease = cachedLease; +// +// for (int retryCount = RETRY_COUNT_ON_CONFLICT; retryCount > 0; retryCount--) { +// lease = updateLease.apply(lease); +// +// if (lease == null) { +// return Mono.empty(); +// } +// +// lease.setTimestamp(ZonedDateTime.now(ZoneId.of("UTC"))); +// CosmosItemProperties leaseDocument = this.tryReplaceLease(lease, itemLink).block(); +// +// if (leaseDocument != null) { +// return Mono.just(ServiceItemLease.fromDocument(leaseDocument)); +// } +// +// // Partition lease update conflict. Reading the current version of lease. +// CosmosItemProperties document = null; +// try { +// CosmosItemResponse response = this.client.readItem(itemLink, requestOptions) +// .block(); +// document = response.properties(); +// } catch (RuntimeException re) { +// if (re.getCause() instanceof CosmosClientException) { +// CosmosClientException ex = (CosmosClientException) re.getCause(); +// if (ex.statusCode() == HTTP_STATUS_CODE_NOT_FOUND) { +// // Partition lease no longer exists +// throw new LeaseLostException(lease); +// } +// } +// throw re; +// } +// +// ServiceItemLease serverLease = ServiceItemLease.fromDocument(document); +// logger.info( +// "Partition {} update failed because the lease with token '{}' was updated by host '{}' with token '{}'. 
Will retry, {} retry(s) left.", +// lease.getLeaseToken(), +// lease.getConcurrencyToken(), +// serverLease.getOwner(), +// serverLease.getConcurrencyToken(), +// retryCount); +// +// lease = serverLease; +// } +// +// throw new LeaseLostException(lease); + } + + private Mono tryReplaceLease(Lease lease, CosmosItem itemLink) throws LeaseLostException { + DocumentServiceLeaseUpdaterImpl self = this; + return this.client.replaceItem(itemLink, lease, this.getCreateIfMatchOptions(lease)) + .map(cosmosItemResponse -> cosmosItemResponse.properties()) + .onErrorResume(re -> { + if (re instanceof CosmosClientException) { + CosmosClientException ex = (CosmosClientException) re; + switch (ex.statusCode()) { + case HTTP_STATUS_CODE_PRECONDITION_FAILED: { + return Mono.empty(); + } + case HTTP_STATUS_CODE_CONFLICT: { + throw Exceptions.propagate( new LeaseLostException(lease, ex, false)); + } + case HTTP_STATUS_CODE_NOT_FOUND: { + throw Exceptions.propagate( new LeaseLostException(lease, ex, true)); + } + default: { + return Mono.error(re); + } + } + } + return Mono.error(re); + }); + } + + private CosmosItemRequestOptions getCreateIfMatchOptions(Lease lease) { + AccessCondition ifMatchCondition = new AccessCondition(); + ifMatchCondition.type(AccessConditionType.IF_MATCH); + ifMatchCondition.condition(lease.getConcurrencyToken()); + + CosmosItemRequestOptions createIfMatchOptions = new CosmosItemRequestOptions(); + createIfMatchOptions.accessCondition(ifMatchCondition); + + return createIfMatchOptions; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/EqualPartitionsBalancingStrategy.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/EqualPartitionsBalancingStrategy.java new file mode 100644 index 0000000000000..730e161ac1c18 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/EqualPartitionsBalancingStrategy.java @@ -0,0 +1,189 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
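The updater above is optimistic concurrency in miniature: every replace carries the lease's current concurrency token (its ETag) as an IF_MATCH access condition, a 412 precondition failure means another host won the race (the lease is re-read and the update retried), and 409/404 surface as LeaseLostException. A small sketch of the request options it builds for that, using the same public AccessCondition and CosmosItemRequestOptions calls; the etag parameter is a placeholder supplied by the caller:

    import com.azure.data.cosmos.AccessCondition;
    import com.azure.data.cosmos.AccessConditionType;
    import com.azure.data.cosmos.CosmosItemRequestOptions;

    final class IfMatchOptionsSketch {
        static CosmosItemRequestOptions ifMatch(String etag) {
            // The replace succeeds only if the stored document still carries this ETag;
            // otherwise the service answers 412 and the caller re-reads the lease.
            AccessCondition condition = new AccessCondition();
            condition.type(AccessConditionType.IF_MATCH);
            condition.condition(etag);

            CosmosItemRequestOptions options = new CosmosItemRequestOptions();
            options.accessCondition(condition);
            return options;
        }
    }

Keeping the token on the lease document itself means the lease container doubles as the coordination mechanism; no separate lock service is involved.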
+ */ +package com.azure.data.cosmos.internal.changefeed.implementation; + +import com.azure.data.cosmos.internal.changefeed.Lease; +import com.azure.data.cosmos.internal.changefeed.PartitionLoadBalancingStrategy; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.time.Duration; +import java.time.ZoneId; +import java.time.ZonedDateTime; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * Implementation for {@link PartitionLoadBalancingStrategy}. + */ +class EqualPartitionsBalancingStrategy implements PartitionLoadBalancingStrategy { + private final Logger logger = LoggerFactory.getLogger(EqualPartitionsBalancingStrategy.class); + private final String hostName; + private final int minPartitionCount; + private final int maxPartitionCount; + private final Duration leaseExpirationInterval; + + public EqualPartitionsBalancingStrategy(String hostName, int minPartitionCount, int maxPartitionCount, Duration leaseExpirationInterval) { + if (hostName == null) throw new IllegalArgumentException("hostName"); + this.hostName = hostName; + this.minPartitionCount = minPartitionCount; + this.maxPartitionCount = maxPartitionCount; + this.leaseExpirationInterval = leaseExpirationInterval; + } + + @Override + public List selectLeasesToTake(List allLeases) { + Map workerToPartitionCount = new HashMap<>(); + List expiredLeases = new ArrayList<>(); + Map allPartitions = new HashMap<>(); + + this.categorizeLeases(allLeases, allPartitions, expiredLeases, workerToPartitionCount); + + int partitionCount = allPartitions.size(); + int workerCount = workerToPartitionCount.size(); + if (partitionCount <= 0) + return new ArrayList(); + + int target = this.calculateTargetPartitionCount(partitionCount, workerCount); + int myCount = workerToPartitionCount.get(this.hostName); + int partitionsNeededForMe = target - myCount; + + /* + Logger.InfoFormat( + "Host '{0}' {1} partitions, {2} hosts, {3} available leases, target = {4}, min = {5}, max = {6}, mine = {7}, will try to take {8} lease(s) for myself'.", + this.hostName, + partitionCount, + workerCount, + expiredLeases.Count, + target, + this.minScaleCount, + this.maxScaleCount, + myCount, + Math.Max(partitionsNeededForMe, 0)); + */ + + if (partitionsNeededForMe <= 0) + return new ArrayList(); + + if (expiredLeases.size() > 0) { + return expiredLeases.subList(0, Math.min(partitionsNeededForMe, expiredLeases.size())); + } + + Lease stolenLease = getLeaseToSteal(workerToPartitionCount, target, partitionsNeededForMe, allPartitions); + List stolenLeases = new ArrayList<>(); + + if (stolenLease != null) { + stolenLeases.add(stolenLease); + } + + return stolenLeases; + } + + private static Lease getLeaseToSteal( + Map workerToPartitionCount, + int target, + int partitionsNeededForMe, + Map allPartitions) { + + Map.Entry workerToStealFrom = findWorkerWithMostPartitions(workerToPartitionCount); + + if (workerToStealFrom.getValue() > target - (partitionsNeededForMe > 1 ?
1 : 0)) { + for (Map.Entry entry : allPartitions.entrySet()) { + if (entry.getValue().getOwner().equalsIgnoreCase(workerToStealFrom.getKey())) { + return entry.getValue(); + } + } + } + + return null; + } + + private static Map.Entry findWorkerWithMostPartitions(Map workerToPartitionCount) { + Map.Entry workerToStealFrom = new ChangeFeedHelper.KeyValuePair<>("", 0); + for (Map.Entry entry : workerToPartitionCount.entrySet()) { + if (workerToStealFrom.getValue() <= entry.getValue()) { + workerToStealFrom = entry; + } + } + + return workerToStealFrom; + } + + private int calculateTargetPartitionCount(int partitionCount, int workerCount) { + int target = 1; + if (partitionCount > workerCount) { + target = (int)Math.ceil((double)partitionCount / workerCount); + } + + if (this.maxPartitionCount > 0 && target > this.maxPartitionCount) { + target = this.maxPartitionCount; + } + + if (this.minPartitionCount > 0 && target < this.minPartitionCount) { + target = this.minPartitionCount; + } + + return target; + } + + private void categorizeLeases( + List allLeases, + Map allPartitions, + List expiredLeases, + Map workerToPartitionCount) { + + for (Lease lease : allLeases) { + // Debug.Assert(lease.LeaseToken != null, "TakeLeasesAsync: lease.LeaseToken cannot be null."); + + allPartitions.put(lease.getLeaseToken(), lease); + + if (lease.getOwner() == null || lease.getOwner().isEmpty() || this.isExpired(lease)) { + // Logger.DebugFormat("Found unused or expired lease: {0}", lease); + expiredLeases.add(lease); + } else { + String assignedTo = lease.getOwner(); + Integer count = workerToPartitionCount.get(assignedTo); + + if (count != null) { + workerToPartitionCount.replace(assignedTo, count + 1); + } else { + workerToPartitionCount.put(assignedTo, 1); + } + } + } + + if (!workerToPartitionCount.containsKey(this.hostName)) { + workerToPartitionCount.put(this.hostName, 0); + } + } + + private boolean isExpired(Lease lease) { + if (lease.getOwner() == null || lease.getOwner().isEmpty() || lease.getTimestamp() == null) { + return true; + } + ZonedDateTime time = ZonedDateTime.parse(lease.getTimestamp()); + return time.plus(this.leaseExpirationInterval).isBefore(ZonedDateTime.now(ZoneId.of("UTC"))); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/ExceptionClassifier.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/ExceptionClassifier.java new file mode 100644 index 0000000000000..f6b9468690907 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/ExceptionClassifier.java @@ -0,0 +1,62 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
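The strategy's math is deliberately simple: the per-host target is ceil(partitionCount / workerCount), clamped to the configured minimum and maximum, and a host first takes expired leases and then steals at most one lease per pass from the busiest host. A standalone worked example of that target calculation with hypothetical numbers:

    final class TargetCountSketch {
        // Mirrors calculateTargetPartitionCount: ceil(partitions / workers), clamped to [min, max].
        static int target(int partitionCount, int workerCount, int minPartitionCount, int maxPartitionCount) {
            int target = partitionCount > workerCount
                ? (int) Math.ceil((double) partitionCount / workerCount)
                : 1;
            if (maxPartitionCount > 0 && target > maxPartitionCount) {
                target = maxPartitionCount;
            }
            if (minPartitionCount > 0 && target < minPartitionCount) {
                target = minPartitionCount;
            }
            return target;
        }

        public static void main(String[] args) {
            // 10 partitions across 3 hosts: ceil(10/3) = 4 leases per host at most,
            // so a host that currently owns 1 lease will try to take 3 more.
            System.out.println(target(10, 3, 0, 0)); // 4
            // With a maximum of 2 the same host is capped at 2 leases.
            System.out.println(target(10, 3, 0, 2)); // 2
        }
    }

Stealing only one lease per pass keeps rebalancing gradual, so two hosts do not bounce the same partitions back and forth.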
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed.implementation; + +import com.azure.data.cosmos.CosmosClientException; + +/** + * Classifies exceptions based on the status codes. + */ +class ExceptionClassifier { + public static final int SubStatusCode_Undefined = -1; + + // 410: partition key range is gone. + public static final int SubStatusCode_PartitionKeyRangeGone = 1002; + + // 410: partition splitting. + public static final int SubStatusCode_Splitting = 1007; + + // 404: LSN in session token is higher. + public static final int SubStatusCode_ReadSessionNotAvailable = 1002; + + + public static StatusCodeErrorType classifyClientException(CosmosClientException clientException) { + Integer subStatusCode = clientException.subStatusCode(); + + if (clientException.statusCode() == ChangeFeedHelper.HTTP_STATUS_CODE_NOT_FOUND && subStatusCode != SubStatusCode_ReadSessionNotAvailable) + return StatusCodeErrorType.PARTITION_NOT_FOUND; + + if (clientException.statusCode() == ChangeFeedHelper.HTTP_STATUS_CODE_GONE && (subStatusCode == SubStatusCode_PartitionKeyRangeGone || subStatusCode == SubStatusCode_Splitting)) + return StatusCodeErrorType.PARTITION_SPLIT; + + if (clientException.statusCode() == ChangeFeedHelper.HTTP_STATUS_CODE_TOO_MANY_REQUESTS || clientException.statusCode() >= ChangeFeedHelper.HTTP_STATUS_CODE_INTERNAL_SERVER_ERROR) + return StatusCodeErrorType.TRANSIENT_ERROR; + + // Temporary workaround to compare exception message, until server provides better way of handling this case. + if (clientException.getMessage().contains("Reduce page size and try again.")) + return StatusCodeErrorType.MAX_ITEM_COUNT_TOO_LARGE; + + return StatusCodeErrorType.UNDEFINED; + + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/HealthMonitoringPartitionControllerDecorator.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/HealthMonitoringPartitionControllerDecorator.java new file mode 100644 index 0000000000000..8ea9440903f46 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/HealthMonitoringPartitionControllerDecorator.java @@ -0,0 +1,72 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
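ExceptionClassifier reduces a CosmosClientException to a coarse StatusCodeErrorType using the HTTP status code plus, for 404 and 410, the sub-status: a 404 that is not "read session not available" means the partition is gone, a 410 with sub-status 1002 or 1007 means a split, and 429 or any 5xx is transient. The same decision table as a standalone sketch over plain ints (404, 410, 429, 500 and sub-status 1002/1007 are the values behind the constants above):

    final class ClassificationSketch {
        enum ErrorType { PARTITION_NOT_FOUND, PARTITION_SPLIT, TRANSIENT_ERROR, MAX_ITEM_COUNT_TOO_LARGE, UNDEFINED }

        // Same decision order as ExceptionClassifier.classifyClientException, with the
        // status and sub-status codes inlined.
        static ErrorType classify(int statusCode, int subStatusCode, String message) {
            if (statusCode == 404 && subStatusCode != 1002) {
                return ErrorType.PARTITION_NOT_FOUND;
            }
            if (statusCode == 410 && (subStatusCode == 1002 || subStatusCode == 1007)) {
                return ErrorType.PARTITION_SPLIT;
            }
            if (statusCode == 429 || statusCode >= 500) {
                return ErrorType.TRANSIENT_ERROR;
            }
            if (message != null && message.contains("Reduce page size and try again.")) {
                return ErrorType.MAX_ITEM_COUNT_TOO_LARGE;
            }
            return ErrorType.UNDEFINED;
        }
    }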
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed.implementation; + +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.internal.changefeed.HealthMonitor; +import com.azure.data.cosmos.internal.changefeed.HealthMonitoringRecord; +import com.azure.data.cosmos.internal.changefeed.Lease; +import com.azure.data.cosmos.internal.changefeed.PartitionController; +import reactor.core.publisher.Mono; + +/** + * Monitors partition controller health. + */ +class HealthMonitoringPartitionControllerDecorator implements PartitionController { + private final PartitionController inner; + private final HealthMonitor monitor; + + public HealthMonitoringPartitionControllerDecorator(PartitionController inner, HealthMonitor monitor) { + if (inner == null) throw new IllegalArgumentException("inner"); + if (monitor == null) throw new IllegalArgumentException("monitor"); + + this.inner = inner; + this.monitor = monitor; + } + + @Override + public Mono addOrUpdateLease(Lease lease) { + return this.inner.addOrUpdateLease(lease) + .onErrorResume(throwable -> { + if (throwable instanceof CosmosClientException) { + // do nothing. + } else { + monitor.inspect(new HealthMonitoringRecord( + HealthMonitoringRecord.HealthSeverity.INFORMATIONAL, + HealthMonitoringRecord.MonitoredOperation.ACQUIRE_LEASE, + lease, throwable)); + } + return Mono.empty(); + }); + } + + @Override + public Mono initialize() { + return this.inner.initialize(); + } + + @Override + public Mono shutdown() { + return this.inner.shutdown(); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/LeaseRenewerImpl.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/LeaseRenewerImpl.java new file mode 100644 index 0000000000000..dddba5eb9d257 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/LeaseRenewerImpl.java @@ -0,0 +1,119 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
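HealthMonitoringPartitionControllerDecorator is a plain decorator: it forwards every call to the wrapped PartitionController and, when addOrUpdateLease fails with something other than a CosmosClientException, reports a HealthMonitoringRecord and suppresses the error. The shape of that pattern as a self-contained sketch; Controller and Monitor here are stand-ins for the SDK interfaces, not the real types:

    import reactor.core.publisher.Mono;

    final class MonitoringDecoratorSketch {
        interface Controller {                       // stand-in for PartitionController
            Mono<String> addOrUpdate(String lease);
        }

        interface Monitor {                          // stand-in for HealthMonitor
            void report(String lease, Throwable error);
        }

        // Wraps a controller; failures are reported to the monitor and suppressed,
        // mirroring the onErrorResume(...) -> Mono.empty() pattern above.
        static Controller monitored(Controller inner, Monitor monitor) {
            return lease -> inner.addOrUpdate(lease)
                .onErrorResume(error -> {
                    monitor.report(lease, error);
                    return Mono.empty();
                });
        }
    }

Suppressing the failure keeps one bad lease from taking down the whole controller; the lease simply stays unowned until a later load-balancing pass retries it.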
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed.implementation; + +import com.azure.data.cosmos.internal.changefeed.CancellationToken; +import com.azure.data.cosmos.internal.changefeed.Lease; +import com.azure.data.cosmos.internal.changefeed.LeaseManager; +import com.azure.data.cosmos.internal.changefeed.LeaseRenewer; +import com.azure.data.cosmos.internal.changefeed.exceptions.LeaseLostException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Mono; + +import java.time.Duration; + +/** + * Implementation for the {@link LeaseRenewer}. + */ +class LeaseRenewerImpl implements LeaseRenewer { + private final Logger logger = LoggerFactory.getLogger(LeaseRenewerImpl.class); + private final LeaseManager leaseManager; + private final Duration leaseRenewInterval; + private Lease lease; + private RuntimeException resultException; + + public LeaseRenewerImpl(Lease lease, LeaseManager leaseManager, Duration leaseRenewInterval) + { + this.lease = lease; + this.leaseManager = leaseManager; + this.leaseRenewInterval = leaseRenewInterval; + } + + @Override + public Mono run(CancellationToken cancellationToken) { + LeaseRenewerImpl self = this; + + return Mono.fromRunnable( () -> { + try { + logger.info(String.format("Partition %s: renewer task started.", self.lease.getLeaseToken())); + long remainingWork = this.leaseRenewInterval.toMillis() / 2; + + try { + while (!cancellationToken.isCancellationRequested() && remainingWork > 0) { + Thread.sleep(100); + remainingWork -= 100; + } + } catch (InterruptedException ex) { + // exception caught + logger.info(String.format("Partition %s: renewer task stopped.", self.lease.getLeaseToken())); + } + + while (!cancellationToken.isCancellationRequested()) { + self.renew().block(); + + remainingWork = this.leaseRenewInterval.toMillis(); + + try { + while (!cancellationToken.isCancellationRequested() && remainingWork > 0) { + Thread.sleep(100); + remainingWork -= 100; + } + } catch (InterruptedException ex) { + // exception caught + logger.info(String.format("Partition %s: renewer task stopped.", self.lease.getLeaseToken())); + break; + } + } + } catch (RuntimeException ex) { + logger.error(String.format("Partition %s: renew lease loop failed.", self.lease.getLeaseToken()), ex); + self.resultException = ex; + } + }); + } + + @Override + public RuntimeException getResultException() { + return this.resultException; + } + + private Mono renew() { + LeaseRenewerImpl self = this; + + return Mono.fromRunnable( () -> { + try { + Lease renewedLease = self.leaseManager.renew(this.lease).block(); + if (renewedLease != null) this.lease = renewedLease; + + logger.info(String.format("Partition %s: renewed lease with result %s", self.lease.getLeaseToken(), renewedLease != null)); + } catch (LeaseLostException leaseLostException) { + logger.error(String.format("Partition %s: lost lease on renew.", self.lease.getLeaseToken()), leaseLostException); + self.resultException = leaseLostException; + throw leaseLostException; + } catch (Exception ex) { + logger.error(String.format("Partition %s: failed to renew lease.", self.lease.getLeaseToken()), ex); + } + }); + } + +} diff --git 
a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/LeaseStoreManagerImpl.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/LeaseStoreManagerImpl.java new file mode 100644 index 0000000000000..224c69f7f7311 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/LeaseStoreManagerImpl.java @@ -0,0 +1,443 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed.implementation; + +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.CosmosContainer; +import com.azure.data.cosmos.CosmosItem; +import com.azure.data.cosmos.CosmosItemProperties; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.SqlParameter; +import com.azure.data.cosmos.SqlParameterList; +import com.azure.data.cosmos.SqlQuerySpec; +import com.azure.data.cosmos.internal.changefeed.ChangeFeedContextClient; +import com.azure.data.cosmos.internal.changefeed.Lease; +import com.azure.data.cosmos.internal.changefeed.LeaseStore; +import com.azure.data.cosmos.internal.changefeed.LeaseStoreManager; +import com.azure.data.cosmos.internal.changefeed.LeaseStoreManagerSettings; +import com.azure.data.cosmos.internal.changefeed.RequestOptionsFactory; +import com.azure.data.cosmos.internal.changefeed.ServiceItemLease; +import com.azure.data.cosmos.internal.changefeed.ServiceItemLeaseUpdater; +import com.azure.data.cosmos.internal.changefeed.exceptions.LeaseLostException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.Exceptions; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.time.Duration; + +/** + * Provides flexible way to build lease manager constructor parameters. + * For the actual creation of lease manager instance, delegates to lease manager factory. 
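LeaseRenewerImpl runs its renewal loop on a timer but sleeps in 100 ms slices, so a cancellation request is noticed within about a tenth of a second instead of a full renewal interval; failures are stored and exposed through getResultException(). The same sleep-in-slices idea as a self-contained sketch, with an AtomicBoolean standing in for the SDK's CancellationToken:

    import java.time.Duration;
    import java.util.concurrent.atomic.AtomicBoolean;

    final class SlicedSleepSketch {
        // Sleeps up to `interval`, waking every 100 ms to check the cancellation flag.
        // Returns true if the full interval elapsed, false if cancellation was requested.
        static boolean sleepUnlessCancelled(Duration interval, AtomicBoolean cancelled) throws InterruptedException {
            long remainingMillis = interval.toMillis();
            while (!cancelled.get() && remainingMillis > 0) {
                Thread.sleep(100);
                remainingMillis -= 100;
            }
            return !cancelled.get();
        }

        public static void main(String[] args) throws InterruptedException {
            AtomicBoolean cancelled = new AtomicBoolean(false);
            int renewals = 0;
            while (sleepUnlessCancelled(Duration.ofSeconds(1), cancelled)) {
                System.out.println("renew lease here");   // placeholder for leaseManager.renew(lease)
                if (++renewals == 3) {
                    cancelled.set(true);                  // normally another thread flips this on shutdown
                }
            }
        }
    }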
+ */ +public class LeaseStoreManagerImpl implements LeaseStoreManager, LeaseStoreManager.LeaseStoreManagerBuilderDefinition { + + private final Logger logger = LoggerFactory.getLogger(LeaseStoreManagerImpl.class); + private LeaseStoreManagerSettings settings; + private ChangeFeedContextClient leaseDocumentClient; + private RequestOptionsFactory requestOptionsFactory; + private ServiceItemLeaseUpdater leaseUpdater; + private LeaseStore leaseStore; + + + public static LeaseStoreManagerBuilderDefinition Builder() { + return new LeaseStoreManagerImpl(); + } + + public LeaseStoreManagerImpl() { + this.settings = new LeaseStoreManagerSettings(); + } + + @Override + public LeaseStoreManagerBuilderDefinition leaseContextClient(ChangeFeedContextClient leaseContextClient) { + if (leaseContextClient == null) { + throw new IllegalArgumentException("leaseContextClient"); + } + + this.leaseDocumentClient = leaseContextClient; + return this; + } + + @Override + public LeaseStoreManagerBuilderDefinition leasePrefix(String leasePrefix) { + if (leasePrefix == null) { + throw new IllegalArgumentException("leasePrefix"); + } + + this.settings.withContainerNamePrefix(leasePrefix); + return this; + } + + @Override + public LeaseStoreManagerBuilderDefinition leaseCollectionLink(CosmosContainer leaseCollectionLink) { + if (leaseCollectionLink == null) { + throw new IllegalArgumentException("leaseCollectionLink"); + } + + this.settings.withLeaseCollectionLink(leaseCollectionLink); + return this; + } + + @Override + public LeaseStoreManagerBuilderDefinition requestOptionsFactory(RequestOptionsFactory requestOptionsFactory) { + if (requestOptionsFactory == null) { + throw new IllegalArgumentException("requestOptionsFactory"); + } + + this.requestOptionsFactory = requestOptionsFactory; + return this; + } + + @Override + public LeaseStoreManagerBuilderDefinition hostName(String hostName) { + if (hostName == null) { + throw new IllegalArgumentException("hostName"); + } + + this.settings.withHostName(hostName); + return this; + } + + @Override + public Mono build() { + if (this.settings == null) throw new IllegalArgumentException("properties"); + if (this.settings.getContainerNamePrefix() == null) throw new IllegalArgumentException("properties.containerNamePrefix"); + if (this.settings.getLeaseCollectionLink() == null) throw new IllegalArgumentException("properties.leaseCollectionLink"); + if (this.settings.getHostName() == null || this.settings.getHostName().isEmpty()) throw new IllegalArgumentException("properties.hostName"); + if (this.leaseDocumentClient == null) throw new IllegalArgumentException("leaseDocumentClient"); + if (this.requestOptionsFactory == null) throw new IllegalArgumentException("requestOptionsFactory"); + if (this.leaseUpdater == null) { + this.leaseUpdater = new DocumentServiceLeaseUpdaterImpl(leaseDocumentClient); + } + + this.leaseStore = new DocumentServiceLeaseStore( + this.leaseDocumentClient, + this.settings.getContainerNamePrefix(), + this.settings.getLeaseCollectionLink(), + this.requestOptionsFactory); + + LeaseStoreManagerImpl self = this; + if (this.settings.getLeaseCollectionLink() == null) + throw new IllegalArgumentException("leaseCollectionLink was not specified"); + if (this.requestOptionsFactory == null) + throw new IllegalArgumentException("requestOptionsFactory was not specified"); + + return Mono.just(self); + } + + @Override + public Flux getAllLeases() { + return this.listDocuments(this.getPartitionLeasePrefix()) + .map(documentServiceLease -> documentServiceLease); + } + + 
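LeaseStoreManagerImpl doubles as its own builder: the fluent methods above collect the lease client, container, prefix, request-options factory and host name, and build() validates them and wires in the default lease updater and lease store. A usage sketch under the assumption that build() yields a Mono<LeaseStoreManager> and that the client, container and options-factory instances already exist elsewhere:

    import com.azure.data.cosmos.CosmosContainer;
    import com.azure.data.cosmos.internal.changefeed.ChangeFeedContextClient;
    import com.azure.data.cosmos.internal.changefeed.LeaseStoreManager;
    import com.azure.data.cosmos.internal.changefeed.RequestOptionsFactory;
    import com.azure.data.cosmos.internal.changefeed.implementation.LeaseStoreManagerImpl;
    import reactor.core.publisher.Mono;

    final class LeaseStoreManagerWiringSketch {
        static Mono<LeaseStoreManager> build(ChangeFeedContextClient leaseClient,
                                             CosmosContainer leaseContainer,
                                             RequestOptionsFactory optionsFactory,
                                             String hostName) {
            return LeaseStoreManagerImpl.Builder()        // static factory method defined above
                .leaseContextClient(leaseClient)
                .leasePrefix("monitoredDb.monitoredColl") // illustrative prefix; the processor derives the real one
                .leaseCollectionLink(leaseContainer)
                .requestOptionsFactory(optionsFactory)
                .hostName(hostName)
                .build();
        }
    }

As written above, build() validates eagerly and returns Mono.just(this), so configuration errors surface as exceptions from the call rather than through the Mono.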
@Override + public Flux getOwnedLeases() { + LeaseStoreManagerImpl self = this; + return this.getAllLeases() + .filter(lease -> lease.getOwner() != null && lease.getOwner().equalsIgnoreCase(self.settings.getHostName())); + } + + @Override + public Mono createLeaseIfNotExist(String leaseToken, String continuationToken) { + if (leaseToken == null) throw new IllegalArgumentException("leaseToken"); + + String leaseDocId = this.getDocumentId(leaseToken); + ServiceItemLease documentServiceLease = new ServiceItemLease() + .withId(leaseDocId) + .withLeaseToken(leaseToken) + .withContinuationToken(continuationToken); + + return this.leaseDocumentClient.createItem(this.settings.getLeaseCollectionLink(), documentServiceLease, null, false) + .onErrorResume( ex -> { + if (ex instanceof CosmosClientException) { + CosmosClientException e = (CosmosClientException) ex; + if (e.statusCode() == ChangeFeedHelper.HTTP_STATUS_CODE_CONFLICT) { + logger.info("Some other host created lease for {}.", leaseToken); + return Mono.empty(); + } + } + + return Mono.error(ex); + }) + .map(documentResourceResponse -> { + if (documentResourceResponse == null) return null; + + CosmosItemProperties document = documentResourceResponse.properties(); + + logger.info("Created lease for partition {}.", leaseToken); + + return documentServiceLease + .withId(document.id()) + .withEtag(document.etag()) + .withTs(document.getString(Constants.Properties.LAST_MODIFIED)); + }); + } + + @Override + public Mono delete(Lease lease) { + if (lease == null || lease.getId() == null) throw Exceptions.propagate(new IllegalArgumentException("lease")); + + CosmosItem itemForLease = this.createItemForLease(lease.getId()); + + return this.leaseDocumentClient + .deleteItem(itemForLease, this.requestOptionsFactory.createRequestOptions(lease)) + .onErrorResume( ex -> { + if (ex instanceof CosmosClientException) { + CosmosClientException e = (CosmosClientException) ex; + if (e.statusCode() == ChangeFeedHelper.HTTP_STATUS_CODE_NOT_FOUND) { + // Ignore - document was already deleted. + return Mono.empty(); + } + } + + return Mono.error(ex); + }) + // return some add-hoc value since we don't actually care about the result. 
+ .map( documentResourceResponse -> true) + .then(); + } + + @Override + public Mono acquire(Lease lease) { + if (lease == null) throw Exceptions.propagate(new IllegalArgumentException("lease")); + + String oldOwner = lease.getOwner(); + + return this.leaseUpdater.updateLease( + lease, + this.createItemForLease(lease.getId()), + this.requestOptionsFactory.createRequestOptions(lease), + serverLease -> { + if (serverLease.getOwner() != null && !serverLease.getOwner().equalsIgnoreCase(oldOwner)) { + logger.info("Partition {} lease was taken over by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); + throw Exceptions.propagate(new LeaseLostException(lease)); + } + serverLease.setOwner(this.settings.getHostName()); + serverLease.setProperties(lease.getProperties()); + + return serverLease; + }); + } + + @Override + public Mono release(Lease lease) { + if (lease == null) throw Exceptions.propagate(new IllegalArgumentException("lease")); + + CosmosItem itemForLease = this.createItemForLease(lease.getId()); + LeaseStoreManagerImpl self = this; + + return this.leaseDocumentClient.readItem(itemForLease, this.requestOptionsFactory.createRequestOptions(lease)) + .onErrorResume( ex -> { + if (ex instanceof CosmosClientException) { + CosmosClientException e = (CosmosClientException) ex; + if (e.statusCode() == ChangeFeedHelper.HTTP_STATUS_CODE_NOT_FOUND) { + logger.info("Partition {} failed to renew lease. The lease is gone already.", lease.getLeaseToken()); + throw Exceptions.propagate(new LeaseLostException(lease)); + } + } + + return Mono.error(ex); + }) + .map( documentResourceResponse -> ServiceItemLease.fromDocument(documentResourceResponse.properties())) + .flatMap( refreshedLease -> self.leaseUpdater.updateLease( + refreshedLease, + self.createItemForLease(refreshedLease.getId()), + self.requestOptionsFactory.createRequestOptions(lease), + serverLease -> + { + if (serverLease.getOwner() != null) { + if (!serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { + logger.info("Partition {} no need to release lease. The lease was already taken by another host '{}'.", lease.getLeaseToken(), serverLease.getOwner()); + throw Exceptions.propagate(new LeaseLostException(lease)); + } + } + + serverLease.setOwner(null); + + return serverLease; + }) + ).then(); + } + + @Override + public Mono renew(Lease lease) { + if (lease == null) throw Exceptions.propagate(new IllegalArgumentException("lease")); + + // Get fresh lease. The assumption here is that check-pointing is done with higher frequency than lease renewal so almost + // certainly the lease was updated in between. + CosmosItem itemForLease = this.createItemForLease(lease.getId()); + LeaseStoreManagerImpl self = this; + + return this.leaseDocumentClient.readItem(itemForLease, this.requestOptionsFactory.createRequestOptions(lease)) + .onErrorResume( ex -> { + if (ex instanceof CosmosClientException) { + CosmosClientException e = (CosmosClientException) ex; + if (e.statusCode() == ChangeFeedHelper.HTTP_STATUS_CODE_NOT_FOUND) { + logger.info("Partition {} failed to renew lease. 
The lease is gone already.", lease.getLeaseToken()); + throw Exceptions.propagate(new LeaseLostException(lease)); + } + } + + return Mono.error(ex); + }) + .map( documentResourceResponse -> ServiceItemLease.fromDocument(documentResourceResponse.properties())) + .flatMap( refreshedLease -> self.leaseUpdater.updateLease( + refreshedLease, + self.createItemForLease(refreshedLease.getId()), + self.requestOptionsFactory.createRequestOptions(lease), + serverLease -> + { + if (!serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { + logger.info("Partition {} lease was taken over by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); + throw Exceptions.propagate(new LeaseLostException(lease)); + } + + return serverLease; + }) + ); + } + + @Override + public Mono updateProperties(Lease lease) { + if (lease == null) throw Exceptions.propagate(new IllegalArgumentException("lease")); + + if (!lease.getOwner().equalsIgnoreCase(this.settings.getHostName())) + { + logger.info("Partition '{}' lease was taken over by owner '{}' before lease item update", lease.getLeaseToken(), lease.getOwner()); + throw new LeaseLostException(lease); + } + + return this.leaseUpdater.updateLease( + lease, + this.createItemForLease(lease.getId()), + this.requestOptionsFactory.createRequestOptions(lease), + serverLease -> { + if (!serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { + logger.info("Partition '{}' lease was taken over by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); + throw Exceptions.propagate(new LeaseLostException(lease)); + } + serverLease.setProperties(lease.getProperties()); + return serverLease; + }); + } + + @Override + public Mono checkpoint(Lease lease, String continuationToken) { + if (lease == null) throw Exceptions.propagate(new IllegalArgumentException("lease")); + + if (continuationToken == null || continuationToken.isEmpty()) { + throw new IllegalArgumentException("continuationToken must be a non-empty string"); + } + + return this.leaseUpdater.updateLease( + lease, + this.createItemForLease(lease.getId()), + this.requestOptionsFactory.createRequestOptions(lease), + serverLease -> { + if (serverLease.getOwner() != null && !serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { + logger.info("Partition {} lease was taken over by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); + throw Exceptions.propagate(new LeaseLostException(lease)); + } + serverLease.setContinuationToken(continuationToken); + + return serverLease; + }); + } + + @Override + public Mono isInitialized() { + return this.leaseStore.isInitialized(); + } + + @Override + public Mono markInitialized() { + return this.leaseStore.markInitialized(); + } + + @Override + public Mono acquireInitializationLock(Duration lockExpirationTime) { + return this.leaseStore.acquireInitializationLock(lockExpirationTime); + } + + @Override + public Mono releaseInitializationLock() { + return this.leaseStore.releaseInitializationLock(); + } + + private Mono tryGetLease(Lease lease) { + CosmosItem itemForLease = this.createItemForLease(lease.getId()); + + return this.leaseDocumentClient.readItem(itemForLease, this.requestOptionsFactory.createRequestOptions(lease)) + .onErrorResume( ex -> { + if (ex instanceof CosmosClientException) { + CosmosClientException e = (CosmosClientException) ex; + if (e.statusCode() == ChangeFeedHelper.HTTP_STATUS_CODE_NOT_FOUND) { + return Mono.empty(); + } + } + + return Mono.error(ex); + }) + .map( documentResourceResponse -> { + if (documentResourceResponse == null) 
return null; + return ServiceItemLease.fromDocument(documentResourceResponse.properties()); + }); + } + + private Flux listDocuments(String prefix) { + if (prefix == null || prefix.isEmpty()) { + throw new IllegalArgumentException("prefix"); + } + + SqlParameter param = new SqlParameter(); + param.name("@PartitionLeasePrefix"); + param.value(prefix); + SqlQuerySpec querySpec = new SqlQuerySpec( + "SELECT * FROM c WHERE STARTSWITH(c.id, @PartitionLeasePrefix)", + new SqlParameterList(param)); + + Flux> query = this.leaseDocumentClient.queryItems( + this.settings.getLeaseCollectionLink(), + querySpec, + this.requestOptionsFactory.createFeedOptions()); + + return query.flatMap( documentFeedResponse -> Flux.fromIterable(documentFeedResponse.results())) + .map( ServiceItemLease::fromDocument); + } + + private String getDocumentId(String leaseToken) + { + return this.getPartitionLeasePrefix() + leaseToken; + } + + private String getPartitionLeasePrefix() + { + return this.settings.getContainerNamePrefix() + ".."; + } + + private CosmosItem createItemForLease(String leaseId) { + return this.leaseDocumentClient.getContainerClient().getItem(leaseId, "/id"); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/ObserverExceptionWrappingChangeFeedObserverDecorator.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/ObserverExceptionWrappingChangeFeedObserverDecorator.java new file mode 100644 index 0000000000000..f228389e59514 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/ObserverExceptionWrappingChangeFeedObserverDecorator.java @@ -0,0 +1,82 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed.implementation; + +import com.azure.data.cosmos.CosmosItemProperties; +import com.azure.data.cosmos.internal.changefeed.ChangeFeedObserver; +import com.azure.data.cosmos.internal.changefeed.ChangeFeedObserverCloseReason; +import com.azure.data.cosmos.internal.changefeed.ChangeFeedObserverContext; +import com.azure.data.cosmos.internal.changefeed.exceptions.ObserverException; + +import java.util.List; + +/** + * Exception wrapping decorator implementation for {@link ChangeFeedObserver}. 
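Every lease for one monitored container shares an id prefix (getPartitionLeasePrefix() above appends ".." to the container-name prefix), so enumerating leases is a single parameterized STARTSWITH query, exactly as listDocuments builds it. The query-spec construction on its own, using the public SqlQuerySpec, SqlParameter and SqlParameterList types:

    import com.azure.data.cosmos.SqlParameter;
    import com.azure.data.cosmos.SqlParameterList;
    import com.azure.data.cosmos.SqlQuerySpec;

    final class LeaseQuerySketch {
        static SqlQuerySpec leasesWithPrefix(String prefix) {
            // The prefix is bound as a parameter rather than concatenated into the query text,
            // so unusual characters in lease ids cannot break the query.
            SqlParameter param = new SqlParameter();
            param.name("@PartitionLeasePrefix");
            param.value(prefix);
            return new SqlQuerySpec(
                "SELECT * FROM c WHERE STARTSWITH(c.id, @PartitionLeasePrefix)",
                new SqlParameterList(param));
        }
    }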
+ */ +class ObserverExceptionWrappingChangeFeedObserverDecorator implements ChangeFeedObserver { + private ChangeFeedObserver changeFeedObserver; + + public ObserverExceptionWrappingChangeFeedObserverDecorator(ChangeFeedObserver changeFeedObserver) + { + this.changeFeedObserver = changeFeedObserver; + } + + @Override + public void open(ChangeFeedObserverContext context) { + try + { + this.changeFeedObserver.open(context); + } + catch (RuntimeException userException) + { + // Logger.WarnException("Exception happened on Observer.OpenAsync", userException); + throw new ObserverException(userException); + } + } + + @Override + public void close(ChangeFeedObserverContext context, ChangeFeedObserverCloseReason reason) { + try + { + this.changeFeedObserver.close(context, reason); + } + catch (RuntimeException userException) + { + // Logger.WarnException("Exception happened on Observer.CloseAsync", userException); + throw new ObserverException(userException); + } + } + + @Override + public void processChanges(ChangeFeedObserverContext context, List docs) { + try + { + this.changeFeedObserver.processChanges(context, docs); + } + catch (Exception userException) + { + // Logger.WarnException("Exception happened on Observer.OpenAsync", userException); + throw new ObserverException(userException); + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/PartitionCheckpointerImpl.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/PartitionCheckpointerImpl.java new file mode 100644 index 0000000000000..08519d7c4dedb --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/PartitionCheckpointerImpl.java @@ -0,0 +1,56 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed.implementation; + +import com.azure.data.cosmos.internal.changefeed.Lease; +import com.azure.data.cosmos.internal.changefeed.LeaseCheckpointer; +import com.azure.data.cosmos.internal.changefeed.PartitionCheckpointer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Mono; + +/** + * Checkpoint the given partition up to the given continuation token. 
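The decorator above exists to mark failures that originate in user code: open, close and processChanges are each wrapped so that anything the user's observer throws is rethrown as an ObserverException with the original error preserved as the cause. The same wrap-and-rethrow idea as a tiny self-contained sketch (UserCodeException stands in for ObserverException):

    final class WrapUserErrorSketch {
        // Stand-in for ObserverException: marks a failure as originating in user code.
        static final class UserCodeException extends RuntimeException {
            UserCodeException(Throwable cause) {
                super(cause);
            }
        }

        static void runUserCallback(Runnable userCallback) {
            try {
                userCallback.run();
            } catch (RuntimeException userError) {
                // Preserve the original failure as the cause so diagnostics are not lost.
                throw new UserCodeException(userError);
            }
        }
    }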
+ */ +class PartitionCheckpointerImpl implements PartitionCheckpointer { + private final Logger logger = LoggerFactory.getLogger(PartitionCheckpointerImpl.class); + private final LeaseCheckpointer leaseCheckpointer; + private Lease lease; + + public PartitionCheckpointerImpl(LeaseCheckpointer leaseCheckpointer, Lease lease) { + this.leaseCheckpointer = leaseCheckpointer; + this.lease = lease; + } + + @Override + public Mono checkpointPartition(String continuationToken) { + PartitionCheckpointerImpl self = this; + return this.leaseCheckpointer.checkpoint(this.lease, continuationToken) + .map(lease1 -> { + self.lease = lease1; + logger.info(String.format("Checkpoint: partition %s, new continuation %s", self.lease.getLeaseToken(), self.lease.getContinuationToken())); + return lease1; + }) + .then(); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/PartitionControllerImpl.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/PartitionControllerImpl.java new file mode 100644 index 0000000000000..b290114d48dc0 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/PartitionControllerImpl.java @@ -0,0 +1,186 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE.
+ */ +package com.azure.data.cosmos.internal.changefeed.implementation; + +import com.azure.data.cosmos.internal.changefeed.CancellationToken; +import com.azure.data.cosmos.internal.changefeed.CancellationTokenSource; +import com.azure.data.cosmos.internal.changefeed.Lease; +import com.azure.data.cosmos.internal.changefeed.LeaseContainer; +import com.azure.data.cosmos.internal.changefeed.LeaseManager; +import com.azure.data.cosmos.internal.changefeed.PartitionController; +import com.azure.data.cosmos.internal.changefeed.PartitionSupervisor; +import com.azure.data.cosmos.internal.changefeed.PartitionSupervisorFactory; +import com.azure.data.cosmos.internal.changefeed.PartitionSynchronizer; +import com.azure.data.cosmos.internal.changefeed.exceptions.PartitionSplitException; +import com.azure.data.cosmos.internal.changefeed.exceptions.TaskCancelledException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Mono; + +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutorService; + +/** + * Implementation for {@link PartitionController}. + */ +class PartitionControllerImpl implements PartitionController { + private final Logger logger = LoggerFactory.getLogger(PartitionControllerImpl.class); + private final Map currentlyOwnedPartitions = new ConcurrentHashMap<>(); + + private final LeaseContainer leaseContainer; + private final LeaseManager leaseManager; + private final PartitionSupervisorFactory partitionSupervisorFactory; + private final PartitionSynchronizer synchronizer; + private CancellationTokenSource shutdownCts; + + private final ExecutorService executorService; + + public PartitionControllerImpl( + LeaseContainer leaseContainer, + LeaseManager leaseManager, + PartitionSupervisorFactory partitionSupervisorFactory, + PartitionSynchronizer synchronizer, + ExecutorService executorService) { + + this.leaseContainer = leaseContainer; + this.leaseManager = leaseManager; + this.partitionSupervisorFactory = partitionSupervisorFactory; + this.synchronizer = synchronizer; + this.executorService = executorService; + } + + @Override + public Mono initialize() { + this.shutdownCts = new CancellationTokenSource(); + return this.loadLeases(); + } + + @Override + public synchronized Mono addOrUpdateLease(Lease lease) { + WorkerTask workerTask = this.currentlyOwnedPartitions.get(lease.getLeaseToken()); + if ( workerTask != null && workerTask.isRunning()) { + Lease updatedLease = this.leaseManager.updateProperties(lease).block(); + logger.debug(String.format("Partition %s: updated.", lease.getLeaseToken())); + return Mono.just(updatedLease); + } + + try { + Lease updatedLease = this.leaseManager.acquire(lease).block(); + if (updatedLease != null) lease = updatedLease; + + logger.info(String.format("Partition %s: acquired.", lease.getLeaseToken())); + } catch (RuntimeException ex) { + this.removeLease(lease).block(); + throw ex; + } + + PartitionSupervisor supervisor = this.partitionSupervisorFactory.create(lease); + this.currentlyOwnedPartitions.put(lease.getLeaseToken(), this.processPartition(supervisor, lease)); + + return Mono.just(lease); + } + + @Override + public Mono shutdown() { + // TODO: wait for the threads to finish. 
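+        // Cancelling the shared token source signals every partition supervisor started from processPartition() to stop.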
+ this.shutdownCts.cancel(); +// this.currentlyOwnedPartitions.clear(); + + return Mono.empty(); + } + + private Mono loadLeases() { + PartitionControllerImpl self = this; + logger.debug("Starting renew leases assigned to this host on initialize."); + + return this.leaseContainer.getOwnedLeases() + .flatMap( lease -> { + logger.info(String.format("Acquired lease for PartitionId '%s' on startup.", lease.getLeaseToken())); + return self.addOrUpdateLease(lease); + }).then(); + } + + private Mono removeLease(Lease lease) { + if (this.currentlyOwnedPartitions.get(lease.getLeaseToken()) != null) { + WorkerTask workerTask = this.currentlyOwnedPartitions.remove(lease.getLeaseToken()); + + if (workerTask.isRunning()) { + workerTask.interrupt(); + } + + logger.info(String.format("Partition %s: released.", lease.getLeaseToken())); + } + + return this.leaseManager.release(lease) + .onErrorResume(e -> { + logger.warn(String.format("Partition %s: failed to remove lease.", lease.getLeaseToken()), e); + return Mono.empty(); + } + ).doOnSuccess(aVoid -> { + logger.info("Partition {}: successfully removed lease.", lease.getLeaseToken()); + }); + } + + private WorkerTask processPartition(PartitionSupervisor partitionSupervisor, Lease lease) { + PartitionControllerImpl self = this; + + CancellationToken cancellationToken = this.shutdownCts.getToken(); + + WorkerTask partitionSupervisorTask = new WorkerTask(lease, () -> { + partitionSupervisor.run(cancellationToken) + .onErrorResume(throwable -> { + if (throwable instanceof PartitionSplitException) { + PartitionSplitException ex = (PartitionSplitException) throwable; + return self.handleSplit(lease, ex.getLastContinuation()); + } else if (throwable instanceof TaskCancelledException) { + logger.debug(String.format("Partition %s: processing canceled.", lease.getLeaseToken())); + } else { + logger.warn(String.format("Partition %s: processing failed.", lease.getLeaseToken()), throwable); + } + + return Mono.empty(); + }) + .then(self.removeLease(lease)).subscribe(); + }); + + this.executorService.execute(partitionSupervisorTask); + + return partitionSupervisorTask; + } + + private Mono handleSplit(Lease lease, String lastContinuationToken) { + PartitionControllerImpl self = this; + + lease.setContinuationToken(lastContinuationToken); + return this.synchronizer.splitPartition(lease) + .flatMap(l -> { + l.setProperties(lease.getProperties()); + return self.addOrUpdateLease(l); + }).then(self.leaseManager.delete(lease)) + .onErrorResume(throwable -> { + logger.warn(String.format("partition %s: failed to split", lease.getLeaseToken()), throwable); + return Mono.empty(); + }); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/PartitionLoadBalancerImpl.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/PartitionLoadBalancerImpl.java new file mode 100644 index 0000000000000..316a9fbce4231 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/PartitionLoadBalancerImpl.java @@ -0,0 +1,143 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit 
persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed.implementation; + +import com.azure.data.cosmos.internal.changefeed.CancellationToken; +import com.azure.data.cosmos.internal.changefeed.CancellationTokenSource; +import com.azure.data.cosmos.internal.changefeed.Lease; +import com.azure.data.cosmos.internal.changefeed.LeaseContainer; +import com.azure.data.cosmos.internal.changefeed.PartitionController; +import com.azure.data.cosmos.internal.changefeed.PartitionLoadBalancer; +import com.azure.data.cosmos.internal.changefeed.PartitionLoadBalancingStrategy; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Mono; + +import java.time.Duration; +import java.util.List; +import java.util.concurrent.ExecutorService; + +/** + * Implementation for {@link PartitionLoadBalancer}. + */ +class PartitionLoadBalancerImpl implements PartitionLoadBalancer { + private final Logger logger = LoggerFactory.getLogger(PartitionLoadBalancerImpl.class); + private final PartitionController partitionController; + private final LeaseContainer leaseContainer; + private final PartitionLoadBalancingStrategy partitionLoadBalancingStrategy; + private final Duration leaseAcquireInterval; + private final ExecutorService executorService; + + private CancellationTokenSource cancellationTokenSource; + + private volatile boolean started; + + private final Object lock; + + public PartitionLoadBalancerImpl( + PartitionController partitionController, + LeaseContainer leaseContainer, + PartitionLoadBalancingStrategy partitionLoadBalancingStrategy, + Duration leaseAcquireInterval, + ExecutorService executorService) { + + if (partitionController == null) throw new IllegalArgumentException("partitionController"); + if (leaseContainer == null) throw new IllegalArgumentException("leaseContainer"); + if (partitionLoadBalancingStrategy == null) throw new IllegalArgumentException("partitionLoadBalancingStrategy"); + if (executorService == null) throw new IllegalArgumentException("executorService"); + + this.partitionController = partitionController; + this.leaseContainer = leaseContainer; + this.partitionLoadBalancingStrategy = partitionLoadBalancingStrategy; + this.leaseAcquireInterval = leaseAcquireInterval; + this.executorService = executorService; + + this.started = false; + this.lock = new Object(); + } + + @Override + public Mono start() { + PartitionLoadBalancerImpl self = this; + + return Mono.fromRunnable( () -> { + synchronized (lock) { + if (this.started) { + throw new IllegalStateException("Partition load balancer already started"); + } + + this.started = true; + this.cancellationTokenSource = new CancellationTokenSource(); + } + + CancellationToken cancellationToken = this.cancellationTokenSource.getToken(); + + this.executorService.execute(() -> 
self.run(cancellationToken).block()); + }); + } + + @Override + public Mono stop() { + return Mono.fromRunnable( () -> { + synchronized (lock) { + this.started = false; + this.cancellationTokenSource.cancel(); + } + + this.partitionController.shutdown().block(); + this.cancellationTokenSource = null; + }); + } + + private Mono run(CancellationToken cancellationToken) { + PartitionLoadBalancerImpl self = this; + + return Mono.fromRunnable( () -> { + try { + while (!cancellationToken.isCancellationRequested()) { + List allLeases = self.leaseContainer.getAllLeases().collectList().block(); + List leasesToTake = self.partitionLoadBalancingStrategy.selectLeasesToTake(allLeases); + for (Lease lease : leasesToTake) { + self.partitionController.addOrUpdateLease(lease).block(); + } + + long remainingWork = this.leaseAcquireInterval.toMillis(); + + try { + while (!cancellationToken.isCancellationRequested() && remainingWork > 0) { + Thread.sleep(100); + remainingWork -= 100; + } + } catch (InterruptedException ex) { + // exception caught + logger.warn("Partition load balancer caught an interrupted exception", ex); + } + } + } catch (Exception ex) { + // We should not get here. + logger.info("Partition load balancer task stopped."); + this.stop(); + } + }); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/PartitionManagerImpl.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/PartitionManagerImpl.java new file mode 100644 index 0000000000000..3ca256c544573 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/PartitionManagerImpl.java @@ -0,0 +1,59 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed.implementation; + +import com.azure.data.cosmos.internal.changefeed.Bootstrapper; +import com.azure.data.cosmos.internal.changefeed.PartitionController; +import com.azure.data.cosmos.internal.changefeed.PartitionLoadBalancer; +import com.azure.data.cosmos.internal.changefeed.PartitionManager; +import reactor.core.publisher.Mono; + +/** + * Implementation for {@link PartitionManager}. 
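+ * Starting the manager runs the bootstrapper, initializes the partition controller and then starts the partition load balancer; stopping it only stops the load balancer, which in turn shuts the controller down.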
+ */ +class PartitionManagerImpl implements PartitionManager { + private final Bootstrapper bootstrapper; + private final PartitionController partitionController; + private final PartitionLoadBalancer partitionLoadBalancer; + + public PartitionManagerImpl(Bootstrapper bootstrapper, PartitionController partitionController, PartitionLoadBalancer partitionLoadBalancer) { + this.bootstrapper = bootstrapper; + this.partitionController = partitionController; + this.partitionLoadBalancer = partitionLoadBalancer; + } + + @Override + public Mono start() { + PartitionManagerImpl self = this; + + return self.bootstrapper.initialize() + .then(self.partitionController.initialize()) + .then(self.partitionLoadBalancer.start()); + } + + @Override + public Mono stop() { + PartitionManagerImpl self = this; + return self.partitionLoadBalancer.stop(); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/PartitionProcessorFactoryImpl.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/PartitionProcessorFactoryImpl.java new file mode 100644 index 0000000000000..87e6bc3405883 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/PartitionProcessorFactoryImpl.java @@ -0,0 +1,85 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed.implementation; + +import com.azure.data.cosmos.ChangeFeedProcessorOptions; +import com.azure.data.cosmos.CosmosContainer; +import com.azure.data.cosmos.internal.changefeed.ChangeFeedContextClient; +import com.azure.data.cosmos.internal.changefeed.ChangeFeedObserver; +import com.azure.data.cosmos.internal.changefeed.Lease; +import com.azure.data.cosmos.internal.changefeed.LeaseCheckpointer; +import com.azure.data.cosmos.internal.changefeed.PartitionCheckpointer; +import com.azure.data.cosmos.internal.changefeed.PartitionProcessor; +import com.azure.data.cosmos.internal.changefeed.PartitionProcessorFactory; +import com.azure.data.cosmos.internal.changefeed.ProcessorSettings; + +/** + * Implementation for {@link PartitionProcessorFactory}. 
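+ * Each created processor is seeded with the continuation token stored in the lease, falling back to the configured start continuation when the lease does not carry one yet.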
+ */ +class PartitionProcessorFactoryImpl implements PartitionProcessorFactory { + private final ChangeFeedContextClient documentClient; + private final ChangeFeedProcessorOptions changeFeedProcessorOptions; + private final LeaseCheckpointer leaseCheckpointer; + private final CosmosContainer collectionSelfLink; + + public PartitionProcessorFactoryImpl( + ChangeFeedContextClient documentClient, + ChangeFeedProcessorOptions changeFeedProcessorOptions, + LeaseCheckpointer leaseCheckpointer, + CosmosContainer collectionSelfLink) { + + if (documentClient == null) throw new IllegalArgumentException("documentClient"); + if (changeFeedProcessorOptions == null) throw new IllegalArgumentException("changeFeedProcessorOptions"); + if (leaseCheckpointer == null) throw new IllegalArgumentException("leaseCheckpointer"); + if (collectionSelfLink == null) throw new IllegalArgumentException("collectionSelfLink"); + + this.documentClient = documentClient; + this.changeFeedProcessorOptions = changeFeedProcessorOptions; + this.leaseCheckpointer = leaseCheckpointer; + this.collectionSelfLink = collectionSelfLink; + } + + @Override + public PartitionProcessor create(Lease lease, ChangeFeedObserver observer) { + if (observer == null) throw new IllegalArgumentException("observer"); + if (lease == null) throw new IllegalArgumentException("lease"); + + String startContinuation = lease.getContinuationToken(); + + if (startContinuation == null || startContinuation.isEmpty()) { + startContinuation = this.changeFeedProcessorOptions.startContinuation(); + } + + ProcessorSettings settings = new ProcessorSettings() + .withCollectionLink(this.collectionSelfLink) + .withStartContinuation(startContinuation) + .withPartitionKeyRangeId(lease.getLeaseToken()) + .withFeedPollDelay(this.changeFeedProcessorOptions.feedPollDelay()) + .withMaxItemCount(this.changeFeedProcessorOptions.maxItemCount()) + .withStartFromBeginning(this.changeFeedProcessorOptions.startFromBeginning()) + .withStartTime(this.changeFeedProcessorOptions.startTime()); // .sessionToken(this.changeFeedProcessorOptions.sessionToken()); + + PartitionCheckpointer checkpointer = new PartitionCheckpointerImpl(this.leaseCheckpointer, lease); + return new PartitionProcessorImpl(observer, this.documentClient, settings, checkpointer); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/PartitionProcessorImpl.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/PartitionProcessorImpl.java new file mode 100644 index 0000000000000..8f7c563d8fe78 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/PartitionProcessorImpl.java @@ -0,0 +1,175 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed.implementation; + +import com.azure.data.cosmos.ChangeFeedOptions; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.CosmosItemProperties; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.internal.changefeed.CancellationToken; +import com.azure.data.cosmos.internal.changefeed.ChangeFeedContextClient; +import com.azure.data.cosmos.internal.changefeed.ChangeFeedObserver; +import com.azure.data.cosmos.internal.changefeed.ChangeFeedObserverContext; +import com.azure.data.cosmos.internal.changefeed.PartitionCheckpointer; +import com.azure.data.cosmos.internal.changefeed.PartitionProcessor; +import com.azure.data.cosmos.internal.changefeed.ProcessorSettings; +import com.azure.data.cosmos.internal.changefeed.exceptions.PartitionNotFoundException; +import com.azure.data.cosmos.internal.changefeed.exceptions.PartitionSplitException; +import com.azure.data.cosmos.internal.changefeed.exceptions.TaskCancelledException; +import reactor.core.publisher.Mono; + +import java.time.Duration; +import java.util.List; + +import static com.azure.data.cosmos.CommonsBridgeInternal.partitionKeyRangeIdInternal; + +/** + * Implementation for {@link PartitionProcessor}. + */ +class PartitionProcessorImpl implements PartitionProcessor { + private static final int DefaultMaxItemCount = 100; + // private final Observable> query; + private final ProcessorSettings settings; + private final PartitionCheckpointer checkpointer; + private final ChangeFeedObserver observer; + private final ChangeFeedOptions options; + private final ChangeFeedContextClient documentClient; + private RuntimeException resultException; + + private String lastContinuation; + + public PartitionProcessorImpl(ChangeFeedObserver observer, ChangeFeedContextClient documentClient, ProcessorSettings settings, PartitionCheckpointer checkpointer) { + this.observer = observer; + this.documentClient = documentClient; + this.settings = settings; + this.checkpointer = checkpointer; + + this.options = new ChangeFeedOptions(); + this.options.maxItemCount(settings.getMaxItemCount()); + partitionKeyRangeIdInternal(this.options, settings.getPartitionKeyRangeId()); + // this.options.sessionToken(properties.sessionToken()); + this.options.startFromBeginning(settings.isStartFromBeginning()); + this.options.requestContinuation(settings.getStartContinuation()); + this.options.startDateTime(settings.getStartTime()); + + //this.query = documentClient.createDocumentChangeFeedQuery(self.properties.getCollectionSelfLink(), this.options); + } + + @Override + public Mono run(CancellationToken cancellationToken) { + PartitionProcessorImpl self = this; + this.lastContinuation = this.settings.getStartContinuation(); + + return Mono.fromRunnable( () -> { + while (!cancellationToken.isCancellationRequested()) { + Duration delay = self.settings.getFeedPollDelay(); + + try { + self.options.requestContinuation(self.lastContinuation); + List> documentFeedResponseList = 
self.documentClient.createDocumentChangeFeedQuery(self.settings.getCollectionSelfLink(), self.options) + .collectList() + .block(); + + for (FeedResponse documentFeedResponse : documentFeedResponseList) { + self.lastContinuation = documentFeedResponse.continuationToken(); + if (documentFeedResponse.results() != null && documentFeedResponse.results().size() > 0) { + self.dispatchChanges(documentFeedResponse); + } + + self.options.requestContinuation(self.lastContinuation); + + if (cancellationToken.isCancellationRequested()) { + // Observation was cancelled. + throw new TaskCancelledException(); + } + } + + if (this.options.maxItemCount().compareTo(this.settings.getMaxItemCount()) == 0) { + this.options.maxItemCount(this.settings.getMaxItemCount()); // Reset after successful execution. + } + } catch (RuntimeException ex) { + if (ex.getCause() instanceof CosmosClientException) { + + CosmosClientException clientException = (CosmosClientException) ex.getCause(); + // this.logger.WarnException("exception: partition '{0}'", clientException, this.properties.PartitionKeyRangeId); + StatusCodeErrorType docDbError = ExceptionClassifier.classifyClientException(clientException); + + switch (docDbError) { + case PARTITION_NOT_FOUND: { + self.resultException = new PartitionNotFoundException("Partition not found.", self.lastContinuation); + } + case PARTITION_SPLIT: { + self.resultException = new PartitionSplitException("Partition split.", self.lastContinuation); + } + case UNDEFINED: { + self.resultException = ex; + } + case MAX_ITEM_COUNT_TOO_LARGE: { + if (this.options.maxItemCount() == null) { + this.options.maxItemCount(DefaultMaxItemCount); + } else if (this.options.maxItemCount() <= 1) { + // this.logger.ErrorFormat("Cannot reduce maxItemCount further as it's already at {0}.", this.options.MaxItemCount); + throw ex; + } + + this.options.maxItemCount(this.options.maxItemCount() / 2); + // this.logger.WarnFormat("Reducing maxItemCount, new value: {0}.", this.options.MaxItemCount); + break; + } + default: { + // this.logger.Fatal($"Unrecognized DocDbError enum value {docDbError}"); + // Debug.Fail($"Unrecognized DocDbError enum value {docDbError}"); + self.resultException = ex; + } + } + } else if (ex instanceof TaskCancelledException) { + // this.logger.WarnException("exception: partition '{0}'", canceledException, this.properties.PartitionKeyRangeId); + self.resultException = ex; + } + } + + long remainingWork = delay.toMillis(); + + try { + while (!cancellationToken.isCancellationRequested() && remainingWork > 0) { + Thread.sleep(100); + remainingWork -= 100; + } + } catch (InterruptedException iex) { + // exception caught + } + } + }); + } + + @Override + public RuntimeException getResultException() { + return this.resultException; + } + + private void dispatchChanges(FeedResponse response) { + ChangeFeedObserverContext context = new ChangeFeedObserverContextImpl(this.settings.getPartitionKeyRangeId(), response, this.checkpointer); + + this.observer.processChanges(context, response.results()); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/PartitionSupervisorFactoryImpl.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/PartitionSupervisorFactoryImpl.java new file mode 100644 index 0000000000000..2f86448ca9e60 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/PartitionSupervisorFactoryImpl.java @@ -0,0 +1,77 @@ +/* + * The MIT License (MIT) + 
* Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed.implementation; + +import com.azure.data.cosmos.ChangeFeedProcessorOptions; +import com.azure.data.cosmos.internal.changefeed.ChangeFeedObserver; +import com.azure.data.cosmos.internal.changefeed.ChangeFeedObserverFactory; +import com.azure.data.cosmos.internal.changefeed.Lease; +import com.azure.data.cosmos.internal.changefeed.LeaseManager; +import com.azure.data.cosmos.internal.changefeed.LeaseRenewer; +import com.azure.data.cosmos.internal.changefeed.PartitionProcessor; +import com.azure.data.cosmos.internal.changefeed.PartitionProcessorFactory; +import com.azure.data.cosmos.internal.changefeed.PartitionSupervisor; +import com.azure.data.cosmos.internal.changefeed.PartitionSupervisorFactory; + +import java.util.concurrent.ExecutorService; + +/** + * Implementation for the partition supervisor factory. 
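+ * Every lease gets its own observer instance, partition processor and lease renewer, all bundled into a dedicated {@link PartitionSupervisor}.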
+ */ +class PartitionSupervisorFactoryImpl implements PartitionSupervisorFactory { + private final ChangeFeedObserverFactory observerFactory; + private final LeaseManager leaseManager; + private final ChangeFeedProcessorOptions changeFeedProcessorOptions; + private final PartitionProcessorFactory partitionProcessorFactory; + private final ExecutorService executorService; + + + public PartitionSupervisorFactoryImpl( + ChangeFeedObserverFactory observerFactory, + LeaseManager leaseManager, + PartitionProcessorFactory partitionProcessorFactory, + ChangeFeedProcessorOptions options, + ExecutorService executorService) { + if (observerFactory == null) throw new IllegalArgumentException("observerFactory"); + if (leaseManager == null) throw new IllegalArgumentException("leaseManager"); + if (options == null) throw new IllegalArgumentException("options"); + if (partitionProcessorFactory == null) throw new IllegalArgumentException("partitionProcessorFactory"); + + this.observerFactory = observerFactory; + this.leaseManager = leaseManager; + this.changeFeedProcessorOptions = options; + this.partitionProcessorFactory = partitionProcessorFactory; + this.executorService = executorService; + } + + @Override + public PartitionSupervisor create(Lease lease) { + if (lease == null) throw new IllegalArgumentException("lease"); + + ChangeFeedObserver changeFeedObserver = this.observerFactory.createObserver(); + PartitionProcessor processor = this.partitionProcessorFactory.create(lease, changeFeedObserver); + LeaseRenewer renewer = new LeaseRenewerImpl(lease, this.leaseManager, this.changeFeedProcessorOptions.leaseRenewInterval()); + + return new PartitionSupervisorImpl(lease, changeFeedObserver, processor, renewer, this.executorService); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/PartitionSupervisorImpl.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/PartitionSupervisorImpl.java new file mode 100644 index 0000000000000..e2945bef2f7db --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/PartitionSupervisorImpl.java @@ -0,0 +1,166 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos.internal.changefeed.implementation; + +import com.azure.data.cosmos.internal.changefeed.CancellationToken; +import com.azure.data.cosmos.internal.changefeed.CancellationTokenSource; +import com.azure.data.cosmos.internal.changefeed.ChangeFeedObserver; +import com.azure.data.cosmos.internal.changefeed.ChangeFeedObserverCloseReason; +import com.azure.data.cosmos.internal.changefeed.ChangeFeedObserverContext; +import com.azure.data.cosmos.internal.changefeed.Lease; +import com.azure.data.cosmos.internal.changefeed.LeaseRenewer; +import com.azure.data.cosmos.internal.changefeed.PartitionProcessor; +import com.azure.data.cosmos.internal.changefeed.PartitionSupervisor; +import com.azure.data.cosmos.internal.changefeed.exceptions.LeaseLostException; +import com.azure.data.cosmos.internal.changefeed.exceptions.ObserverException; +import com.azure.data.cosmos.internal.changefeed.exceptions.PartitionSplitException; +import com.azure.data.cosmos.internal.changefeed.exceptions.TaskCancelledException; +import reactor.core.publisher.Mono; + +import java.io.Closeable; +import java.io.IOException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; + +/** + * Implementation for {@link PartitionSupervisor}. + */ +class PartitionSupervisorImpl implements PartitionSupervisor, Closeable { + private final Lease lease; + private final ChangeFeedObserver observer; + private final PartitionProcessor processor; + private final LeaseRenewer renewer; + private CancellationTokenSource renewerCancellation; + private CancellationTokenSource processorCancellation; + + private RuntimeException resultException; + + private ExecutorService executorService; + + public PartitionSupervisorImpl(Lease lease, ChangeFeedObserver observer, PartitionProcessor processor, LeaseRenewer renewer, ExecutorService executorService) { + this.lease = lease; + this.observer = observer; + this.processor = processor; + this.renewer = renewer; + this.executorService = executorService; + + if (executorService == null) { + this.executorService = Executors.newFixedThreadPool(3); + } + } + + @Override + public Mono run(CancellationToken shutdownToken) { + PartitionSupervisorImpl self = this; + this.resultException = null; + + ChangeFeedObserverContext context = new ChangeFeedObserverContextImpl(self.lease.getLeaseToken()); + + self.observer.open(context); + + ChangeFeedObserverCloseReason closeReason = ChangeFeedObserverCloseReason.UNKNOWN; + + try { + self.processorCancellation = new CancellationTokenSource(); + + Thread processorThread = new Thread(new Runnable() { + @Override + public void run() { + self.processor.run(self.processorCancellation.getToken()).block(); + } + }); + + self.renewerCancellation = new CancellationTokenSource(); + + Thread renewerThread = new Thread(new Runnable() { + @Override + public void run() { + self.renewer.run(self.renewerCancellation.getToken()).block(); + } + }); + + self.executorService.execute(processorThread); + self.executorService.execute(renewerThread); + + while (!shutdownToken.isCancellationRequested() && self.processor.getResultException() == null && self.renewer.getResultException() == null) { + try { + Thread.sleep(100); + } catch (InterruptedException iex) { + break; + } + } + + this.processorCancellation.cancel(); + this.renewerCancellation.cancel(); + + if (self.processor.getResultException() != null) { + throw self.processor.getResultException(); + } + + if (self.renewer.getResultException() != null) { + throw 
self.renewer.getResultException(); + } + + closeReason = shutdownToken.isCancellationRequested() ? + ChangeFeedObserverCloseReason.SHUTDOWN : + ChangeFeedObserverCloseReason.UNKNOWN; + + } catch (LeaseLostException llex) { + closeReason = ChangeFeedObserverCloseReason.LEASE_LOST; + self.resultException = llex; + } catch (PartitionSplitException pex) { + closeReason = ChangeFeedObserverCloseReason.LEASE_GONE; + self.resultException = pex; + } catch (TaskCancelledException tcex) { + closeReason = ChangeFeedObserverCloseReason.SHUTDOWN; + self.resultException = null; + } catch (ObserverException oex) { + closeReason = ChangeFeedObserverCloseReason.OBSERVER_ERROR; + self.resultException = oex; + } catch (Exception ex) { + closeReason = ChangeFeedObserverCloseReason.UNKNOWN; + } finally { + self.observer.close(context, closeReason); + } + + if (self.resultException != null) { + return Mono.error(self.resultException); + } else { + return Mono.empty(); + } + } + + @Override + public RuntimeException getResultException() { + return this.resultException; + } + + @Override + public void close() throws IOException { + if (this.processorCancellation != null) { + this.processorCancellation.close(); + } + + this.renewerCancellation.close(); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/PartitionSynchronizerImpl.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/PartitionSynchronizerImpl.java new file mode 100644 index 0000000000000..68c71488da960 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/PartitionSynchronizerImpl.java @@ -0,0 +1,174 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos.internal.changefeed.implementation; + +import com.azure.data.cosmos.CosmosContainer; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.internal.PartitionKeyRange; +import com.azure.data.cosmos.internal.changefeed.ChangeFeedContextClient; +import com.azure.data.cosmos.internal.changefeed.Lease; +import com.azure.data.cosmos.internal.changefeed.LeaseContainer; +import com.azure.data.cosmos.internal.changefeed.LeaseManager; +import com.azure.data.cosmos.internal.changefeed.PartitionSynchronizer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.util.HashSet; +import java.util.Set; + +import static com.azure.data.cosmos.BridgeInternal.extractContainerSelfLink; + +/** + * Implementation for the partition synchronizer. + */ +class PartitionSynchronizerImpl implements PartitionSynchronizer { + private final Logger logger = LoggerFactory.getLogger(PartitionSynchronizerImpl.class); + private final ChangeFeedContextClient documentClient; + private final CosmosContainer collectionSelfLink; + private final LeaseContainer leaseContainer; + private final LeaseManager leaseManager; + private final int degreeOfParallelism; + private final int maxBatchSize; + + public PartitionSynchronizerImpl( + ChangeFeedContextClient documentClient, + CosmosContainer collectionSelfLink, + LeaseContainer leaseContainer, + LeaseManager leaseManager, + int degreeOfParallelism, + int maxBatchSize) + { + this.documentClient = documentClient; + this.collectionSelfLink = collectionSelfLink; + this.leaseContainer = leaseContainer; + this.leaseManager = leaseManager; + this.degreeOfParallelism = degreeOfParallelism; + this.maxBatchSize = maxBatchSize; + } + + @Override + public Mono createMissingLeases() { + PartitionSynchronizerImpl self = this; + + return this.enumPartitionKeyRanges() + .map(partitionKeyRange -> { + // TODO: log the partition key ID found. + return partitionKeyRange.id(); + }) + .collectList() + .flatMap( partitionKeyRangeIds -> { + Set leaseTokens = new HashSet<>(partitionKeyRangeIds); + return self.createLeases(leaseTokens).then(); + }) + .onErrorResume( throwable -> { + // TODO: log the exception. + return Mono.empty(); + }); + } + + @Override + public Flux splitPartition(Lease lease) { + if (lease == null) throw new IllegalArgumentException("lease"); + + PartitionSynchronizerImpl self = this; + String leaseToken = lease.getLeaseToken(); + String lastContinuationToken = lease.getContinuationToken(); + + logger.info(String.format("Partition %s is gone due to split.", leaseToken)); + + // After a split, the children are either all or none available + return this.enumPartitionKeyRanges() + .filter(range -> range != null && range.getParents() != null && range.getParents().contains(leaseToken)) + .map(PartitionKeyRange::id) + .collectList() + .flatMapMany(addedLeaseTokens -> { + if (addedLeaseTokens.size() == 0) { + logger.error(String.format("Partition %s had split but we failed to find at least one child partition", leaseToken)); + throw new RuntimeException(String.format("Partition %s had split but we failed to find at least one child partition", leaseToken)); + } + return Flux.fromIterable(addedLeaseTokens); + }) + .flatMap(addedRangeId -> { + // Creating new lease. 
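+                // Each child range inherits the parent's last continuation token so the feed resumes where the parent partition stopped.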
+ return self.leaseManager.createLeaseIfNotExist(addedRangeId, lastContinuationToken); + }, self.degreeOfParallelism) + .map(newLease -> { + logger.info(String.format("Partition %s split into new partition with lease token %s.", leaseToken, newLease.getLeaseToken())); + return newLease; + }); + } + + private Flux enumPartitionKeyRanges() { + // STRING partitionKeyRangesPath = STRING.format("%spkranges", this.collectionSelfLink); + String partitionKeyRangesPath = extractContainerSelfLink(this.collectionSelfLink); + FeedOptions feedOptions = new FeedOptions(); + feedOptions.maxItemCount(this.maxBatchSize); + feedOptions.requestContinuation(null); + + return this.documentClient.readPartitionKeyRangeFeed(partitionKeyRangesPath, feedOptions) + .map(partitionKeyRangeFeedResponse -> partitionKeyRangeFeedResponse.results()) + .flatMap(partitionKeyRangeList -> Flux.fromIterable(partitionKeyRangeList)) + .onErrorResume(throwable -> { + // TODO: Log the exception. + return Flux.empty(); + }); + } + + /** + * Creates leases if they do not exist. This might happen on initial start or if some lease was unexpectedly lost. + *

+ * Leases are created without the continuation token. It means partitions will be read according to + * 'From Beginning' or 'From current time'. + * Same applies also to split partitions. We do not search for parent lease and take continuation token since this + * might end up of reprocessing all the events since the split. + * + * @param leaseTokens a hash set of all the lease tokens. + * @return a deferred computation of this call. + */ + private Flux createLeases(Set leaseTokens) + { + PartitionSynchronizerImpl self = this; + Set addedLeaseTokens = new HashSet<>(leaseTokens); + + return this.leaseContainer.getAllLeases() + .map(lease -> { + if (lease != null) { + // Get leases after getting ranges, to make sure that no other hosts checked in continuation for + // split partition after we got leases. + addedLeaseTokens.remove(lease.getLeaseToken()); + } + + return lease; + }) + .thenMany(Flux.fromIterable(addedLeaseTokens) + .flatMap( addedRangeId -> + self.leaseManager.createLeaseIfNotExist(addedRangeId, null), self.degreeOfParallelism) + .map( lease -> { + // TODO: log the lease info that was added. + return lease; + }) + ); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/PartitionedByIdCollectionRequestOptionsFactory.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/PartitionedByIdCollectionRequestOptionsFactory.java new file mode 100644 index 0000000000000..c0fb1fcaf04c6 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/PartitionedByIdCollectionRequestOptionsFactory.java @@ -0,0 +1,50 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed.implementation; + +import com.azure.data.cosmos.CosmosItemRequestOptions; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.PartitionKey; +import com.azure.data.cosmos.internal.changefeed.Lease; +import com.azure.data.cosmos.internal.changefeed.RequestOptionsFactory; + +/** + * Used to create request options for partitioned lease collections, when partition key is defined as /id. 
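+ * Item request options pin the partition key to the lease id, while feed options enable cross-partition queries over the lease collection.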
+ */ +class PartitionedByIdCollectionRequestOptionsFactory implements RequestOptionsFactory { + @Override + public CosmosItemRequestOptions createRequestOptions(Lease lease) { + CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions(); + requestOptions.partitionKey(new PartitionKey(lease.getId())); + + return requestOptions; + } + + @Override + public FeedOptions createFeedOptions() { + FeedOptions feedOptions = new FeedOptions(); + feedOptions.enableCrossPartitionQuery(true); + + return feedOptions; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/RemainingPartitionWorkImpl.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/RemainingPartitionWorkImpl.java new file mode 100644 index 0000000000000..c770966f9d8cf --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/RemainingPartitionWorkImpl.java @@ -0,0 +1,57 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed.implementation; + +import com.azure.data.cosmos.internal.changefeed.RemainingPartitionWork; + +/** + * Implements the {@link RemainingPartitionWork} interface. + */ +class RemainingPartitionWorkImpl implements RemainingPartitionWork { + private final String partitionKeyRangeId; + private final long remainingWork; + + /** + * Initializes a new instance of the {@link RemainingPartitionWork} object. + * + * @param partitionKeyRangeId the partition key range ID for which the remaining work is calculated. + * @param remainingWork the amount of documents remaining to be processed. 
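+ * @throws IllegalArgumentException thrown when the partition key range ID is null or empty.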
+ */ + public RemainingPartitionWorkImpl(String partitionKeyRangeId, long remainingWork) { + if (partitionKeyRangeId == null || partitionKeyRangeId.isEmpty()) throw new IllegalArgumentException("partitionKeyRangeId"); + + this.partitionKeyRangeId = partitionKeyRangeId; + this.remainingWork = remainingWork; + } + + + @Override + public String getPartitionKeyRangeId() { + return this.partitionKeyRangeId; + } + + @Override + public long getRemainingWork() { + return this.remainingWork; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/RemainingWorkEstimatorImpl.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/RemainingWorkEstimatorImpl.java new file mode 100644 index 0000000000000..c92785af3911f --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/RemainingWorkEstimatorImpl.java @@ -0,0 +1,85 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed.implementation; + +import com.azure.data.cosmos.internal.changefeed.ChangeFeedContextClient; +import com.azure.data.cosmos.internal.changefeed.LeaseContainer; +import com.azure.data.cosmos.internal.changefeed.RemainingPartitionWork; +import com.azure.data.cosmos.internal.changefeed.RemainingWorkEstimator; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +/** + * Implementation for {@link RemainingWorkEstimator}. 
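+ * The aggregate estimate sums the per-partition estimates and reports 1 when no per-partition work items are available; per-partition estimation itself is not implemented yet and currently returns null.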
+ */ +class RemainingWorkEstimatorImpl implements RemainingWorkEstimator { + private final char PKRangeIdSeparator = ':'; + private final char SegmentSeparator = '#'; + private final String LSNPropertyName = "_lsn"; + private final ChangeFeedContextClient feedDocumentClient; + private final LeaseContainer leaseContainer; + private final String collectionSelfLink; + private final int degreeOfParallelism; + + public RemainingWorkEstimatorImpl( + LeaseContainer leaseContainer, + ChangeFeedContextClient feedDocumentClient, + String collectionSelfLink, + int degreeOfParallelism) { + + if (leaseContainer == null) throw new IllegalArgumentException("leaseContainer"); + if (collectionSelfLink == null || collectionSelfLink.isEmpty()) throw new IllegalArgumentException("collectionSelfLink"); + if (feedDocumentClient == null) throw new IllegalArgumentException("feedDocumentClient"); + if (degreeOfParallelism < 1) throw new IllegalArgumentException("degreeOfParallelism - Degree of parallelism is out of range"); + + this.leaseContainer = leaseContainer; + this.collectionSelfLink = collectionSelfLink; + this.feedDocumentClient = feedDocumentClient; + this.degreeOfParallelism = degreeOfParallelism; + } + + @Override + public Mono estimatedRemainingWork() { + return this.estimatedRemainingWorkPerPartition() + .map(RemainingPartitionWork::getRemainingWork) + .collectList() + .map(list -> { + long sum; + if (list.size() == 0) { + sum = 1; + } else { + sum = 0; + for (long value : list) { + sum += value; + } + } + + return sum; + }); + } + + @Override + public Flux estimatedRemainingWorkPerPartition() { + return null; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/StatusCodeErrorType.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/StatusCodeErrorType.java new file mode 100644 index 0000000000000..51a5dfeba3607 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/StatusCodeErrorType.java @@ -0,0 +1,34 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed.implementation; + +/** + * Groups types of status code errors returned while processing the change feeds. 
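+ * The partition processor maps CosmosClientException status codes onto these values (via ExceptionClassifier) to decide whether a partition is gone, has split, or whether the page size should be reduced.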
+ */ +enum StatusCodeErrorType { + UNDEFINED, + PARTITION_NOT_FOUND, + PARTITION_SPLIT, + TRANSIENT_ERROR, + MAX_ITEM_COUNT_TOO_LARGE +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/TraceHealthMonitor.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/TraceHealthMonitor.java new file mode 100644 index 0000000000000..8ac8060bb709f --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/TraceHealthMonitor.java @@ -0,0 +1,44 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed.implementation; + +import com.azure.data.cosmos.internal.changefeed.HealthMonitor; +import com.azure.data.cosmos.internal.changefeed.HealthMonitoringRecord; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Mono; + +/** + * Implementation for trace health monitor. 
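+ *
+ * Logs the failed operation, the lease id and the associated throwable at ERROR level when the record severity
+ * is {@code HealthSeverity.ERROR}; records of any other severity are not logged.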
+ */ +class TraceHealthMonitor implements HealthMonitor { + private final Logger logger = LoggerFactory.getLogger(TraceHealthMonitor.class); + @Override + public Mono inspect(HealthMonitoringRecord record) { + return Mono.fromRunnable(() -> { + if (record.getSeverity() == HealthMonitoringRecord.HealthSeverity.ERROR) { + logger.error(String.format("Unhealthiness detected in the operation %s for %s.", record.operation.name(), record.lease.getId()), record.throwable); + } + }); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/WorkerTask.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/WorkerTask.java new file mode 100644 index 0000000000000..be23b47ae7107 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/changefeed/implementation/WorkerTask.java @@ -0,0 +1,62 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.changefeed.implementation; + +import com.azure.data.cosmos.internal.changefeed.Lease; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Worker task that executes in a separate thread. 
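+ *
+ * Illustrative usage sketch; the {@code lease} and the job passed to the constructor below are placeholders
+ * supplied by the caller:
+ * <pre>{@code
+ * WorkerTask task = new WorkerTask(lease, () -> processChanges(lease)); // processChanges is hypothetical
+ * task.start();                            // runs the job on its own thread
+ * boolean stillActive = task.isRunning();  // true until the job finishes
+ * Lease owned = task.lease();              // lease this worker is processing
+ * }</pre>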
+ */ +class WorkerTask extends Thread { + private final Logger logger = LoggerFactory.getLogger(WorkerTask.class); + private boolean done = false; + private Runnable job; + private Lease lease; + + WorkerTask(Lease lease, Runnable job) { + this.lease = lease; + this.job = job; + } + + @Override + public void run() { + try { + job.run(); + logger.info("Partition controller worker task {} has finished running.", lease.getLeaseToken()); + } finally { + logger.info("Partition controller worker task {} has exited.", lease.getLeaseToken()); + job = null; + this.done = true; + } + } + + public Lease lease() { + return this.lease; + } + + public boolean isRunning() { + return !this.done; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/Address.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/Address.java new file mode 100644 index 0000000000000..bdfc3c190d4e8 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/Address.java @@ -0,0 +1,97 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.Resource; +import com.azure.data.cosmos.internal.Constants; + +/** + * Used internally to represent a physical address in the Azure Cosmos DB database service. + */ +public class Address extends Resource { + /** + * Initialize an offer object. + */ + public Address() { + super(); + } + + /** + * Initialize an address object from json string. + * + * @param jsonString the json string that represents the address. 
+ */ + public Address(String jsonString) { + super(jsonString); + } + + public boolean IsPrimary() { + return super.getBoolean(Constants.Properties.IS_PRIMARY); + } + + void setIsPrimary(boolean isPrimary) { + BridgeInternal.setProperty(this, Constants.Properties.IS_PRIMARY, isPrimary); + } + + public String getProtocolScheme() { + return super.getString(Constants.Properties.PROTOCOL); + } + + void setProtocol(String protocol) { + BridgeInternal.setProperty(this, Constants.Properties.PROTOCOL, protocol); + } + + public String getLogicalUri() { + return super.getString(Constants.Properties.LOGICAL_URI); + } + + void setLogicalUri(String logicalUri) { + BridgeInternal.setProperty(this, Constants.Properties.LOGICAL_URI, logicalUri); + } + + public String getPhyicalUri() { + return super.getString(Constants.Properties.PHYISCAL_URI); + } + + void setPhysicalUri(String phyicalUri) { + BridgeInternal.setProperty(this, Constants.Properties.PHYISCAL_URI, phyicalUri); + } + + public String getPartitionIndex() { + return super.getString(Constants.Properties.PARTITION_INDEX); + } + + void setPartitionIndex(String partitionIndex) { + BridgeInternal.setProperty(this, Constants.Properties.PARTITION_INDEX, partitionIndex); + } + + public String getParitionKeyRangeId() { + return super.getString(Constants.Properties.PARTITION_KEY_RANGE_ID); + } + + public void setPartitionKeyRangeId(String partitionKeyRangeId) { + BridgeInternal.setProperty(this, Constants.Properties.PARTITION_KEY_RANGE_ID, partitionKeyRangeId); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/AddressInformation.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/AddressInformation.java new file mode 100644 index 0000000000000..044832816380c --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/AddressInformation.java @@ -0,0 +1,96 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import java.util.Objects; + +/** + * Used internally to encapsulate a physical address information in the Azure Cosmos DB database service. 
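+ *
+ * The transport protocol may be supplied either as a {@link Protocol} value or as a scheme string: "https"
+ * maps to {@link Protocol#HTTPS}, "rntbd" maps to {@link Protocol#TCP}, and any other scheme is rejected with
+ * an {@link IllegalArgumentException}.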
+ */ +public class AddressInformation { + private Protocol protocol; + private boolean isPublic; + private boolean isPrimary; + private String physicalUri; + + public AddressInformation(boolean isPublic, boolean isPrimary, String physicalUri, Protocol protocol) { + Objects.requireNonNull(protocol); + this.protocol = protocol; + this.isPublic = isPublic; + this.isPrimary = isPrimary; + this.physicalUri = physicalUri; + } + + public AddressInformation(boolean isPublic, boolean isPrimary, String physicalUri, String protocolScheme) { + this(isPublic, isPrimary, physicalUri, scheme2protocol(protocolScheme)); + } + + public boolean isPublic() { + return isPublic; + } + + public boolean isPrimary() { + return isPrimary; + } + + public String getPhysicalUri() { + return physicalUri; + } + + public Protocol getProtocol() { + return this.protocol; + } + + public String getProtocolName() { + return this.protocol.toString(); + } + + public String getProtocolScheme() { + return this.protocol.scheme(); + } + + @Override + public String toString() { + return "AddressInformation{" + + "protocol='" + protocol + '\'' + + ", isPublic=" + isPublic + + ", isPrimary=" + isPrimary + + ", physicalUri='" + physicalUri + '\'' + + '}'; + } + + private static Protocol scheme2protocol(String scheme) { + + Objects.requireNonNull(scheme, "scheme"); + + switch (scheme.toLowerCase()) { + case "https": + return Protocol.HTTPS; + case "rntbd": + return Protocol.TCP; + default: + throw new IllegalArgumentException(String.format("scheme: %s", scheme)); + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/AddressResolver.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/AddressResolver.java new file mode 100644 index 0000000000000..d02517152cded --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/AddressResolver.java @@ -0,0 +1,706 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.BadRequestException; +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.internal.DocumentCollection; +import com.azure.data.cosmos.InternalServerErrorException; +import com.azure.data.cosmos.InvalidPartitionException; +import com.azure.data.cosmos.NotFoundException; +import com.azure.data.cosmos.PartitionKeyRangeGoneException; +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.ICollectionRoutingMapCache; +import com.azure.data.cosmos.internal.OperationType; +import com.azure.data.cosmos.internal.PartitionKeyRange; +import com.azure.data.cosmos.internal.RMResources; +import com.azure.data.cosmos.internal.ResourceId; +import com.azure.data.cosmos.internal.ResourceType; +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import com.azure.data.cosmos.internal.Strings; +import com.azure.data.cosmos.internal.caches.RxCollectionCache; +import com.azure.data.cosmos.internal.routing.CollectionRoutingMap; +import com.azure.data.cosmos.internal.routing.PartitionKeyInternal; +import com.azure.data.cosmos.internal.routing.PartitionKeyInternalHelper; +import com.azure.data.cosmos.internal.routing.PartitionKeyRangeIdentity; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Mono; + +import java.util.concurrent.Callable; +import java.util.function.Function; + +/** + * Abstracts out the logic to resolve physical replica addresses for the given {@link RxDocumentServiceRequest} + *

+ * AddressCache internally maintains CollectionCache, CollectionRoutingMapCache and BackendAddressCache. + * Logic in this class mainly joins these 3 caches and deals with potential staleness of the caches. + */ +public class AddressResolver implements IAddressResolver { + private static Logger logger = LoggerFactory.getLogger(AddressResolver.class); + + private final static PartitionKeyRangeIdentity masterPartitionKeyRangeIdentity = + new PartitionKeyRangeIdentity(PartitionKeyRange.MASTER_PARTITION_KEY_RANGE_ID); + + private RxCollectionCache collectionCache; + private ICollectionRoutingMapCache collectionRoutingMapCache; + private IAddressCache addressCache; + + public AddressResolver() { + } + + public void initializeCaches( + RxCollectionCache collectionCache, + ICollectionRoutingMapCache collectionRoutingMapCache, + IAddressCache addressCache) { + this.collectionCache = collectionCache; + this.addressCache = addressCache; + this.collectionRoutingMapCache = collectionRoutingMapCache; + } + + public Mono resolveAsync( + RxDocumentServiceRequest request, + boolean forceRefreshPartitionAddresses) { + + Mono resultObs = this.resolveAddressesAndIdentityAsync(request, forceRefreshPartitionAddresses); + + return resultObs.flatMap(result -> { + + try { + this.throwIfTargetChanged(request, result.TargetPartitionKeyRange); + } catch (Exception e) { + return Mono.error(e); + } + + request.requestContext.resolvedPartitionKeyRange = result.TargetPartitionKeyRange; + + return Mono.just(result.Addresses); + }); + } + + private static boolean isSameCollection(PartitionKeyRange initiallyResolved, PartitionKeyRange newlyResolved) { + if (initiallyResolved == null) { + throw new IllegalArgumentException("parent"); + } + + if (newlyResolved == null) { + return false; + } + + if (Strings.areEqual(initiallyResolved.id(), PartitionKeyRange.MASTER_PARTITION_KEY_RANGE_ID) && + Strings.areEqual(newlyResolved.id(), PartitionKeyRange.MASTER_PARTITION_KEY_RANGE_ID)) { + return true; + } + + if (Strings.areEqual(initiallyResolved.id(), PartitionKeyRange.MASTER_PARTITION_KEY_RANGE_ID) + || Strings.areEqual(newlyResolved.id(), PartitionKeyRange.MASTER_PARTITION_KEY_RANGE_ID)) { + String message = + "Request was resolved to master partition and then to server partition."; + assert false : message; + logger.warn(message); + return false; + } + + if (ResourceId.parse(initiallyResolved.resourceId()).getDocumentCollection() + != ResourceId.parse(newlyResolved.resourceId()).getDocumentCollection()) { + return false; + } + + if (!Strings.areEqual(initiallyResolved.id(), newlyResolved.id()) && + !(newlyResolved.getParents() != null && newlyResolved.getParents().contains(initiallyResolved.id()))) { + // the above condition should be always false in current codebase. + // We don't need to refresh any caches if we resolved to a range which is child of previously resolved range. + // Quorum reads should be handled transparently as child partitions share LSNs with parent partitions which are gone. + String message = + "Request is targeted at a partition key range which is not child of previously targeted range."; + assert false : message; + logger.warn(message); + + return false; + } + + return true; + } + + /** + * Validates if the target partition to which the request is being sent has changed during retry. + *

+ * If that happens, the request is no more valid and need to be retried. + * + * @param request Request in progress + * @param targetRange Target partition key range determined by address resolver + * @*/ + private void throwIfTargetChanged(RxDocumentServiceRequest request, PartitionKeyRange targetRange) throws CosmosClientException { + // If new range is child of previous range, we don't need to throw any exceptions + // as LSNs are continued on child ranges. + if (request.requestContext.resolvedPartitionKeyRange != null && + !isSameCollection(request.requestContext.resolvedPartitionKeyRange, targetRange)) { + if (!request.getIsNameBased()) { + String message = String.format( + "Target should not change for non name based requests. Previous target {}, Current {}", + request.requestContext.resolvedPartitionKeyRange, targetRange); + assert false : message; + logger.warn(message); + } + + request.requestContext.resolvedPartitionKeyRange = null; + throw new InvalidPartitionException(RMResources.InvalidTarget, request.getResourceAddress()); + } + } + + private static void ensureRoutingMapPresent( + RxDocumentServiceRequest request, + CollectionRoutingMap routingMap, + DocumentCollection collection) throws CosmosClientException { + if (routingMap == null && request.getIsNameBased() && request.getPartitionKeyRangeIdentity() != null + && request.getPartitionKeyRangeIdentity().getCollectionRid() != null) { + // By design, if partitionkeyrangeid header is present and it contains collectionrid for collection + // which doesn't exist, we return InvalidPartitionException. Backend does the same. + // Caller (client SDK or whoever attached the header) supposedly has outdated collection cache and will refresh it. + // We cannot retry here, as the logic for retry in this case is use-case specific. + logger.debug( + "Routing map for request with partitionkeyrageid {} was not found", + request.getPartitionKeyRangeIdentity().toHeader()); + + InvalidPartitionException invalidPartitionException = new InvalidPartitionException(); + BridgeInternal.setResourceAddress(invalidPartitionException, request.getResourceAddress()); + throw invalidPartitionException; + } + + if (routingMap == null) { + logger.debug( + "Routing map was not found although collection cache is upto date for collection {}", + collection.resourceId()); + // Routing map not found although collection was resolved correctly. + NotFoundException e = new NotFoundException(); + BridgeInternal.setResourceAddress(e, request.getResourceAddress()); + throw e; + } + } + + private Mono tryResolveServerPartitionAsync( + RxDocumentServiceRequest request, + DocumentCollection collection, + CollectionRoutingMap routingMap, + boolean collectionCacheIsUptodate, + boolean collectionRoutingMapCacheIsUptodate, + boolean forceRefreshPartitionAddresses) { + + try { + // Check if this request partitionkeyrange-aware routing logic. We cannot retry here in this case + // and need to bubble up errors. 
+ if (request.getPartitionKeyRangeIdentity() != null) { + return this.tryResolveServerPartitionByPartitionKeyRangeIdAsync( + request, + collection, + routingMap, + collectionCacheIsUptodate, + collectionRoutingMapCacheIsUptodate, + forceRefreshPartitionAddresses); + } + + if (!request.getResourceType().isPartitioned() && + !(request.getResourceType() == ResourceType.StoredProcedure && request.getOperationType() == OperationType.ExecuteJavaScript) && + // Collection head is sent internally for strong consistency given routing hints from original requst, which is for partitioned resource. + !(request.getResourceType() == ResourceType.DocumentCollection && request.getOperationType() == OperationType.Head)) { + logger.error( + "Shouldn't come here for non partitioned resources. resourceType : {}, operationtype:{}, resourceaddress:{}", + request.getResourceType(), + request.getOperationType(), + request.getResourceAddress()); + return Mono.error(BridgeInternal.setResourceAddress(new InternalServerErrorException(RMResources.InternalServerError), request.getResourceAddress())); + } + + PartitionKeyRange range; + String partitionKeyString = request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY); + + if (partitionKeyString != null) { + range = this.tryResolveServerPartitionByPartitionKey( + request, + partitionKeyString, + collectionCacheIsUptodate, + collection, + routingMap); + } else { + range = this.tryResolveSinglePartitionCollection(request, routingMap, collectionCacheIsUptodate); + } + + if (range == null) { + // Collection cache or routing map cache is potentially outdated. Return empty - + // upper logic will refresh cache and retry. + return Mono.empty(); + } + + Mono addressesObs = this.addressCache.tryGetAddresses( + request, + new PartitionKeyRangeIdentity(collection.resourceId(), range.id()), + forceRefreshPartitionAddresses); + + return addressesObs.flatMap(addresses -> Mono.just(new ResolutionResult(range, addresses))).switchIfEmpty(Mono.defer(() -> { + logger.info( + "Could not resolve addresses for identity {}/{}. Potentially collection cache or routing map cache is outdated. Return empty - upper logic will refresh and retry. ", + new PartitionKeyRangeIdentity(collection.resourceId(), range.id())); + return Mono.empty(); + })); + + } catch (Exception e) { + return Mono.error(e); + } + } + + private PartitionKeyRange tryResolveSinglePartitionCollection( + RxDocumentServiceRequest request, + CollectionRoutingMap routingMap, + boolean collectionCacheIsUptoDate) throws CosmosClientException { + // Neither partitionkey nor partitionkeyrangeid is specified. + // Three options here: + // * This is non-partitioned collection and old client SDK which doesn't send partition key. In + // this case there's single entry in routing map. But can be multiple entries if before that + // existed partitioned collection with same name. + // * This is partitioned collection and old client SDK which doesn't send partition key. + // In this case there can be multiple ranges in routing map. + // * This is partitioned collection and this is custom written REST sdk, which has a bug and doesn't send + // partition key. + // We cannot know for sure whether this is partitioned collection or not, because + // partition key definition cache can be outdated. + // So we route request to the first partition. If this is non-partitioned collection - request will succeed. + // If it is partitioned collection - backend will return bad request as partition key header is required in this case. 
+ if (routingMap.getOrderedPartitionKeyRanges().size() == 1) { + return routingMap.getOrderedPartitionKeyRanges().get(0); + } + + if (collectionCacheIsUptoDate) { + throw BridgeInternal.setResourceAddress(new BadRequestException(RMResources.MissingPartitionKeyValue), request.getResourceAddress()); + } else { + return null; + } + } + + private Mono resolveMasterResourceAddress(RxDocumentServiceRequest request, + boolean forceRefreshPartitionAddresses) { + assert ReplicatedResourceClient.isReadingFromMaster(request.getResourceType(), request.getOperationType()) + && request.getPartitionKeyRangeIdentity() == null; + + // ServiceIdentity serviceIdentity = this.masterServiceIdentity; + Mono addressesObs = this.addressCache.tryGetAddresses(request, + masterPartitionKeyRangeIdentity,forceRefreshPartitionAddresses); + + return addressesObs.flatMap(addresses -> { + PartitionKeyRange partitionKeyRange = new PartitionKeyRange(); + partitionKeyRange.id(PartitionKeyRange.MASTER_PARTITION_KEY_RANGE_ID); + return Mono.just(new ResolutionResult(partitionKeyRange, addresses)); + }).switchIfEmpty(Mono.defer(() -> { + logger.warn("Could not get addresses for master partition"); + + // return Observable.error() + NotFoundException e = new NotFoundException(); + BridgeInternal.setResourceAddress(e, request.getResourceAddress()); + return Mono.error(e); + })); + } + + private class RefreshState { + + volatile boolean collectionCacheIsUptoDate; + volatile boolean collectionRoutingMapCacheIsUptoDate; + volatile DocumentCollection collection; + volatile CollectionRoutingMap routingMap; + volatile ResolutionResult resolutionResult; + } + + private Mono getOrRefreshRoutingMap(RxDocumentServiceRequest request, boolean forceRefreshPartitionAddresses) { + + RefreshState state = new RefreshState(); + + state.collectionCacheIsUptoDate = !request.getIsNameBased() || + (request.getPartitionKeyRangeIdentity() != null && request.getPartitionKeyRangeIdentity().getCollectionRid() != null); + state.collectionRoutingMapCacheIsUptoDate = false; + + Mono collectionObs = this.collectionCache.resolveCollectionAsync(request); + + Mono stateObs = collectionObs.flatMap(collection -> { + state.collection = collection; + Mono routingMapObs = + this.collectionRoutingMapCache.tryLookupAsync(collection.resourceId(), null, request.forceCollectionRoutingMapRefresh, request.properties); + final DocumentCollection underlyingCollection = collection; + return routingMapObs.flatMap(routingMap -> { + state.routingMap = routingMap; + + if (request.forcePartitionKeyRangeRefresh) { + state.collectionRoutingMapCacheIsUptoDate = true; + request.forcePartitionKeyRangeRefresh = false; + if (routingMap != null) { + return this.collectionRoutingMapCache.tryLookupAsync(underlyingCollection.resourceId(), routingMap, request.properties) + .map(newRoutingMap -> { + state.routingMap = newRoutingMap; + return state; + }); + } + } + + return Mono.just(state); + }).switchIfEmpty(Mono.defer(() -> { + if (request.forcePartitionKeyRangeRefresh) { + state.collectionRoutingMapCacheIsUptoDate = true; + request.forcePartitionKeyRangeRefresh = false; + } + return Mono.just(state); + })); + }); + + return stateObs.flatMap(newState -> { + + if (newState.routingMap == null && !newState.collectionCacheIsUptoDate) { + // Routing map was not found by resolved collection rid. Maybe collection rid is outdated. + // Refresh collection cache and reresolve routing map. 
+ request.forceNameCacheRefresh = true; + newState.collectionCacheIsUptoDate = true; + newState.collectionRoutingMapCacheIsUptoDate = false; + + Mono newCollectionObs = this.collectionCache.resolveCollectionAsync(request); + + return newCollectionObs.flatMap(collection -> { + newState.collection = collection; + Mono newRoutingMapObs = this.collectionRoutingMapCache.tryLookupAsync( + collection.resourceId(), + null, + request.properties); + + return newRoutingMapObs.map(routingMap -> { + newState.routingMap = routingMap; + return newState; + }); + } + ); + + } + + return Mono.just(newState); + }); + } + + private Mono getStateWithNewRoutingMap(RefreshState state, Mono routingMapSingle) { + return routingMapSingle.map(r -> { + state.routingMap = r; + return state; + }).switchIfEmpty(Mono.fromSupplier(() -> { + state.routingMap = null; + return state; + })); + } + + /** + * Resolves the endpoint of the partition for the given request + * + * @param request Request for which the partition endpoint resolution is to be performed + * @param forceRefreshPartitionAddresses Force refresh the partition's endpoint + * @return ResolutionResult + */ + private Mono resolveAddressesAndIdentityAsync( + RxDocumentServiceRequest request, + boolean forceRefreshPartitionAddresses) { + + if (ReplicatedResourceClient.isReadingFromMaster(request.getResourceType(), request.getOperationType()) + && request.getPartitionKeyRangeIdentity() == null) { + return resolveMasterResourceAddress(request, forceRefreshPartitionAddresses); + } + + Mono refreshStateObs = this.getOrRefreshRoutingMap(request, forceRefreshPartitionAddresses); + + return refreshStateObs.flatMap( + state -> { + try { + AddressResolver.ensureRoutingMapPresent(request, state.routingMap, state.collection); + + } catch (Exception e) { + return Mono.error(e); + } + + // At this point we have both collection and routingMap. + Mono resultObs = this.tryResolveServerPartitionAsync( + request, + state.collection, + state.routingMap, + state.collectionCacheIsUptoDate, + state.collectionRoutingMapCacheIsUptoDate, + forceRefreshPartitionAddresses); + + + Function> addCollectionRidIfNameBased = funcResolutionResult -> { + assert funcResolutionResult != null; + if (request.getIsNameBased()) { + // Append collection rid. + // If we resolved collection rid incorrectly because of outdated cache, this can lead + // to incorrect routing decisions. But backend will validate collection rid and throw + // InvalidPartitionException if we reach wrong collection. + // Also this header will be used by backend to inject collection rid into metrics for + // throttled requests. 
+ request.getHeaders().put(WFConstants.BackendHeaders.COLLECTION_RID, state.collection.resourceId()); + } + + return Mono.just(funcResolutionResult); + }; + + return resultObs.flatMap(addCollectionRidIfNameBased).switchIfEmpty(Mono.defer(() -> { + // result is empty + + Function> ensureCollectionRoutingMapCacheIsUptoDateFunc = funcState -> { + if (!funcState.collectionRoutingMapCacheIsUptoDate) { + funcState.collectionRoutingMapCacheIsUptoDate = true; + Mono newRoutingMapObs = this.collectionRoutingMapCache.tryLookupAsync( + funcState.collection.resourceId(), + funcState.routingMap, + request.properties); + + return getStateWithNewRoutingMap(funcState, newRoutingMapObs); + } else { + return Mono.just(state); + } + }; + + Function> resolveServerPartition = funcState -> { + + try { + AddressResolver.ensureRoutingMapPresent(request, funcState.routingMap, funcState.collection); + } catch (Exception e) { + return Mono.error(e); + } + + return this.tryResolveServerPartitionAsync( + request, + funcState.collection, + funcState.routingMap, + true, + true, + forceRefreshPartitionAddresses); + }; + + Function> onNullThrowNotFound = funcResolutionResult -> { + if (funcResolutionResult == null) { + logger.debug("Couldn't route partitionkeyrange-oblivious request after retry/cache refresh. Collection doesn't exist."); + + // At this point collection cache and routing map caches are refreshed. + // The only reason we will get here is if collection doesn't exist. + // Case when partition-key-range doesn't exist is handled in the corresponding method. + + return Mono.error(BridgeInternal.setResourceAddress(new NotFoundException(), request.getResourceAddress())); + } + + return Mono.just(funcResolutionResult); + }; + + // Couldn't resolve server partition or its addresses. + // Either collection cache is outdated or routing map cache is outdated. + if (!state.collectionCacheIsUptoDate) { + request.forceNameCacheRefresh = true; + state.collectionCacheIsUptoDate = true; + + Mono newCollectionObs = this.collectionCache.resolveCollectionAsync(request); + Mono newRefreshStateObs = newCollectionObs.flatMap(collection -> { + state.collection = collection; + + if (collection.resourceId() != state.routingMap.getCollectionUniqueId()) { + // Collection cache was stale. We resolved to new Rid. routing map cache is potentially stale + // for this new collection rid. Mark it as such. + state.collectionRoutingMapCacheIsUptoDate = false; + Mono newRoutingMap = this.collectionRoutingMapCache.tryLookupAsync( + collection.resourceId(), + null, + request.properties); + + return getStateWithNewRoutingMap(state, newRoutingMap); + } + + return Mono.just(state); + }); + + Mono newResultObs = newRefreshStateObs.flatMap(ensureCollectionRoutingMapCacheIsUptoDateFunc) + .flatMap(resolveServerPartition); + + return newResultObs.flatMap(onNullThrowNotFound).flatMap(addCollectionRidIfNameBased); + + } else { + return ensureCollectionRoutingMapCacheIsUptoDateFunc.apply(state) + .flatMap(resolveServerPartition) + .flatMap(onNullThrowNotFound) + .flatMap(addCollectionRidIfNameBased); + } + })); + } + ); + } + + private ResolutionResult handleRangeAddressResolutionFailure( + RxDocumentServiceRequest request, + boolean collectionCacheIsUpToDate, + boolean routingMapCacheIsUpToDate, + CollectionRoutingMap routingMap) throws CosmosClientException { + // Optimization to not refresh routing map unnecessary. As we keep track of parent child relationships, + // we can determine that a range is gone just by looking up in the routing map. 
+ if (collectionCacheIsUpToDate && routingMapCacheIsUpToDate || + collectionCacheIsUpToDate && routingMap.IsGone(request.getPartitionKeyRangeIdentity().getPartitionKeyRangeId())) { + String errorMessage = String.format( + RMResources.PartitionKeyRangeNotFound, + request.getPartitionKeyRangeIdentity().getPartitionKeyRangeId(), + request.getPartitionKeyRangeIdentity().getCollectionRid()); + throw BridgeInternal.setResourceAddress(new PartitionKeyRangeGoneException(errorMessage), request.getResourceAddress()); + } + + return null; + } + + private Mono returnOrError(Callable function) { + try { + return Mono.justOrEmpty(function.call()); + } catch (Exception e) { + return Mono.error(e); + } + } + + private Mono tryResolveServerPartitionByPartitionKeyRangeIdAsync( + RxDocumentServiceRequest request, + DocumentCollection collection, + CollectionRoutingMap routingMap, + boolean collectionCacheIsUpToDate, + boolean routingMapCacheIsUpToDate, + boolean forceRefreshPartitionAddresses) { + + PartitionKeyRange partitionKeyRange = routingMap.getRangeByPartitionKeyRangeId(request.getPartitionKeyRangeIdentity().getPartitionKeyRangeId()); + if (partitionKeyRange == null) { + logger.debug("Cannot resolve range '{}'", request.getPartitionKeyRangeIdentity().toHeader()); + return returnOrError(() -> this.handleRangeAddressResolutionFailure(request, collectionCacheIsUpToDate, routingMapCacheIsUpToDate, routingMap)); + } + + Mono addressesObs = this.addressCache.tryGetAddresses( + request, + new PartitionKeyRangeIdentity(collection.resourceId(), request.getPartitionKeyRangeIdentity().getPartitionKeyRangeId()), + forceRefreshPartitionAddresses); + + return addressesObs.flatMap(addresses -> Mono.just(new ResolutionResult(partitionKeyRange, addresses))).switchIfEmpty(Mono.defer(() -> { + logger.debug("Cannot resolve addresses for range '{}'", request.getPartitionKeyRangeIdentity().toHeader()); + + try { + return Mono.justOrEmpty(this.handleRangeAddressResolutionFailure(request, collectionCacheIsUpToDate, routingMapCacheIsUpToDate, routingMap)); + } catch (CosmosClientException e) { + return Mono.error(e); + } + })); + } + + private PartitionKeyRange tryResolveServerPartitionByPartitionKey( + RxDocumentServiceRequest request, + String partitionKeyString, + boolean collectionCacheUptoDate, + DocumentCollection collection, + CollectionRoutingMap routingMap) throws CosmosClientException { + if (request == null) { + throw new NullPointerException("request"); + } + + if (partitionKeyString == null) { + throw new NullPointerException("partitionKeyString"); + } + + if (collection == null) { + throw new NullPointerException("collection"); + } + + if (routingMap == null) { + throw new NullPointerException("routingMap"); + } + + PartitionKeyInternal partitionKey; + + try { + partitionKey = PartitionKeyInternal.fromJsonString(partitionKeyString); + } catch (Exception ex) { + throw BridgeInternal.setResourceAddress(new BadRequestException( + String.format(RMResources.InvalidPartitionKey, partitionKeyString), + ex), request.getResourceAddress()); + } + + if (partitionKey == null) { + throw new InternalServerErrorException(String.format("partition key is null '%s'", partitionKeyString)); + } + + if (partitionKey.equals(PartitionKeyInternal.Empty) || partitionKey.getComponents().size() == collection.getPartitionKey().paths().size()) { + // Although we can compute effective partition key here, in general case this GATEWAY can have outdated + // partition key definition cached - like if collection with same name but with RANGE 
partitioning is created. + // In this case server will not pass x-ms-documentdb-collection-rid check and will return back InvalidPartitionException. + // GATEWAY will refresh its cache and retry. + String effectivePartitionKey = PartitionKeyInternalHelper.getEffectivePartitionKeyString(partitionKey, collection.getPartitionKey()); + + // There should be exactly one range which contains a partition key. Always. + return routingMap.getRangeByEffectivePartitionKey(effectivePartitionKey); + } + + if (collectionCacheUptoDate) { + BadRequestException badRequestException = BridgeInternal.setResourceAddress(new BadRequestException(RMResources.PartitionKeyMismatch), request.getResourceAddress()); + badRequestException.responseHeaders().put(WFConstants.BackendHeaders.SUB_STATUS, Integer.toString(HttpConstants.SubStatusCodes.PARTITION_KEY_MISMATCH)); + + throw badRequestException; + } + + // Partition key supplied has different number paths than locally cached partition key definition. + // Three things can happen: + // 1. User supplied wrong partition key. + // 2. Client SDK has outdated partition key definition cache and extracted wrong value from the document. + // 3. GATEWAY's cache is outdated. + // + // What we will do is append x-ms-documentdb-collection-rid header and forward it to random collection partition. + // * If collection rid matches, server will send back 400.1001, because it also will not be able to compute + // effective partition key. GATEWAY will forward this status code to client - client will handle it. + // * If collection rid doesn't match, server will send back InvalidPartiitonException and GATEWAY will + // refresh name routing cache - this will refresh partition key definition as well, and retry. + + logger.debug( + "Cannot compute effective partition key. Definition has '{}' paths, values supplied has '{}' paths. 
Will refresh cache and retry.", + collection.getPartitionKey().paths().size(), + partitionKey.getComponents().size()); + + return null; + } + + private class ResolutionResult { + final PartitionKeyRange TargetPartitionKeyRange; + final AddressInformation[] Addresses; + + ResolutionResult( + PartitionKeyRange targetPartitionKeyRange, + AddressInformation[] addresses) { + if (targetPartitionKeyRange == null) { + throw new NullPointerException("targetPartitionKeyRange"); + } + + if (addresses == null) { + throw new NullPointerException("addresses"); + } + + this.TargetPartitionKeyRange = targetPartitionKeyRange; + this.Addresses = addresses; + } + } +} + diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/AddressSelector.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/AddressSelector.java new file mode 100644 index 0000000000000..05c72415d6671 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/AddressSelector.java @@ -0,0 +1,105 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.GoneException; +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import com.azure.data.cosmos.internal.Strings; +import reactor.core.publisher.Mono; + +import java.net.URI; +import java.util.Arrays; +import java.util.List; +import java.util.stream.Collectors; + +public class AddressSelector { + private final IAddressResolver addressResolver; + private final Protocol protocol; + + public AddressSelector(IAddressResolver addressResolver, Protocol protocol) { + this.addressResolver = addressResolver; + this.protocol = protocol; + } + + public Mono> resolveAllUriAsync( + RxDocumentServiceRequest request, + boolean includePrimary, + boolean forceRefresh) { + Mono> allReplicaAddressesObs = this.resolveAddressesAsync(request, forceRefresh); + return allReplicaAddressesObs.map(allReplicaAddresses -> allReplicaAddresses.stream().filter(a -> includePrimary || !a.isPrimary()) + .map(a -> HttpUtils.toURI(a.getPhysicalUri())).collect(Collectors.toList())); + } + + public Mono resolvePrimaryUriAsync(RxDocumentServiceRequest request, boolean forceAddressRefresh) { + Mono> replicaAddressesObs = this.resolveAddressesAsync(request, forceAddressRefresh); + return replicaAddressesObs.flatMap(replicaAddresses -> { + try { + return Mono.just(AddressSelector.getPrimaryUri(request, replicaAddresses)); + } catch (Exception e) { + return Mono.error(e); + } + }); + } + + public static URI getPrimaryUri(RxDocumentServiceRequest request, List replicaAddresses) throws GoneException { + AddressInformation primaryAddress = null; + + if (request.getDefaultReplicaIndex() != null) { + int defaultReplicaIndex = request.getDefaultReplicaIndex(); + if (defaultReplicaIndex >= 0 && defaultReplicaIndex < replicaAddresses.size()) { + primaryAddress = replicaAddresses.get(defaultReplicaIndex); + } + } else { + primaryAddress = replicaAddresses.stream().filter(address -> address.isPrimary() && !address.getPhysicalUri().contains("[")) + .findAny().orElse(null); + } + + if (primaryAddress == null) { + // Primary endpoint (of the desired protocol) was not found. + throw new GoneException(String.format("The requested resource is no longer available at the server. 
Returned addresses are {%s}", + replicaAddresses.stream().map(AddressInformation::getPhysicalUri).collect(Collectors.joining(","))), null); + } + + return HttpUtils.toURI(primaryAddress.getPhysicalUri()); + } + + public Mono> resolveAddressesAsync(RxDocumentServiceRequest request, boolean forceAddressRefresh) { + Mono> resolvedAddressesObs = + (this.addressResolver.resolveAsync(request, forceAddressRefresh)) + .map(addresses -> Arrays.stream(addresses) + .filter(address -> !Strings.isNullOrEmpty(address.getPhysicalUri()) && Strings.areEqualIgnoreCase(address.getProtocolScheme(), this.protocol.scheme())) + .collect(Collectors.toList())); + + return resolvedAddressesObs.map( + resolvedAddresses -> { + List r = resolvedAddresses.stream().filter(address -> !address.isPublic()).collect(Collectors.toList()); + if (r.size() > 0) { + return r; + } else { + return resolvedAddresses.stream().filter(AddressInformation::isPublic).collect(Collectors.toList()); + } + } + ); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/BarrierRequestHelper.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/BarrierRequestHelper.java new file mode 100644 index 0000000000000..2eca282760cfd --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/BarrierRequestHelper.java @@ -0,0 +1,168 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.InternalServerErrorException; +import com.azure.data.cosmos.internal.AuthorizationTokenType; +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.IAuthorizationTokenProvider; +import com.azure.data.cosmos.internal.OperationType; +import com.azure.data.cosmos.internal.PathsHelper; +import com.azure.data.cosmos.internal.RMResources; +import com.azure.data.cosmos.internal.ResourceId; +import com.azure.data.cosmos.internal.ResourceType; +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import com.azure.data.cosmos.internal.Strings; +import com.azure.data.cosmos.internal.Utils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.Exceptions; +import reactor.core.publisher.Mono; + +import java.util.Map; + +public class BarrierRequestHelper { + private final static Logger logger = LoggerFactory.getLogger(BarrierRequestHelper.class); + + public static Mono createAsync( + RxDocumentServiceRequest request, + IAuthorizationTokenProvider authorizationTokenProvider, + Long targetLsn, + Long targetGlobalCommittedLsn) { + + boolean isCollectionHeadRequest = BarrierRequestHelper.isCollectionHeadBarrierRequest( + request.getResourceType(), + request.getOperationType()); + + AuthorizationTokenType originalRequestTokenType = request.authorizationTokenType; + + if (originalRequestTokenType == AuthorizationTokenType.Invalid) { + String message = "AuthorizationTokenType not set for the read request"; + assert false : message; + logger.error(message); + } + + String authorizationToken = Strings.Emtpy; + RxDocumentServiceRequest barrierLsnRequest = null; + if (!isCollectionHeadRequest) { + // DB Feed + barrierLsnRequest = RxDocumentServiceRequest.create( + OperationType.HeadFeed, + (String) null, + (ResourceType) ResourceType.Database, + (Map) null); + } else if (request.getIsNameBased()) { + // Name based server request + + // get the collection full name + // dbs/{id}/colls/{collid}/ + String collectionLink = PathsHelper.getCollectionPath(request.getResourceAddress()); + barrierLsnRequest = RxDocumentServiceRequest.createFromName( + OperationType.Head, + collectionLink, + ResourceType.DocumentCollection); + } else { + // RID based Server request + barrierLsnRequest = RxDocumentServiceRequest.create( + OperationType.Head, + ResourceId.parse(request.getResourceId()).getDocumentCollectionId().toString(), + ResourceType.DocumentCollection, null); + } + + barrierLsnRequest.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123()); + + if (targetLsn != null && targetLsn > 0) { + barrierLsnRequest.getHeaders().put(HttpConstants.HttpHeaders.TARGET_LSN, targetLsn.toString()); + } + + if (targetGlobalCommittedLsn != null && targetGlobalCommittedLsn > 0) { + barrierLsnRequest.getHeaders().put(HttpConstants.HttpHeaders.TARGET_GLOBAL_COMMITTED_LSN, targetGlobalCommittedLsn.toString()); + } + + switch (originalRequestTokenType) { + case PrimaryMasterKey: + case PrimaryReadonlyMasterKey: + case SecondaryMasterKey: + case SecondaryReadonlyMasterKey: + authorizationToken = authorizationTokenProvider.getUserAuthorizationToken( + barrierLsnRequest.getResourceAddress(), + isCollectionHeadRequest ? 
ResourceType.DocumentCollection : ResourceType.Database, + HttpConstants.HttpMethods.HEAD, + barrierLsnRequest.getHeaders(), + originalRequestTokenType, + request.properties); + break; + + + case ResourceToken: + authorizationToken = request.getHeaders().get(HttpConstants.HttpHeaders.AUTHORIZATION); + break; + + default: + String unknownAuthToken = "Unknown authorization token kind for read request"; + assert false : unknownAuthToken; + logger.error(unknownAuthToken); + throw Exceptions.propagate(new InternalServerErrorException(RMResources.InternalServerError)); + } + + barrierLsnRequest.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorizationToken); + barrierLsnRequest.requestContext = request.requestContext.clone(); + + if (request.getPartitionKeyRangeIdentity() != null) { + barrierLsnRequest.routeTo(request.getPartitionKeyRangeIdentity()); + } + if (request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY) != null) { + barrierLsnRequest.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY)); + } + if (request.getHeaders().get(WFConstants.BackendHeaders.COLLECTION_RID) != null) { + barrierLsnRequest.getHeaders().put(WFConstants.BackendHeaders.COLLECTION_RID, request.getHeaders().get(WFConstants.BackendHeaders.COLLECTION_RID)); + } + + return Mono.just(barrierLsnRequest); + } + + static boolean isCollectionHeadBarrierRequest(ResourceType resourceType, OperationType operationType) { + switch (resourceType) { + case Attachment: + case Document: + case Conflict: + case StoredProcedure: + case UserDefinedFunction: + case Trigger: + return true; + case DocumentCollection: + if (operationType != OperationType.ReadFeed && operationType != OperationType.Query && operationType != OperationType.SqlQuery) { + return true; + } else { + return false; + } + case PartitionKeyRange: + // no logic for OperationType.GetSplitPoint and OperationType.AbortSplit + // as they are not applicable to SDK + return false; + default: + return false; + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/ConsistencyReader.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/ConsistencyReader.java new file mode 100644 index 0000000000000..3ddf572d09643 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/ConsistencyReader.java @@ -0,0 +1,426 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.GoneException; +import com.azure.data.cosmos.internal.ISessionContainer; +import com.azure.data.cosmos.NotFoundException; +import com.azure.data.cosmos.RequestTimeoutException; +import com.azure.data.cosmos.internal.Configs; +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.IAuthorizationTokenProvider; +import com.azure.data.cosmos.internal.ISessionToken; +import com.azure.data.cosmos.internal.RMResources; +import com.azure.data.cosmos.internal.RequestChargeTracker; +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Mono; + +import java.util.HashMap; +import java.util.List; + +import static com.azure.data.cosmos.internal.Utils.ValueHolder; + +/* + ConsistencyLevel Replication Mode Desired ReadMode + ------------------- -------------------- --------------------------------------------------------------------------- + STRONG Synchronous READ from READ Quorum + Asynchronous Not supported + + Bounded Staleness Synchronous READ from READ Quorum + Asynchronous READ from READ Quorum. Performing read barrier on Primary is unsupported. + + SESSION Sync/Async READ Any (With LSN Cookie) + DEFAULT to Primary as last resort (which should succeed always) + + EVENTUAL Sync/Async READ Any + + Client does validation of unsupported combinations. + + + Preliminaries + ============= + 1. We do primary copy/single master replication. + 2. We do sync or async replication depending on the value of DefaultConsistencyLevel on a database account. + If the database account is configured with DefaultConsistencyLevel = STRONG, we do sync replication. By default, for all other values of DefaultConsistencyLevel, we do asynchronous replication. + + Replica set + =========== + We define N as the current number of replicas protecting a partition. + At any given point, the value of N can fluctuate between NMax and NMin. + NMax is called the target replica set size and NMin is called the minimum write availability set size. + NMin and NMax are statically defined whereas N is dynamic. + Dynamic replica set is great for dealing with successive failures. + Since N fluctuates between NMax and NMin, the value of N at the time of calculation of W may not be the same when R is calculated. + This is a side effect of dynamic quorum and requires careful consideration. + + NMin = 2, NMax >= 3 + + Simultaneous Failures + ===================== + In general N replicas imply 2f+1 simultaneous failures + N = 5 allows for 2 simultaneous failures + N = 4 allows for 1 failure + N = 3 allows for 1 failure + N < 3 allows for 0 failures + + Quorums + ======= + W = Write Quorum = NUMBER of replicas which acknowledge a write before the primary can ack the client. It is majority set i.e. N/2 + 1 + R = READ Quorum = Set of replicas such that there is non-empty intersection between W and R that constitute N i.e. R = N -W + 1 + + For sync replication, W is used as a majority quorum. 
+ For async replication, W = 1. We have two LSNs, one is the quorum acknowledged LSN (LSN-Q) and another is what is visible to the client (LSN-C). + LSN-Q is the stable LSN which corresponds to the write quorum of Windows Fabric. LSN-C is unstable and corresponds to W=1. + + Assumptions + =========== + Nmin <= N <= Nmax + W >= N/2 + 1 + R = N - W + 1 + + N from the read standpoint means the number of addresses from the BE which return a successful response. + Successful response: any BE response containing the LSN response header is considered a successful response. Typically every response other than 410 is treated as a successful response. + + STRONG Consistency + ================== + STRONG READ requires the following guarantees. + * READ value is the latest that has been written. If a write operation finished, any subsequent reads should see that value. + * Monotonic guarantee. Any read that starts after a previous read operation should return an equal or higher version of the value. + + To perform a strong read we require that at least R (i.e. READ Quorum) replicas have the value committed. To achieve that, such a read: + * READs R replicas. If they have the same LSN, use the read result. + * If they don't have the same LSN, we will return the result with the highest LSN observed from those R replicas, after ensuring that LSN + becomes available with R replicas. + * Secondary replicas are always preferred for reading. If R secondaries have returned the result but cannot agree on the resulting LSN, we can include the Primary to satisfy the read quorum. + * If we only have R replicas (i.e. N==R), we include the primary in reading the result and validate N==R. + + Bounded Staleness + ================= + Sync Replication: + Bounded staleness uses the same logic as STRONG for cases where the server is using sync replication. + + Async Replication: + For async replication, we make sure that we do not use the Primary as a barrier for the read quorum. This is because the Primary is always going to run ahead (async replication uses W=1 on the Primary). + Using the primary would violate the monotonic read guarantees when we fall back to reading from a secondary in subsequent reads, as secondaries are always running slower compared to the Primary. + + SESSION + ======= + We read from secondaries one by one until we find a match for the client's session token (LSN-C). + We go to the primary as a last resort, which should satisfy LSN-C. + + Availability for Bounded Staleness (for NMax = 4 and NMin = 2): + When there is a partition, the minority quorum can remain available for reads as long as N >= 1 + When there is a partition, the minority quorum can remain available for writes as long as N >= 2 + + EVENTUAL + ======== + We can read from any replica. + + Availability for Bounded Staleness (for NMax = 4 and NMin = 2): + When there is a partition, the minority quorum can remain available for reads as long as N >= 1 + When there is a partition, the minority quorum can remain available for writes as long as N >= 2 + + READ Retry logic + ----------------- + For any NonQuorum reads (a.k.a. ReadAny), the AddressCache is refreshed under the following conditions: + 1) No secondary address is found in the address cache. + 2) The chosen secondary returned GoneException/EndpointNotFoundException. + + For Quorum READs the address cache is refreshed under the following conditions: + 1) We found only R secondaries where R < RMAX. + 2) We got GoneException/EndpointNotFoundException on all the secondaries we contacted.
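+
+    Worked example (illustrative note added here for clarity; it simply instantiates the formulas above):
+    With sync replication and N = NMax = 4: W = N/2 + 1 = 3 and R = N - W + 1 = 2.
+    With one replica down (N = 3): W = 2 and R = 2, so quorum reads and writes can still be served.
+    This matches the read quorum computed in ConsistencyReader.readAsync below,
+    readQuorumValue = maxReplicaCount - (maxReplicaCount / 2), which for maxReplicaCount = 4 is also 2.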
+ + */ +/** + * ConsistencyReader has a dependency on both StoreReader and QuorumReader. For Bounded Staleness and STRONG Consistency, it uses the Quorum Reader + * to converge on a read from read quorum number of replicas. + * For SESSION and EVENTUAL Consistency, it directly uses the store reader. + */ +public class ConsistencyReader { + private final static int MAX_NUMBER_OF_SECONDARY_READ_RETRIES = 3; + private final static Logger logger = LoggerFactory.getLogger(ConsistencyReader.class); + + private final AddressSelector addressSelector; + private final GatewayServiceConfigurationReader serviceConfigReader; + private final IAuthorizationTokenProvider authorizationTokenProvider; + private final StoreReader storeReader; + private final QuorumReader quorumReader; + private final Configs configs; + + public ConsistencyReader( + Configs configs, + AddressSelector addressSelector, + ISessionContainer sessionContainer, + TransportClient transportClient, + GatewayServiceConfigurationReader serviceConfigReader, + IAuthorizationTokenProvider authorizationTokenProvider) { + this.configs = configs; + this.addressSelector = addressSelector; + this.serviceConfigReader = serviceConfigReader; + this.authorizationTokenProvider = authorizationTokenProvider; + this.storeReader = createStoreReader(transportClient, addressSelector, sessionContainer); + this.quorumReader = createQuorumReader(transportClient, addressSelector, this.storeReader, serviceConfigReader, authorizationTokenProvider); + } + + public Mono readAsync(RxDocumentServiceRequest entity, + TimeoutHelper timeout, + boolean isInRetry, + boolean forceRefresh) { + if (!isInRetry) { + if (timeout.isElapsed()) { + return Mono.error(new RequestTimeoutException()); + } + + } else { + if (timeout.isElapsed()) { + return Mono.error(new GoneException()); + } + } + + entity.requestContext.timeoutHelper = timeout; + + if (entity.requestContext.requestChargeTracker == null) { + entity.requestContext.requestChargeTracker = new RequestChargeTracker(); + } + + if(entity.requestContext.cosmosResponseDiagnostics == null) { + entity.requestContext.cosmosResponseDiagnostics = BridgeInternal.createCosmosResponseDiagnostics(); + } + + entity.requestContext.forceRefreshAddressCache = forceRefresh; + + ValueHolder targetConsistencyLevel = ValueHolder.initialize(null); + ValueHolder useSessionToken = ValueHolder.initialize(null); + ReadMode desiredReadMode; + try { + desiredReadMode = this.deduceReadMode(entity, targetConsistencyLevel, useSessionToken); + } catch (CosmosClientException e) { + return Mono.error(e); + } + int maxReplicaCount = this.getMaxReplicaSetSize(entity); + int readQuorumValue = maxReplicaCount - (maxReplicaCount / 2); + + switch (desiredReadMode) { + case Primary: + return this.readPrimaryAsync(entity, useSessionToken.v); + + case Strong: + entity.requestContext.performLocalRefreshOnGoneException = true; + return this.quorumReader.readStrongAsync(entity, readQuorumValue, desiredReadMode); + + case BoundedStaleness: + entity.requestContext.performLocalRefreshOnGoneException = true; + + // for bounded staleness, we are defaulting to read strong for local region reads. + // this can be done since we are always running with majority quorum w = 3 (or 2 during quorum downshift). + // This means that the primary will always be part of the write quorum, and + // therefore can be included for barrier reads. + + // NOTE: this assumes that we are running with SYNC replication (i.e. majority quorum). 
+ // When we run on a minority write quorum(w=2), to ensure monotonic read guarantees + // we always contact two secondary replicas and exclude primary. + // However, this model significantly reduces availability and available throughput for serving reads for bounded staleness during reconfiguration. + // Therefore, to ensure monotonic read guarantee from any replica set we will just use regular quorum read(R=2) since our write quorum is always majority(W=3) + return this.quorumReader.readStrongAsync(entity, readQuorumValue, desiredReadMode); + + case Any: + if (targetConsistencyLevel.v == ConsistencyLevel.SESSION) { + return this.readSessionAsync(entity, desiredReadMode); + } else { + return this.readAnyAsync(entity, desiredReadMode); + } + + default: + throw new IllegalStateException("invalid operation " + desiredReadMode); + } + } + + private Mono readPrimaryAsync(RxDocumentServiceRequest entity, + boolean useSessionToken) { + + Mono responseObs = this.storeReader.readPrimaryAsync( + entity, + false /*required valid LSN*/, + useSessionToken); + return responseObs.flatMap(response -> { + try { + return Mono.just(response.toResponse()); + } catch (CosmosClientException e) { + return Mono.error(e); + } + }); + } + + private Mono readAnyAsync(RxDocumentServiceRequest entity, + ReadMode readMode) { + Mono> responsesObs = this.storeReader.readMultipleReplicaAsync( + entity, + /* includePrimary */ true, + /* replicaCountToRead */ 1, + /* requiresValidLSN*/ false, + /* useSessionToken */ false, + /* readMode */ readMode); + + return responsesObs.flatMap( + responses -> { + if (responses.size() == 0) { + return Mono.error(new GoneException(RMResources.Gone)); + } + + try { + return Mono.just(responses.get(0).toResponse()); + } catch (CosmosClientException e) { + return Mono.error(e); + } + } + ); + } + + private Mono readSessionAsync(RxDocumentServiceRequest entity, + ReadMode readMode) { + + if (entity.requestContext.timeoutHelper.isElapsed()) { + return Mono.error(new GoneException()); + } + + Mono> responsesObs = this.storeReader.readMultipleReplicaAsync( + entity, + /* includePrimary */ true, + /* replicaCountToRead */ 1, + /* requiresValidLSN */ true, + /* useSessionToken */ true, + /* readMode */ readMode, + /* checkMinLsn */ true, + /* forceReadAll */ false); + + return responsesObs.flatMap(responses -> { + + if (responses.size() > 0) { + try { + return Mono.just(responses.get(0).toResponse(entity.requestContext.requestChargeTracker)); + } catch (NotFoundException notFoundException) { + try { + if (entity.requestContext.sessionToken != null + && responses.get(0).sessionToken != null + && !entity.requestContext.sessionToken.isValid(responses.get(0).sessionToken)) { + logger.warn("Convert to session read exception, request {} SESSION Lsn {}, responseLSN {}", entity.getResourceAddress(), entity.requestContext.sessionToken.convertToString(), responses.get(0).lsn); + notFoundException.responseHeaders().put(WFConstants.BackendHeaders.SUB_STATUS, Integer.toString(HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)); + } + return Mono.error(notFoundException); + } catch (CosmosClientException e) { + return Mono.error(e); + } + } catch (CosmosClientException dce) { + return Mono.error(dce); + } + + } + + // else + HashMap responseHeaders = new HashMap<>(); + responseHeaders.put(WFConstants.BackendHeaders.SUB_STATUS, Integer.toString(HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)); + ISessionToken requestSessionToken = entity.requestContext.sessionToken; + logger.warn("Fail the 
session read {}, request session token {}", entity.getResourceAddress(), requestSessionToken == null ? "" : requestSessionToken.convertToString()); + return Mono.error(new NotFoundException(RMResources.ReadSessionNotAvailable, responseHeaders, null)); + }); + } + + ReadMode deduceReadMode(RxDocumentServiceRequest request, + ValueHolder targetConsistencyLevel, + ValueHolder useSessionToken) throws CosmosClientException { + targetConsistencyLevel.v = RequestHelper.GetConsistencyLevelToUse(this.serviceConfigReader, request); + useSessionToken.v = (targetConsistencyLevel.v == ConsistencyLevel.SESSION); + + if (request.getDefaultReplicaIndex() != null) { + // Don't use session token - this is used by internal scenarios which technically don't intend session read when they target + // request to specific replica. + useSessionToken.v = false; + return ReadMode.Primary; //Let the addressResolver decides which replica to connect to. + } + + switch (targetConsistencyLevel.v) { + case EVENTUAL: + return ReadMode.Any; + + case CONSISTENT_PREFIX: + return ReadMode.Any; + + case SESSION: + return ReadMode.Any; + + case BOUNDED_STALENESS: + return ReadMode.BoundedStaleness; + + case STRONG: + return ReadMode.Strong; + + default: + throw new IllegalStateException("INVALID Consistency Level " + targetConsistencyLevel.v); + } + } + + public int getMaxReplicaSetSize(RxDocumentServiceRequest entity) { + boolean isMasterResource = ReplicatedResourceClient.isReadingFromMaster(entity.getResourceType(), entity.getOperationType()); + if (isMasterResource) { + return this.serviceConfigReader.getSystemReplicationPolicy().getMaxReplicaSetSize(); + } else { + return this.serviceConfigReader.getUserReplicationPolicy().getMaxReplicaSetSize(); + } + } + + public int getMinReplicaSetSize(RxDocumentServiceRequest entity) { + boolean isMasterResource = ReplicatedResourceClient.isReadingFromMaster(entity.getResourceType(), entity.getOperationType()); + if (isMasterResource) { + return this.serviceConfigReader.getSystemReplicationPolicy().getMinReplicaSetSize(); + } else { + return this.serviceConfigReader.getUserReplicationPolicy().getMinReplicaSetSize(); + } + } + + public StoreReader createStoreReader(TransportClient transportClient, + AddressSelector addressSelector, + ISessionContainer sessionContainer) { + return new StoreReader(transportClient, + addressSelector, + sessionContainer); + } + + public QuorumReader createQuorumReader(TransportClient transportClient, + AddressSelector addressSelector, + StoreReader storeReader, + GatewayServiceConfigurationReader serviceConfigurationReader, + IAuthorizationTokenProvider authorizationTokenProvider) { + return new QuorumReader(transportClient, + addressSelector, + storeReader, + serviceConfigurationReader, + authorizationTokenProvider, + configs); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/ConsistencyWriter.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/ConsistencyWriter.java new file mode 100644 index 0000000000000..2202c5c5ccfff --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/ConsistencyWriter.java @@ -0,0 +1,394 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the 
rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.GoneException; +import com.azure.data.cosmos.internal.ISessionContainer; +import com.azure.data.cosmos.RequestTimeoutException; +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.IAuthorizationTokenProvider; +import com.azure.data.cosmos.internal.Integers; +import com.azure.data.cosmos.internal.RMResources; +import com.azure.data.cosmos.internal.RequestChargeTracker; +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import com.azure.data.cosmos.internal.SessionTokenHelper; +import com.azure.data.cosmos.internal.Strings; +import com.azure.data.cosmos.internal.Utils; +import org.apache.commons.collections4.ComparatorUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; +import reactor.core.scheduler.Schedulers; + +import java.net.URI; +import java.time.Duration; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; + +/* + * ConsistencyWriter has two modes for writing - local quorum-acked write and globally strong write. + * + * The determination of whether a request is a local quorum-acked write or a globally strong write is through several factors: + * 1. Request.RequestContext.OriginalRequestConsistencyLevel - ensure that original request's consistency level, if set, is strong. + * 2. DEFAULT consistency level of the accoutn should be strong. + * 3. NUMBER of read regions returned by write response > 0. + * + * For quorum-acked write: + * We send single request to primary of a single partition, which will take care of replicating to its secondaries. Once write quorum number of replicas commits the write, the write request returns to the user with success. There is no additional handling for this case. + * + * For globally strong write: + * Similarly, we send single request to primary of write region, which will take care of replicating to its secondaries, one of which is XPPrimary. XPPrimary will then replicate to all remote regions, which will all ack from within their region. 
In the write region, the request returns from the backend once write quorum number of replicas commits the write - but at this time, the response cannot be returned to caller, since linearizability guarantees will be violated. ConsistencyWriter will continuously issue barrier head requests against the partition in question, until GlobalCommittedLsn is at least as big as the lsn of the original response. + * 1. Issue write request to write region + * 2. Receive response from primary of write region, look at GlobalCommittedLsn and LSN headers. + * 3. If GlobalCommittedLSN == LSN, return response to caller + * 4. If GlobalCommittedLSN < LSN, cache LSN in request as SelectedGlobalCommittedLSN, and issue barrier requests against any/all replicas. + * 5. Each barrier response will contain its own LSN and GlobalCommittedLSN, check for any response that satisfies GlobalCommittedLSN >= SelectedGlobalCommittedLSN + * 6. Return to caller on success. + */ +public class ConsistencyWriter { + private final static int MAX_NUMBER_OF_WRITE_BARRIER_READ_RETRIES = 30; + private final static int DELAY_BETWEEN_WRITE_BARRIER_CALLS_IN_MS = 30; + private final static int MAX_SHORT_BARRIER_RETRIES_FOR_MULTI_REGION = 4; + private final static int SHORT_BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION = 10; + + private final Logger logger = LoggerFactory.getLogger(ConsistencyWriter.class); + private final TransportClient transportClient; + private final AddressSelector addressSelector; + private final ISessionContainer sessionContainer; + private final IAuthorizationTokenProvider authorizationTokenProvider; + private final boolean useMultipleWriteLocations; + private final GatewayServiceConfigurationReader serviceConfigReader; + private final StoreReader storeReader; + + public ConsistencyWriter( + AddressSelector addressSelector, + ISessionContainer sessionContainer, + TransportClient transportClient, + IAuthorizationTokenProvider authorizationTokenProvider, + GatewayServiceConfigurationReader serviceConfigReader, + boolean useMultipleWriteLocations) { + this.transportClient = transportClient; + this.addressSelector = addressSelector; + this.sessionContainer = sessionContainer; + this.authorizationTokenProvider = authorizationTokenProvider; + this.useMultipleWriteLocations = useMultipleWriteLocations; + this.serviceConfigReader = serviceConfigReader; + this.storeReader = new StoreReader(transportClient, addressSelector, null /*we need store reader only for global strong, no session is needed*/); + } + + public Mono writeAsync( + RxDocumentServiceRequest entity, + TimeoutHelper timeout, + boolean forceRefresh) { + + if (timeout.isElapsed()) { + return Mono.error(new RequestTimeoutException()); + } + + String sessionToken = entity.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN); + + return this.writePrivateAsync(entity, timeout, forceRefresh).doOnEach( + arg -> { + try { + SessionTokenHelper.setOriginalSessionToken(entity, sessionToken); + } catch (Throwable throwable) { + logger.error("Unexpected failure in handling orig [{}]: new [{}]", arg, throwable.getMessage(), throwable); + } + } + ); + } + + Mono writePrivateAsync( + RxDocumentServiceRequest request, + TimeoutHelper timeout, + boolean forceRefresh) { + if (timeout.isElapsed()) { + return Mono.error(new RequestTimeoutException()); + } + + request.requestContext.timeoutHelper = timeout; + + if (request.requestContext.requestChargeTracker == null) { + request.requestContext.requestChargeTracker = new RequestChargeTracker(); + } + + if 
(request.requestContext.cosmosResponseDiagnostics == null) { + request.requestContext.cosmosResponseDiagnostics = BridgeInternal.createCosmosResponseDiagnostics(); + } + + request.requestContext.forceRefreshAddressCache = forceRefresh; + + if (request.requestContext.globalStrongWriteResponse == null) { + + Mono> replicaAddressesObs = this.addressSelector.resolveAddressesAsync(request, forceRefresh); + AtomicReference primaryURI = new AtomicReference<>(); + + return replicaAddressesObs.flatMap(replicaAddresses -> { + try { + List contactedReplicas = new ArrayList<>(); + replicaAddresses.forEach(replicaAddress -> contactedReplicas.add(HttpUtils.toURI(replicaAddress.getPhysicalUri()))); + BridgeInternal.setContactedReplicas(request.requestContext.cosmosResponseDiagnostics, contactedReplicas); + return Mono.just(AddressSelector.getPrimaryUri(request, replicaAddresses)); + } catch (GoneException e) { + // RxJava1 doesn't allow throwing checked exception from Observable operators + return Mono.error(e); + } + }).flatMap(primaryUri -> { + try { + primaryURI.set(primaryUri); + if (this.useMultipleWriteLocations && + RequestHelper.GetConsistencyLevelToUse(this.serviceConfigReader, request) == ConsistencyLevel.SESSION) { + // Set session token to ensure session consistency for write requests + // when writes can be issued to multiple locations + SessionTokenHelper.setPartitionLocalSessionToken(request, this.sessionContainer); + } else { + // When writes can only go to single location, there is no reason + // to session session token to the server. + SessionTokenHelper.validateAndRemoveSessionToken(request); + } + + } catch (Exception e) { + return Mono.error(e); + } + + return this.transportClient.invokeResourceOperationAsync(primaryUri, request) + .doOnError( + t -> { + try { + CosmosClientException ex = Utils.as(t, CosmosClientException.class); + try { + BridgeInternal.recordResponse(request.requestContext.cosmosResponseDiagnostics, request, + storeReader.createStoreResult(null, ex, false, false, primaryUri)); + } catch (CosmosClientException e) { + logger.error("Error occurred while recording response", e); + } + String value = ex.responseHeaders().get(HttpConstants.HttpHeaders.WRITE_REQUEST_TRIGGER_ADDRESS_REFRESH); + if (!Strings.isNullOrWhiteSpace(value)) { + Integer result = Integers.tryParse(value); + if (result != null && result == 1) { + startBackgroundAddressRefresh(request); + } + } + } catch (Throwable throwable) { + logger.error("Unexpected failure in handling orig [{}]", t.getMessage(), t); + logger.error("Unexpected failure in handling orig [{}] : new [{}]", t.getMessage(), throwable.getMessage(), throwable); + } + } + ); + + }).flatMap(response -> { + try { + BridgeInternal.recordResponse(request.requestContext.cosmosResponseDiagnostics, request, + storeReader.createStoreResult(response, null, false, false, primaryURI.get())); + } catch (CosmosClientException e) { + logger.error("Error occurred while recording response", e); + } + return barrierForGlobalStrong(request, response); + }); + } else { + + Mono barrierRequestObs = BarrierRequestHelper.createAsync(request, this.authorizationTokenProvider, null, request.requestContext.globalCommittedSelectedLSN); + return barrierRequestObs.flatMap(barrierRequest -> waitForWriteBarrierAsync(barrierRequest, request.requestContext.globalCommittedSelectedLSN) + .flatMap(v -> { + + if (!v) { + logger.warn("ConsistencyWriter: Write barrier has not been met for global strong request. 
SelectedGlobalCommittedLsn: {}", request.requestContext.globalCommittedSelectedLSN); + return Mono.error(new GoneException(RMResources.GlobalStrongWriteBarrierNotMet)); + } + + return Mono.just(request); + })).map(req -> req.requestContext.globalStrongWriteResponse); + } + } + + boolean isGlobalStrongRequest(RxDocumentServiceRequest request, StoreResponse response) { + if (this.serviceConfigReader.getDefaultConsistencyLevel() == ConsistencyLevel.STRONG) { + int numberOfReadRegions = -1; + String headerValue = null; + if ((headerValue = response.getHeaderValue(WFConstants.BackendHeaders.NUMBER_OF_READ_REGIONS)) != null) { + numberOfReadRegions = Integer.parseInt(headerValue); + } + + if (numberOfReadRegions > 0 && this.serviceConfigReader.getDefaultConsistencyLevel() == ConsistencyLevel.STRONG) { + return true; + } + } + + return false; + } + + Mono barrierForGlobalStrong(RxDocumentServiceRequest request, StoreResponse response) { + try { + if (ReplicatedResourceClient.isGlobalStrongEnabled() && this.isGlobalStrongRequest(request, response)) { + Utils.ValueHolder lsn = Utils.ValueHolder.initialize(-1l); + Utils.ValueHolder globalCommittedLsn = Utils.ValueHolder.initialize(-1l); + + getLsnAndGlobalCommittedLsn(response, lsn, globalCommittedLsn); + if (lsn.v == -1 || globalCommittedLsn.v == -1) { + logger.error("ConsistencyWriter: lsn {} or GlobalCommittedLsn {} is not set for global strong request", + lsn, globalCommittedLsn); + throw new GoneException(RMResources.Gone); + } + + request.requestContext.globalStrongWriteResponse = response; + request.requestContext.globalCommittedSelectedLSN = lsn.v; + + //if necessary we would have already refreshed cache by now. + request.requestContext.forceRefreshAddressCache = false; + + logger.debug("ConsistencyWriter: globalCommittedLsn {}, lsn {}", globalCommittedLsn, lsn); + //barrier only if necessary, i.e. when write region completes write, but read regions have not. + + if (globalCommittedLsn.v < lsn.v) { + Mono barrierRequestObs = BarrierRequestHelper.createAsync(request, + this.authorizationTokenProvider, + null, + request.requestContext.globalCommittedSelectedLSN); + + return barrierRequestObs.flatMap(barrierRequest -> { + Mono barrierWait = this.waitForWriteBarrierAsync(barrierRequest, request.requestContext.globalCommittedSelectedLSN); + + return barrierWait.flatMap(res -> { + if (!res) { + logger.error("ConsistencyWriter: Write barrier has not been met for global strong request. 
SelectedGlobalCommittedLsn: {}", + request.requestContext.globalCommittedSelectedLSN); + // RxJava1 doesn't allow throwing checked exception + return Mono.error(new GoneException(RMResources.GlobalStrongWriteBarrierNotMet)); + } + + return Mono.just(request.requestContext.globalStrongWriteResponse); + }); + + }); + + } else { + return Mono.just(request.requestContext.globalStrongWriteResponse); + } + } else { + return Mono.just(response); + } + + } catch (CosmosClientException e) { + // RxJava1 doesn't allow throwing checked exception from Observable operators + return Mono.error(e); + } + } + + private Mono waitForWriteBarrierAsync(RxDocumentServiceRequest barrierRequest, long selectedGlobalCommittedLsn) { + AtomicInteger writeBarrierRetryCount = new AtomicInteger(ConsistencyWriter.MAX_NUMBER_OF_WRITE_BARRIER_READ_RETRIES); + AtomicLong maxGlobalCommittedLsnReceived = new AtomicLong(0); + return Flux.defer(() -> { + if (barrierRequest.requestContext.timeoutHelper.isElapsed()) { + return Flux.error(new RequestTimeoutException()); + } + + Mono> storeResultListObs = this.storeReader.readMultipleReplicaAsync( + barrierRequest, + true /*allowPrimary*/, + 1 /*any replica with correct globalCommittedLsn is good enough*/, + false /*requiresValidLsn*/, + false /*useSessionToken*/, + ReadMode.Strong, + false /*checkMinLsn*/, + false /*forceReadAll*/); + return storeResultListObs.flatMap( + responses -> { + if (responses != null && responses.stream().anyMatch(response -> response.globalCommittedLSN >= selectedGlobalCommittedLsn)) { + return Mono.just(Boolean.TRUE); + } + + //get max global committed lsn from current batch of responses, then update if greater than max of all batches. + long maxGlobalCommittedLsn = (responses != null || !responses.isEmpty()) ? + (Long) responses.stream().map(s -> s.globalCommittedLSN).max(ComparatorUtils.NATURAL_COMPARATOR).get() : + 0L; + maxGlobalCommittedLsnReceived.set(maxGlobalCommittedLsnReceived.get() > maxGlobalCommittedLsn ? + maxGlobalCommittedLsnReceived.get() : maxGlobalCommittedLsn); + + //only refresh on first barrier call, set to false for subsequent attempts. + barrierRequest.requestContext.forceRefreshAddressCache = false; + + //trace on last retry. + if (writeBarrierRetryCount.getAndDecrement() == 0) { + logger.debug("ConsistencyWriter: WaitForWriteBarrierAsync - Last barrier multi-region strong. 
Responses: {}", + responses.stream().map(StoreResult::toString).collect(Collectors.joining("; "))); + } + + return Mono.empty(); + }).flux(); + }).repeatWhen(s -> { + if (writeBarrierRetryCount.get() == 0) { + return Flux.empty(); + } else { + + if ((ConsistencyWriter.MAX_NUMBER_OF_WRITE_BARRIER_READ_RETRIES - writeBarrierRetryCount.get()) > ConsistencyWriter.MAX_SHORT_BARRIER_RETRIES_FOR_MULTI_REGION) { + return Flux.just(0L).delayElements(Duration.ofMillis(ConsistencyWriter.DELAY_BETWEEN_WRITE_BARRIER_CALLS_IN_MS)); + } else { + return Flux.just(0L).delayElements(Duration.ofMillis(ConsistencyWriter.SHORT_BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION)); + } + } + }).take(1) + .switchIfEmpty(Mono.defer(() -> { + // after retries exhausted print this log and return false + logger.debug("ConsistencyWriter: Highest global committed lsn received for write barrier call is {}", maxGlobalCommittedLsnReceived); + + return Mono.just(false); + })) + .map(r -> r) + .single(); + } + + static void getLsnAndGlobalCommittedLsn(StoreResponse response, Utils.ValueHolder lsn, Utils.ValueHolder globalCommittedLsn) { + lsn.v = -1L; + globalCommittedLsn.v = -1L; + + String headerValue; + + if ((headerValue = response.getHeaderValue(WFConstants.BackendHeaders.LSN)) != null) { + lsn.v = Long.parseLong(headerValue); + } + + if ((headerValue = response.getHeaderValue(WFConstants.BackendHeaders.GLOBAL_COMMITTED_LSN)) != null) { + globalCommittedLsn.v = Long.parseLong(headerValue); + } + } + + void startBackgroundAddressRefresh(RxDocumentServiceRequest request) { + this.addressSelector.resolvePrimaryUriAsync(request, true) + .publishOn(Schedulers.elastic()) + .subscribe( + r -> { + }, + e -> logger.warn( + "Background refresh of the primary address failed with {}", e.getMessage(), e) + ); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/CustomHeaders.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/CustomHeaders.java new file mode 100644 index 0000000000000..7f89468297bb2 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/CustomHeaders.java @@ -0,0 +1,32 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.directconnectivity; + +public final class CustomHeaders { + + public static final class HttpHeaders { + // Specify whether to exclude system properties while storing the document + public static final String EXCLUDE_SYSTEM_PROPERTIES = "x-ms-exclude-system-properties"; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/ErrorUtils.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/ErrorUtils.java new file mode 100644 index 0000000000000..0408e7e7b9dc7 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/ErrorUtils.java @@ -0,0 +1,66 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.internal.http.HttpRequest; +import com.azure.data.cosmos.internal.http.HttpResponse; +import io.netty.handler.codec.http.HttpMethod; +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Mono; + +import java.net.URI; + +public class ErrorUtils { + private static final Logger logger = LoggerFactory.getLogger(ErrorUtils.class); + + static Mono getErrorResponseAsync(HttpResponse responseMessage, HttpRequest request) { + Mono responseAsString = ResponseUtils.toString(responseMessage.body()); + if (request.httpMethod() == HttpMethod.DELETE) { + return Mono.just(StringUtils.EMPTY); + } + return responseAsString; + } + + static void logGoneException(URI physicalAddress, String activityId) { + logger.trace("Listener not found. Store Physical Address {} ActivityId {}", + physicalAddress, activityId); + } + + protected static void logGoneException(String physicalAddress, String activityId) { + logger.trace("Listener not found. Store Physical Address {} ActivityId {}", + physicalAddress, activityId); + } + + static void logException(URI physicalAddress, String activityId) { + logger.trace("Store Request Failed. Store Physical Address {} ActivityId {}", + physicalAddress, activityId); + } + + protected static void logException(String physicalAddress, String activityId) { + logger.trace("Store Request Failed. 
Store Physical Address {} ActivityId {}", + physicalAddress, activityId); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/GatewayAddressCache.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/GatewayAddressCache.java new file mode 100644 index 0000000000000..e07c3d8c7253a --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/GatewayAddressCache.java @@ -0,0 +1,541 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.internal.DocumentCollection; +import com.azure.data.cosmos.PartitionKeyRangeGoneException; +import com.azure.data.cosmos.internal.AuthorizationTokenType; +import com.azure.data.cosmos.internal.Constants; +import com.azure.data.cosmos.internal.Exceptions; +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.IAuthorizationTokenProvider; +import com.azure.data.cosmos.internal.OperationType; +import com.azure.data.cosmos.internal.PartitionKeyRange; +import com.azure.data.cosmos.internal.Paths; +import com.azure.data.cosmos.internal.PathsHelper; +import com.azure.data.cosmos.internal.RMResources; +import com.azure.data.cosmos.internal.ResourceType; +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import com.azure.data.cosmos.internal.RxDocumentServiceResponse; +import com.azure.data.cosmos.internal.UserAgentContainer; +import com.azure.data.cosmos.internal.Utils; +import com.azure.data.cosmos.internal.caches.AsyncCache; +import com.azure.data.cosmos.internal.http.HttpClient; +import com.azure.data.cosmos.internal.http.HttpHeaders; +import com.azure.data.cosmos.internal.http.HttpRequest; +import com.azure.data.cosmos.internal.http.HttpResponse; +import com.azure.data.cosmos.internal.routing.PartitionKeyRangeIdentity; +import io.netty.handler.codec.http.HttpMethod; +import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.tuple.Pair; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.net.MalformedURLException; +import java.net.URISyntaxException; +import java.net.URL; +import 
java.time.Duration; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.stream.Collectors; + +public class GatewayAddressCache implements IAddressCache { + private final static Logger logger = LoggerFactory.getLogger(GatewayAddressCache.class); + private final static String protocolFilterFormat = "%s eq %s"; + private final static int DefaultBatchSize = 50; + + private final static int DefaultSuboptimalPartitionForceRefreshIntervalInSeconds = 600; + private final ServiceConfig serviceConfig = ServiceConfig.getInstance(); + + private final String databaseFeedEntryUrl = PathsHelper.generatePath(ResourceType.Database, "", true); + private final URL serviceEndpoint; + private final URL addressEndpoint; + + private final AsyncCache serverPartitionAddressCache; + private final ConcurrentHashMap suboptimalServerPartitionTimestamps; + private final long suboptimalPartitionForceRefreshIntervalInSeconds; + + private final String protocolScheme; + private final String protocolFilter; + private final IAuthorizationTokenProvider tokenProvider; + private final HashMap defaultRequestHeaders; + private final HttpClient httpClient; + + private volatile Pair masterPartitionAddressCache; + private volatile Instant suboptimalMasterPartitionTimestamp; + + public GatewayAddressCache( + URL serviceEndpoint, + Protocol protocol, + IAuthorizationTokenProvider tokenProvider, + UserAgentContainer userAgent, + HttpClient httpClient, + long suboptimalPartitionForceRefreshIntervalInSeconds) { + try { + this.addressEndpoint = new URL(serviceEndpoint, Paths.ADDRESS_PATH_SEGMENT); + } catch (MalformedURLException e) { + logger.error("serviceEndpoint {} is invalid", serviceEndpoint, e); + assert false; + throw new IllegalStateException(e); + } + this.tokenProvider = tokenProvider; + this.serviceEndpoint = serviceEndpoint; + this.serverPartitionAddressCache = new AsyncCache<>(); + this.suboptimalServerPartitionTimestamps = new ConcurrentHashMap<>(); + this.suboptimalMasterPartitionTimestamp = Instant.MAX; + + this.suboptimalPartitionForceRefreshIntervalInSeconds = suboptimalPartitionForceRefreshIntervalInSeconds; + + this.protocolScheme = protocol.scheme(); + this.protocolFilter = String.format(GatewayAddressCache.protocolFilterFormat, + Constants.Properties.PROTOCOL, + this.protocolScheme); + + this.httpClient = httpClient; + + if (userAgent == null) { + userAgent = new UserAgentContainer(); + } + + defaultRequestHeaders = new HashMap<>(); + defaultRequestHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgent.getUserAgent()); + + // Set requested API version header for version enforcement. 
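+        // Clarifying note (not part of the original change): the defaults collected here - the user agent set
+        // above and the API version set below - are copied into every address resolution request this cache
+        // issues, in getServerAddressesViaGatewayAsync and getMasterAddressesViaGatewayAsync.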
+ defaultRequestHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION); + } + + public GatewayAddressCache( + URL serviceEndpoint, + Protocol protocol, + IAuthorizationTokenProvider tokenProvider, + UserAgentContainer userAgent, + HttpClient httpClient) { + this(serviceEndpoint, + protocol, + tokenProvider, + userAgent, + httpClient, + DefaultSuboptimalPartitionForceRefreshIntervalInSeconds); + } + + private URL getServiceEndpoint() { + return this.serviceEndpoint; + } + + @Override + public Mono tryGetAddresses(RxDocumentServiceRequest request, + PartitionKeyRangeIdentity partitionKeyRangeIdentity, + boolean forceRefreshPartitionAddresses) { + + com.azure.data.cosmos.internal.Utils.checkNotNullOrThrow(request, "request", ""); + com.azure.data.cosmos.internal.Utils.checkNotNullOrThrow(partitionKeyRangeIdentity, "partitionKeyRangeIdentity", ""); + + if (StringUtils.equals(partitionKeyRangeIdentity.getPartitionKeyRangeId(), + PartitionKeyRange.MASTER_PARTITION_KEY_RANGE_ID)) { + + // if that's master partition return master partition address! + return this.resolveMasterAsync(request, forceRefreshPartitionAddresses, request.properties).map(Pair::getRight); + } + + Instant suboptimalServerPartitionTimestamp = this.suboptimalServerPartitionTimestamps.get(partitionKeyRangeIdentity); + + if (suboptimalServerPartitionTimestamp != null) { + boolean forceRefreshDueToSuboptimalPartitionReplicaSet = Duration.between(suboptimalServerPartitionTimestamp, Instant.now()).getSeconds() + > this.suboptimalPartitionForceRefreshIntervalInSeconds; + + if (forceRefreshDueToSuboptimalPartitionReplicaSet) { + // Compares the existing value for the specified key with a specified value, + // and if they are equal, updates the key with a third value. 
+ Instant newValue = this.suboptimalServerPartitionTimestamps.computeIfPresent(partitionKeyRangeIdentity, + (key, oldVal) -> { + if (suboptimalServerPartitionTimestamp.equals(oldVal)) { + return Instant.MAX; + } else { + return oldVal; + } + }); + + if (!newValue.equals(suboptimalServerPartitionTimestamp)) { + // the value was replaced; + forceRefreshPartitionAddresses = true; + } + } + } + + final boolean forceRefreshPartitionAddressesModified = forceRefreshPartitionAddresses; + + if (forceRefreshPartitionAddressesModified) { + this.serverPartitionAddressCache.refresh( + partitionKeyRangeIdentity, + () -> this.getAddressesForRangeId( + request, + partitionKeyRangeIdentity.getCollectionRid(), + partitionKeyRangeIdentity.getPartitionKeyRangeId(), + true)); + + this.suboptimalServerPartitionTimestamps.remove(partitionKeyRangeIdentity); + } + + Mono addressesObs = this.serverPartitionAddressCache.getAsync( + partitionKeyRangeIdentity, + null, + () -> this.getAddressesForRangeId( + request, + partitionKeyRangeIdentity.getCollectionRid(), + partitionKeyRangeIdentity.getPartitionKeyRangeId(), + false)); + + return addressesObs.map( + addresses -> { + if (notAllReplicasAvailable(addresses)) { + this.suboptimalServerPartitionTimestamps.putIfAbsent(partitionKeyRangeIdentity, Instant.now()); + } + + return addresses; + }).onErrorResume(ex -> { + CosmosClientException dce = com.azure.data.cosmos.internal.Utils.as(ex, CosmosClientException.class); + if (dce == null) { + if (forceRefreshPartitionAddressesModified) { + this.suboptimalServerPartitionTimestamps.remove(partitionKeyRangeIdentity); + } + return Mono.error(ex); + } else { + if (Exceptions.isStatusCode(dce, HttpConstants.StatusCodes.NOTFOUND) || + Exceptions.isStatusCode(dce, HttpConstants.StatusCodes.GONE) || + Exceptions.isSubStatusCode(dce, HttpConstants.SubStatusCodes.PARTITION_KEY_RANGE_GONE)) { + //remove from suboptimal cache in case the collection+pKeyRangeId combo is gone. + this.suboptimalServerPartitionTimestamps.remove(partitionKeyRangeIdentity); + return null; + } + return Mono.error(ex); + } + + }); + } + + public Mono> getServerAddressesViaGatewayAsync( + RxDocumentServiceRequest request, + String collectionRid, + List partitionKeyRangeIds, + boolean forceRefresh) { + String entryUrl = PathsHelper.generatePath(ResourceType.Document, collectionRid, true); + HashMap addressQuery = new HashMap<>(); + + addressQuery.put(HttpConstants.QueryStrings.URL, HttpUtils.urlEncode(entryUrl)); + + HashMap headers = new HashMap<>(defaultRequestHeaders); + if (forceRefresh) { + headers.put(HttpConstants.HttpHeaders.FORCE_REFRESH, Boolean.TRUE.toString()); + } + + addressQuery.put(HttpConstants.QueryStrings.FILTER, HttpUtils.urlEncode(this.protocolFilter)); + + addressQuery.put(HttpConstants.QueryStrings.PARTITION_KEY_RANGE_IDS, String.join(",", partitionKeyRangeIds)); + headers.put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123()); + String token; + + token = this.tokenProvider.getUserAuthorizationToken( + collectionRid, + ResourceType.Document, + HttpConstants.HttpMethods.GET, + headers, + AuthorizationTokenType.PrimaryMasterKey, + request.properties); + + if (token == null && request.getIsNameBased()) { + // User doesn't have rid based resource token. Maybe user has name based. 
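+            // Clarifying note (not part of the original change): the lookup is retried against the collection's
+            // name-based link, e.g. a path of the form dbs/{databaseName}/colls/{collectionName}, which
+            // PathsHelper.getCollectionPath(...) derives from the request's resource address below.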
+ String collectionAltLink = PathsHelper.getCollectionPath(request.getResourceAddress()); + token = this.tokenProvider.getUserAuthorizationToken( + collectionAltLink, + ResourceType.Document, + HttpConstants.HttpMethods.GET, + headers, + AuthorizationTokenType.PrimaryMasterKey, + request.properties); + } + + token = HttpUtils.urlEncode(token); + headers.put(HttpConstants.HttpHeaders.AUTHORIZATION, token); + URL targetEndpoint = Utils.setQuery(this.addressEndpoint.toString(), Utils.createQuery(addressQuery)); + String identifier = logAddressResolutionStart(request, targetEndpoint); + + HttpHeaders httpHeaders = new HttpHeaders(headers.size()); + for (Map.Entry entry : headers.entrySet()) { + httpHeaders.set(entry.getKey(), entry.getValue()); + } + + HttpRequest httpRequest; + try { + httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint.toURI(), targetEndpoint.getPort(), httpHeaders); + } catch (URISyntaxException e) { + throw new IllegalArgumentException(targetEndpoint.toString(), e); + } + Mono httpResponseMono = this.httpClient.send(httpRequest); + + Mono dsrObs = HttpClientUtils.parseResponseAsync(httpResponseMono, httpRequest); + return dsrObs.map( + dsr -> { + logAddressResolutionEnd(request, identifier); + return dsr.getQueryResponse(Address.class); + }); + } + + public void dispose() { + // TODO We will implement this in future once we will move to httpClient to CompositeHttpClient + //https://msdata.visualstudio.com/CosmosDB/_workitems/edit/340842 + } + + private Mono> resolveMasterAsync(RxDocumentServiceRequest request, boolean forceRefresh, Map properties) { + Pair masterAddressAndRangeInitial = this.masterPartitionAddressCache; + + forceRefresh = forceRefresh || + (masterAddressAndRangeInitial != null && + notAllReplicasAvailable(masterAddressAndRangeInitial.getRight()) && + Duration.between(this.suboptimalMasterPartitionTimestamp, Instant.now()).getSeconds() > this.suboptimalPartitionForceRefreshIntervalInSeconds); + + if (forceRefresh || this.masterPartitionAddressCache == null) { + Mono> masterReplicaAddressesObs = this.getMasterAddressesViaGatewayAsync( + request, + ResourceType.Database, + null, + databaseFeedEntryUrl, + forceRefresh, + false, + properties); + + return masterReplicaAddressesObs.map( + masterAddresses -> { + Pair masterAddressAndRangeRes = + this.toPartitionAddressAndRange("", masterAddresses); + this.masterPartitionAddressCache = masterAddressAndRangeRes; + + if (notAllReplicasAvailable(masterAddressAndRangeRes.getRight()) + && this.suboptimalMasterPartitionTimestamp.equals(Instant.MAX)) { + this.suboptimalMasterPartitionTimestamp = Instant.now(); + } else { + this.suboptimalMasterPartitionTimestamp = Instant.MAX; + } + + return masterPartitionAddressCache; + }) + .doOnError( + e -> { + this.suboptimalMasterPartitionTimestamp = Instant.MAX; + }); + } else { + if (notAllReplicasAvailable(masterAddressAndRangeInitial.getRight()) + && this.suboptimalMasterPartitionTimestamp.equals(Instant.MAX)) { + this.suboptimalMasterPartitionTimestamp = Instant.now(); + } + + return Mono.just(masterAddressAndRangeInitial); + } + } + + private Mono getAddressesForRangeId( + RxDocumentServiceRequest request, + String collectionRid, + String partitionKeyRangeId, + boolean forceRefresh) { + Mono> addressResponse = this.getServerAddressesViaGatewayAsync(request, collectionRid, Collections.singletonList(partitionKeyRangeId), forceRefresh); + + Mono>> addressInfos = + addressResponse.map( + addresses -> + addresses.stream().filter(addressInfo -> + 
this.protocolScheme.equals(addressInfo.getProtocolScheme())) + .collect(Collectors.groupingBy( + Address::getParitionKeyRangeId)) + .values().stream() + .map(groupedAddresses -> toPartitionAddressAndRange(collectionRid, addresses)) + .collect(Collectors.toList())); + + Mono>> result = addressInfos.map(addressInfo -> addressInfo.stream() + .filter(a -> + StringUtils.equals(a.getLeft().getPartitionKeyRangeId(), partitionKeyRangeId)) + .collect(Collectors.toList())); + + return result.flatMap( + list -> { + if (list.isEmpty()) { + + String errorMessage = String.format( + RMResources.PartitionKeyRangeNotFound, + partitionKeyRangeId, + collectionRid); + + PartitionKeyRangeGoneException e = new PartitionKeyRangeGoneException(errorMessage); + BridgeInternal.setResourceAddress(e, collectionRid); + + return Mono.error(e); + } else { + return Mono.just(list.get(0).getRight()); + } + }); + } + + public Mono> getMasterAddressesViaGatewayAsync( + RxDocumentServiceRequest request, + ResourceType resourceType, + String resourceAddress, + String entryUrl, + boolean forceRefresh, + boolean useMasterCollectionResolver, + Map properties) { + HashMap queryParameters = new HashMap<>(); + queryParameters.put(HttpConstants.QueryStrings.URL, HttpUtils.urlEncode(entryUrl)); + HashMap headers = new HashMap<>(defaultRequestHeaders); + + if (forceRefresh) { + headers.put(HttpConstants.HttpHeaders.FORCE_REFRESH, Boolean.TRUE.toString()); + } + + if (useMasterCollectionResolver) { + headers.put(HttpConstants.HttpHeaders.USE_MASTER_COLLECTION_RESOLVER, Boolean.TRUE.toString()); + } + + queryParameters.put(HttpConstants.QueryStrings.FILTER, HttpUtils.urlEncode(this.protocolFilter)); + headers.put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123()); + String token = this.tokenProvider.getUserAuthorizationToken( + resourceAddress, + resourceType, + HttpConstants.HttpMethods.GET, + headers, + AuthorizationTokenType.PrimaryMasterKey, + properties); + + headers.put(HttpConstants.HttpHeaders.AUTHORIZATION, HttpUtils.urlEncode(token)); + URL targetEndpoint = Utils.setQuery(this.addressEndpoint.toString(), Utils.createQuery(queryParameters)); + String identifier = logAddressResolutionStart(request, targetEndpoint); + + HttpHeaders defaultHttpHeaders = new HttpHeaders(headers.size()); + for (Map.Entry entry : headers.entrySet()) { + defaultHttpHeaders.set(entry.getKey(), entry.getValue()); + } + + HttpRequest httpRequest; + try { + httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint.toURI(), targetEndpoint.getPort(), defaultHttpHeaders); + } catch (URISyntaxException e) { + throw new IllegalArgumentException(targetEndpoint.toString(), e); + } + + Mono httpResponseMono = this.httpClient.send(httpRequest); + Mono dsrObs = HttpClientUtils.parseResponseAsync(httpResponseMono, httpRequest); + + return dsrObs.map( + dsr -> { + logAddressResolutionEnd(request, identifier); + return dsr.getQueryResponse(Address.class); + }); + } + + private Pair toPartitionAddressAndRange(String collectionRid, List

addresses) { + Address address = addresses.get(0); + + AddressInformation[] addressInfos = + addresses.stream().map(addr -> + GatewayAddressCache.toAddressInformation(addr) + ).collect(Collectors.toList()).toArray(new AddressInformation[addresses.size()]); + return Pair.of(new PartitionKeyRangeIdentity(collectionRid, address.getParitionKeyRangeId()), addressInfos); + } + + private static AddressInformation toAddressInformation(Address address) { + return new AddressInformation(true, address.IsPrimary(), address.getPhyicalUri(), address.getProtocolScheme()); + } + + public Mono openAsync( + DocumentCollection collection, + List partitionKeyRangeIdentities) { + List>> tasks = new ArrayList<>(); + int batchSize = GatewayAddressCache.DefaultBatchSize; + + RxDocumentServiceRequest request = RxDocumentServiceRequest.create( + OperationType.Read, + // collection.AltLink, + collection.resourceId(), + ResourceType.DocumentCollection, + // AuthorizationTokenType.PrimaryMasterKey + Collections.emptyMap()); + for (int i = 0; i < partitionKeyRangeIdentities.size(); i += batchSize) { + + int endIndex = i + batchSize; + endIndex = endIndex < partitionKeyRangeIdentities.size() + ? endIndex : partitionKeyRangeIdentities.size(); + + tasks.add(this.getServerAddressesViaGatewayAsync( + request, + collection.resourceId(), + + partitionKeyRangeIdentities.subList(i, endIndex). + stream().map(PartitionKeyRangeIdentity::getPartitionKeyRangeId).collect(Collectors.toList()), + false).flux()); + } + + return Flux.concat(tasks) + .doOnNext(list -> { + List> addressInfos = list.stream() + .filter(addressInfo -> this.protocolScheme.equals(addressInfo.getProtocolScheme())) + .collect(Collectors.groupingBy(Address::getParitionKeyRangeId)) + .values().stream().map(addresses -> toPartitionAddressAndRange(collection.resourceId(), addresses)) + .collect(Collectors.toList()); + + for (Pair addressInfo : addressInfos) { + this.serverPartitionAddressCache.set( + new PartitionKeyRangeIdentity(collection.resourceId(), addressInfo.getLeft().getPartitionKeyRangeId()), + addressInfo.getRight()); + } + }).then(); + } + + private boolean notAllReplicasAvailable(AddressInformation[] addressInformations) { + return addressInformations.length < ServiceConfig.SystemReplicationPolicy.MaxReplicaSetSize; + } + + private static String logAddressResolutionStart(RxDocumentServiceRequest request, URL targetEndpointUrl) { + try { + if (request.requestContext.cosmosResponseDiagnostics != null) { + return BridgeInternal.recordAddressResolutionStart(request.requestContext.cosmosResponseDiagnostics, targetEndpointUrl.toURI()); + } + } catch (URISyntaxException e) { + throw new IllegalArgumentException(e); + } + return null; + } + + private static void logAddressResolutionEnd(RxDocumentServiceRequest request, String identifier) { + if (request.requestContext.cosmosResponseDiagnostics != null) { + BridgeInternal.recordAddressResolutionEnd(request.requestContext.cosmosResponseDiagnostics, identifier); + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/GatewayServiceConfigurationReader.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/GatewayServiceConfigurationReader.java new file mode 100644 index 0000000000000..e02e7ef88b20b --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/GatewayServiceConfigurationReader.java @@ -0,0 +1,182 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * 
Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.ConnectionPolicy; +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.internal.BaseAuthorizationTokenProvider; +import com.azure.data.cosmos.internal.Constants; +import com.azure.data.cosmos.internal.DatabaseAccount; +import com.azure.data.cosmos.internal.GlobalEndpointManager; +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.ReplicationPolicy; +import com.azure.data.cosmos.internal.UserAgentContainer; +import com.azure.data.cosmos.internal.Utils; +import com.azure.data.cosmos.internal.http.HttpClient; +import com.azure.data.cosmos.internal.http.HttpHeaders; +import com.azure.data.cosmos.internal.http.HttpRequest; +import com.azure.data.cosmos.internal.http.HttpResponse; +import io.netty.handler.codec.http.HttpMethod; +import reactor.core.publisher.Mono; + +import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Map; + +/** + * This class will read the service configuration from the gateway. + * + * As .Net does code sharing between the SDK and GW there are two implementation to IServiceConfigurationReader + * GatewayServiceConfigurationReader which is for SDK + * DatabaseAccountConfigurationReader which is for GW + * Some of the APIs are not relevant in SDK and due to that in .Net the SDK implementation one throws not-implemented. 
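Editor's note: GatewayAddressCache.openAsync above warms the address cache by splitting the partition key range identities into fixed-size batches and issuing one gateway request per batch. A minimal, self-contained sketch of that subList batching loop (generic types and an illustrative batch size; not part of this diff):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Illustrative sketch of the fixed-size batching used by GatewayAddressCache.openAsync.
final class BatchingSketch {
    static <T> List<List<T>> toBatches(List<T> items, int batchSize) {
        List<List<T>> batches = new ArrayList<>();
        for (int i = 0; i < items.size(); i += batchSize) {
            int endIndex = Math.min(i + batchSize, items.size());
            batches.add(items.subList(i, endIndex)); // one gateway request per batch
        }
        return batches;
    }

    public static void main(String[] args) {
        // With a batch size of 2, five ids produce the batches [a, b], [c, d], [e].
        System.out.println(toBatches(Arrays.asList("a", "b", "c", "d", "e"), 2));
    }
}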
+ * + * In java, as we don't do code sharing + * and we got rid of the interface which is not needed and only implemented the methods in GatewayServiceConfigurationReader + */ +public class GatewayServiceConfigurationReader { + + public static final String GATEWAY_READER_NOT_INITIALIZED = "GatewayServiceConfigurationReader has not been initialized"; + + public ReplicationPolicy userReplicationPolicy; + private ReplicationPolicy systemReplicationPolicy; + private ConsistencyLevel consistencyLevel; + private volatile boolean initialized; + private URI serviceEndpoint; + private final ConnectionPolicy connectionPolicy; + private Map queryEngineConfiguration; + private final BaseAuthorizationTokenProvider baseAuthorizationTokenProvider; + private final boolean hasAuthKeyResourceToken; + private final String authKeyResourceToken; + private HttpClient httpClient; + + public GatewayServiceConfigurationReader(URI serviceEndpoint, boolean hasResourceToken, String resourceToken, + ConnectionPolicy connectionPolicy, BaseAuthorizationTokenProvider baseAuthorizationTokenProvider, + HttpClient httpClient) { + this.serviceEndpoint = serviceEndpoint; + this.baseAuthorizationTokenProvider = baseAuthorizationTokenProvider; + this.hasAuthKeyResourceToken = hasResourceToken; + this.authKeyResourceToken = resourceToken; + this.connectionPolicy = connectionPolicy; + this.httpClient = httpClient; + } + + public ReplicationPolicy getUserReplicationPolicy() { + this.throwIfNotInitialized(); + return this.userReplicationPolicy; + } + + public ReplicationPolicy getSystemReplicationPolicy() { + this.throwIfNotInitialized(); + return this.systemReplicationPolicy; + } + + public boolean enableAuthorization() { + return true; + } + + public ConsistencyLevel getDefaultConsistencyLevel() { + this.throwIfNotInitialized(); + return this.consistencyLevel; + } + + public void setDefaultConsistencyLevel(ConsistencyLevel value) { + this.throwIfNotInitialized(); + this.consistencyLevel = value; + } + + public Map getQueryEngineConfiguration() { + this.throwIfNotInitialized(); + return this.queryEngineConfiguration; + } + + private Mono getDatabaseAccountAsync(URI serviceEndpoint) { + + HttpHeaders httpHeaders = new HttpHeaders(); + httpHeaders.set(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION); + + UserAgentContainer userAgentContainer = new UserAgentContainer(); + String userAgentSuffix = this.connectionPolicy.userAgentSuffix(); + if (userAgentSuffix != null && userAgentSuffix.length() > 0) { + userAgentContainer.setSuffix(userAgentSuffix); + } + + httpHeaders.set(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent()); + httpHeaders.set(HttpConstants.HttpHeaders.API_TYPE, Constants.Properties.SQL_API_TYPE); + String authorizationToken; + if (this.hasAuthKeyResourceToken || baseAuthorizationTokenProvider == null) { + authorizationToken = HttpUtils.urlEncode(this.authKeyResourceToken); + } else { + // Retrieve the document service properties. 
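Editor's note: getDatabaseAccountAsync authorizes the request in one of two ways, as the branch at this point shows: a pre-issued resource token is URL-encoded and sent as-is, while master-key authorization signs the HTTP method, the endpoint, and a freshly generated RFC 1123 date header. A rough, self-contained sketch of that branch; KeySigner is a hypothetical stand-in for BaseAuthorizationTokenProvider#generateKeyAuthorizationSignature and the header name is illustrative:

import java.io.UnsupportedEncodingException;
import java.net.URI;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import java.time.ZoneOffset;
import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;
import java.util.HashMap;
import java.util.Map;

// Illustrative sketch only: mirrors the resource-token vs. master-key branch in getDatabaseAccountAsync.
final class AuthorizationHeaderSketch {

    // Hypothetical stand-in for the SDK's key-based signature helper.
    interface KeySigner {
        String sign(String httpMethod, URI endpoint, Map<String, String> headers);
    }

    static String buildAuthorizationHeader(boolean hasResourceToken,
                                           String resourceToken,
                                           URI serviceEndpoint,
                                           KeySigner signer) {
        if (hasResourceToken || signer == null) {
            try {
                // Resource tokens are sent as-is, URL-encoded.
                return URLEncoder.encode(resourceToken, StandardCharsets.UTF_8.name());
            } catch (UnsupportedEncodingException e) {
                throw new IllegalStateException(e); // UTF-8 is always available
            }
        }
        // Master-key auth signs the method, the endpoint, and the current RFC 1123 date.
        String xDate = DateTimeFormatter.RFC_1123_DATE_TIME.format(ZonedDateTime.now(ZoneOffset.UTC));
        Map<String, String> headers = new HashMap<>();
        headers.put("x-ms-date", xDate); // header name illustrative; the code above uses its X_DATE constant
        return signer.sign("GET", serviceEndpoint, headers);
    }
}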
+ String xDate = Utils.nowAsRFC1123(); + httpHeaders.set(HttpConstants.HttpHeaders.X_DATE, xDate); + Map header = new HashMap<>(); + header.put(HttpConstants.HttpHeaders.X_DATE, xDate); + authorizationToken = baseAuthorizationTokenProvider + .generateKeyAuthorizationSignature(HttpConstants.HttpMethods.GET, serviceEndpoint, header); + } + httpHeaders.set(HttpConstants.HttpHeaders.AUTHORIZATION, authorizationToken); + + HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, serviceEndpoint, serviceEndpoint.getPort(), httpHeaders); + Mono httpResponse = httpClient.send(httpRequest); + return toDatabaseAccountObservable(httpResponse, httpRequest); + } + + public Mono initializeReaderAsync() { + try { + return GlobalEndpointManager.getDatabaseAccountFromAnyLocationsAsync(this.serviceEndpoint.toURL(), + + new ArrayList<>(this.connectionPolicy.preferredLocations()), url -> { + try { + return getDatabaseAccountAsync(url.toURI()); + } catch (URISyntaxException e) { + throw new IllegalArgumentException("URI " + url); + } + }).doOnSuccess(databaseAccount -> { + userReplicationPolicy = databaseAccount.getReplicationPolicy(); + systemReplicationPolicy = databaseAccount.getSystemReplicationPolicy(); + queryEngineConfiguration = databaseAccount.getQueryEngineConfiuration(); + consistencyLevel = databaseAccount.getConsistencyPolicy().defaultConsistencyLevel(); + initialized = true; + }); + } catch (MalformedURLException e) { + throw new IllegalArgumentException(this.serviceEndpoint.toString(), e); + } + } + + private Mono toDatabaseAccountObservable(Mono httpResponse, HttpRequest httpRequest) { + + return HttpClientUtils.parseResponseAsync(httpResponse, httpRequest) + .map(rxDocumentServiceResponse -> rxDocumentServiceResponse.getResource(DatabaseAccount.class)); + } + + private void throwIfNotInitialized() { + if (!this.initialized) { + throw new IllegalArgumentException(GATEWAY_READER_NOT_INITIALIZED); + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/GlobalAddressResolver.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/GlobalAddressResolver.java new file mode 100644 index 0000000000000..0e5b8f918317e --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/GlobalAddressResolver.java @@ -0,0 +1,166 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
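Editor's note: GatewayServiceConfigurationReader above only exposes its settings after initializeReaderAsync completes; every getter goes through throwIfNotInitialized. A tiny self-contained sketch of that initialize-before-read guard, with illustrative names rather than the SDK types:

// Illustrative sketch of the guard pattern used by GatewayServiceConfigurationReader above.
final class LazyConfigSketch {
    private volatile boolean initialized;
    private volatile String defaultConsistencyLevel;

    void initialize(String consistencyLevelFromGateway) {
        this.defaultConsistencyLevel = consistencyLevelFromGateway;
        this.initialized = true; // publish only after the value is set
    }

    String defaultConsistencyLevel() {
        if (!initialized) {
            throw new IllegalStateException("reader has not been initialized");
        }
        return defaultConsistencyLevel;
    }
}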
+ */ + +package com.azure.data.cosmos.internal.directconnectivity; + + +import com.azure.data.cosmos.ConnectionPolicy; +import com.azure.data.cosmos.internal.DocumentCollection; +import com.azure.data.cosmos.internal.GlobalEndpointManager; +import com.azure.data.cosmos.internal.IAuthorizationTokenProvider; +import com.azure.data.cosmos.internal.PartitionKeyRange; +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import com.azure.data.cosmos.internal.UserAgentContainer; +import com.azure.data.cosmos.internal.caches.RxCollectionCache; +import com.azure.data.cosmos.internal.caches.RxPartitionKeyRangeCache; +import com.azure.data.cosmos.internal.http.HttpClient; +import com.azure.data.cosmos.internal.routing.CollectionRoutingMap; +import com.azure.data.cosmos.internal.routing.PartitionKeyRangeIdentity; +import reactor.core.publisher.Mono; + +import java.net.URL; +import java.util.ArrayList; +import java.util.Collections; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.stream.Collectors; + +; + +public class GlobalAddressResolver implements IAddressResolver { + private final static int MaxBackupReadRegions = 3; + private final GlobalEndpointManager endpointManager; + private final Protocol protocol; + private final IAuthorizationTokenProvider tokenProvider; + private final UserAgentContainer userAgentContainer; + private final RxCollectionCache collectionCache; + private final RxPartitionKeyRangeCache routingMapProvider; + private final int maxEndpoints; + private final GatewayServiceConfigurationReader serviceConfigReader; + final Map addressCacheByEndpoint; + + private GatewayAddressCache gatewayAddressCache; + private AddressResolver addressResolver; + private HttpClient httpClient; + + public GlobalAddressResolver( + HttpClient httpClient, + GlobalEndpointManager endpointManager, + Protocol protocol, + IAuthorizationTokenProvider tokenProvider, + RxCollectionCache collectionCache, + RxPartitionKeyRangeCache routingMapProvider, + UserAgentContainer userAgentContainer, + GatewayServiceConfigurationReader serviceConfigReader, + ConnectionPolicy connectionPolicy) { + + this.httpClient = httpClient; + this.endpointManager = endpointManager; + this.protocol = protocol; + this.tokenProvider = tokenProvider; + this.userAgentContainer = userAgentContainer; + this.collectionCache = collectionCache; + this.routingMapProvider = routingMapProvider; + this.serviceConfigReader = serviceConfigReader; + + int maxBackupReadEndpoints = (connectionPolicy.enableReadRequestsFallback() == null || connectionPolicy.enableReadRequestsFallback()) ? 
GlobalAddressResolver.MaxBackupReadRegions : 0; + this.maxEndpoints = maxBackupReadEndpoints + 2; // for write and alternate write endpoint (during failover) + this.addressCacheByEndpoint = new ConcurrentHashMap<>(); + + for (URL endpoint : endpointManager.getWriteEndpoints()) { + this.getOrAddEndpoint(endpoint); + } + for (URL endpoint : endpointManager.getReadEndpoints()) { + this.getOrAddEndpoint(endpoint); + } + } + + Mono openAsync(DocumentCollection collection) { + Mono routingMap = this.routingMapProvider.tryLookupAsync(collection.id(), null, null); + return routingMap.flatMap(collectionRoutingMap -> { + + List ranges = ((List)collectionRoutingMap.getOrderedPartitionKeyRanges()).stream().map(range -> + new PartitionKeyRangeIdentity(collection.resourceId(), range.id())).collect(Collectors.toList()); + List> tasks = new ArrayList<>(); + for (EndpointCache endpointCache : this.addressCacheByEndpoint.values()) { + tasks.add(endpointCache.addressCache.openAsync(collection, ranges)); + } + // TODO: Not sure if this will work. + return Mono.whenDelayError(tasks); + }).switchIfEmpty(Mono.defer(Mono::empty)); + } + + @Override + public Mono resolveAsync(RxDocumentServiceRequest request, boolean forceRefresh) { + IAddressResolver resolver = this.getAddressResolver(request); + return resolver.resolveAsync(request, forceRefresh); + } + + public void dispose() { + for (EndpointCache endpointCache : this.addressCacheByEndpoint.values()) { + endpointCache.addressCache.dispose(); + } + } + + private IAddressResolver getAddressResolver(RxDocumentServiceRequest rxDocumentServiceRequest) { + URL endpoint = this.endpointManager.resolveServiceEndpoint(rxDocumentServiceRequest); + return this.getOrAddEndpoint(endpoint).addressResolver; + } + + private EndpointCache getOrAddEndpoint(URL endpoint) { + EndpointCache endpointCache = this.addressCacheByEndpoint.computeIfAbsent(endpoint , key -> { + GatewayAddressCache gatewayAddressCache = new GatewayAddressCache(endpoint, protocol, this.tokenProvider, this.userAgentContainer, this.httpClient); + AddressResolver addressResolver = new AddressResolver(); + addressResolver.initializeCaches(this.collectionCache, this.routingMapProvider, gatewayAddressCache); + EndpointCache cache = new EndpointCache(); + cache.addressCache = gatewayAddressCache; + cache.addressResolver = addressResolver; + return cache; + }); + + if (this.addressCacheByEndpoint.size() > this.maxEndpoints) { + List allEndpoints = new ArrayList(this.endpointManager.getWriteEndpoints()); + allEndpoints.addAll(this.endpointManager.getReadEndpoints()); + Collections.reverse(allEndpoints); + LinkedList endpoints = new LinkedList<>(allEndpoints); + while (this.addressCacheByEndpoint.size() > this.maxEndpoints) { + if (endpoints.size() > 0) { + URL dequeueEnpoint = endpoints.pop(); + if (this.addressCacheByEndpoint.get(dequeueEnpoint) != null) { + this.addressCacheByEndpoint.remove(dequeueEnpoint); + } + } else { + break; + } + } + } + return endpointCache; + } + + static class EndpointCache { + GatewayAddressCache addressCache; + AddressResolver addressResolver; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/GoneAndRetryWithRetryPolicy.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/GoneAndRetryWithRetryPolicy.java new file mode 100644 index 0000000000000..5cfeb3ae1cea4 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/GoneAndRetryWithRetryPolicy.java @@ -0,0 +1,215 
@@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.GoneException; +import com.azure.data.cosmos.InvalidPartitionException; +import com.azure.data.cosmos.PartitionIsMigratingException; +import com.azure.data.cosmos.PartitionKeyRangeGoneException; +import com.azure.data.cosmos.PartitionKeyRangeIsSplittingException; +import com.azure.data.cosmos.RetryWithException; +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.IRetryPolicy; +import com.azure.data.cosmos.internal.Quadruple; +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import org.apache.commons.lang3.time.StopWatch; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Mono; + +import java.time.Duration; + +public class GoneAndRetryWithRetryPolicy implements IRetryPolicy { + + private final static Logger logger = LoggerFactory.getLogger(GoneAndRetryWithRetryPolicy.class); + private final static int DEFAULT_WAIT_TIME_IN_SECONDS = 30; + private final static int MAXIMUM_BACKOFF_TIME_IN_SECONDS = 15; + private final static int INITIAL_BACKOFF_TIME = 1; + private final static int BACK_OFF_MULTIPLIER = 2; + + private final RxDocumentServiceRequest request; + private volatile int attemptCount = 1; + private volatile int attemptCountInvalidPartition = 1; + private volatile int currentBackoffSeconds = GoneAndRetryWithRetryPolicy.INITIAL_BACKOFF_TIME; + private volatile RetryWithException lastRetryWithException; + private final StopWatch durationTimer = new StopWatch(); + private final int waitTimeInSeconds; + //TODO once this is moved to IRetryPolicy, remove from here. 
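Editor's note: the policy below backs off exponentially (1 s, 2 s, 4 s, ...), but never beyond MAXIMUM_BACKOFF_TIME_IN_SECONDS and never beyond the time remaining in the overall wait window. A minimal self-contained sketch of that calculation, mirroring the formula used in shouldRetry (illustrative class, not part of the diff):

import java.time.Duration;

// Illustrative sketch of the capped exponential backoff computed in shouldRetry below.
final class BackoffSketch {
    private static final int MAXIMUM_BACKOFF_TIME_IN_SECONDS = 15;
    private int currentBackoffSeconds = 1; // INITIAL_BACKOFF_TIME

    Duration nextBackoff(long remainingSeconds) {
        long seconds = Math.min(Math.min(currentBackoffSeconds, remainingSeconds),
                MAXIMUM_BACKOFF_TIME_IN_SECONDS);
        currentBackoffSeconds *= 2; // BACK_OFF_MULTIPLIER
        return Duration.ofSeconds(Math.max(seconds, 0)); // clamp added defensively in this sketch
    }
}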
+ public static Quadruple INITIAL_ARGUMENT_VALUE_POLICY_ARG = Quadruple.with(false, false, + Duration.ofSeconds(60), 0); + + public GoneAndRetryWithRetryPolicy(RxDocumentServiceRequest request, Integer waitTimeInSeconds) { + this.request = request; + startStopWatch(this.durationTimer); + if (waitTimeInSeconds != null) { + this.waitTimeInSeconds = waitTimeInSeconds; + } else { + this.waitTimeInSeconds = DEFAULT_WAIT_TIME_IN_SECONDS; + } + } + + @Override + public Mono shouldRetry(Exception exception) { + CosmosClientException exceptionToThrow = null; + Duration backoffTime = Duration.ofSeconds(0); + Duration timeout = Duration.ofSeconds(0); + boolean forceRefreshAddressCache = false; + if (!(exception instanceof GoneException) && + !(exception instanceof RetryWithException) && + !(exception instanceof PartitionIsMigratingException) && + !(exception instanceof InvalidPartitionException && + (this.request.getPartitionKeyRangeIdentity() == null || + this.request.getPartitionKeyRangeIdentity().getCollectionRid() == null)) && + !(exception instanceof PartitionKeyRangeIsSplittingException)) { + logger.debug("Operation will NOT be retried. Current attempt {}, Exception: {} ", this.attemptCount, + exception); + stopStopWatch(this.durationTimer); + return Mono.just(ShouldRetryResult.noRetry()); + } else if (exception instanceof RetryWithException) { + this.lastRetryWithException = (RetryWithException) exception; + } + long remainingSeconds = this.waitTimeInSeconds - this.durationTimer.getTime() / 1000; + int currentRetryAttemptCount = this.attemptCount; + if (this.attemptCount++ > 1) { + if (remainingSeconds <= 0) { + if (exception instanceof GoneException) { + if (this.lastRetryWithException != null) { + logger.warn( + "Received gone exception after backoff/retry including at least one RetryWithException. " + + "Will fail the request with RetryWithException. GoneException: {}. RetryWithException: {}", + exception, this.lastRetryWithException); + exceptionToThrow = this.lastRetryWithException; + } else { + logger.warn("Received gone exception after backoff/retry. Will fail the request. {}", + exception.toString()); + exceptionToThrow = BridgeInternal.createCosmosClientException(HttpConstants.StatusCodes.SERVICE_UNAVAILABLE, + exception); + } + + } else if (exception instanceof PartitionKeyRangeGoneException) { + if (this.lastRetryWithException != null) { + logger.warn( + "Received partition key range gone exception after backoff/retry including at least one RetryWithException." + + "Will fail the request with RetryWithException. GoneException: {}. RetryWithException: {}", + exception, this.lastRetryWithException); + exceptionToThrow = this.lastRetryWithException; + } else { + logger.warn( + "Received partition key range gone exception after backoff/retry. Will fail the request. {}", + exception.toString()); + exceptionToThrow = BridgeInternal.createCosmosClientException(HttpConstants.StatusCodes.SERVICE_UNAVAILABLE, + exception); + } + } else if (exception instanceof InvalidPartitionException) { + if (this.lastRetryWithException != null) { + logger.warn( + "Received InvalidPartitionException after backoff/retry including at least one RetryWithException. " + + "Will fail the request with RetryWithException. InvalidPartitionException: {}. RetryWithException: {}", + exception, this.lastRetryWithException); + } else { + logger.warn( + "Received invalid collection partition exception after backoff/retry. Will fail the request. 
{}", + exception.toString()); + exceptionToThrow = BridgeInternal.createCosmosClientException(HttpConstants.StatusCodes.SERVICE_UNAVAILABLE, + exception); + } + } else { + logger.warn("Received retrywith exception after backoff/retry. Will fail the request. {}", + exception.toString()); + } + stopStopWatch(this.durationTimer); + return Mono.just(ShouldRetryResult.error(exceptionToThrow)); + } + backoffTime = Duration.ofSeconds(Math.min(Math.min(this.currentBackoffSeconds, remainingSeconds), + GoneAndRetryWithRetryPolicy.MAXIMUM_BACKOFF_TIME_IN_SECONDS)); + this.currentBackoffSeconds *= GoneAndRetryWithRetryPolicy.BACK_OFF_MULTIPLIER; + logger.info("BackoffTime: {} seconds.", backoffTime.getSeconds()); + } + + // Calculate the remaining time based after accounting for the backoff that we + // will perform + long timeoutInMillSec = remainingSeconds*1000 - backoffTime.toMillis(); + timeout = timeoutInMillSec > 0 ? Duration.ofMillis(timeoutInMillSec) + : Duration.ofSeconds(GoneAndRetryWithRetryPolicy.MAXIMUM_BACKOFF_TIME_IN_SECONDS); + if (exception instanceof GoneException) { + logger.warn("Received gone exception, will retry, {}", exception.toString()); + forceRefreshAddressCache = true; // indicate we are in retry. + } else if (exception instanceof PartitionIsMigratingException) { + logger.warn("Received PartitionIsMigratingException, will retry, {}", exception.toString()); + this.request.forceCollectionRoutingMapRefresh = true; + forceRefreshAddressCache = true; + } else if (exception instanceof InvalidPartitionException) { + this.request.requestContext.quorumSelectedLSN = -1; + this.request.requestContext.resolvedPartitionKeyRange = null; + this.request.requestContext.quorumSelectedStoreResponse = null; + this.request.requestContext.globalCommittedSelectedLSN = -1; + if (this.attemptCountInvalidPartition++ > 2) { + // for second InvalidPartitionException, stop retrying. + logger.warn("Received second InvalidPartitionException after backoff/retry. Will fail the request. {}", + exception.toString()); + return Mono.just(ShouldRetryResult + .error(BridgeInternal.createCosmosClientException(HttpConstants.StatusCodes.SERVICE_UNAVAILABLE, exception))); + } + + if (this.request != null) { + logger.warn("Received invalid collection exception, will retry, {}", exception.toString()); + this.request.forceNameCacheRefresh = true; + } else { + logger.error("Received unexpected invalid collection exception, request should be non-null.", + exception); + return Mono.just(ShouldRetryResult + .error(BridgeInternal.createCosmosClientException(HttpConstants.StatusCodes.INTERNAL_SERVER_ERROR, exception))); + } + forceRefreshAddressCache = false; + } else if (exception instanceof PartitionKeyRangeIsSplittingException) { + this.request.requestContext.resolvedPartitionKeyRange = null; + this.request.requestContext.quorumSelectedLSN = -1; + this.request.requestContext.quorumSelectedStoreResponse = null; + logger.info("Received partition key range splitting exception, will retry, {}", exception.toString()); + this.request.forcePartitionKeyRangeRefresh = true; + forceRefreshAddressCache = false; + } else { + logger.warn("Received retrywith exception, will retry, {}", exception); + // For RetryWithException, prevent the caller + // from refreshing any caches. 
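Editor's note: in summary, each retriable failure above maps to a different cache-refresh action before the retry is scheduled: GoneException and PartitionIsMigratingException force an address refresh, InvalidPartitionException forces a name-cache refresh, a splitting partition key range forces a partition-key-range refresh, and RetryWithException refreshes nothing. A compact sketch of that mapping, using the exception types already imported by this class (the real policy also resets request-context state and, for a migrating partition, forces a collection routing map refresh):

import com.azure.data.cosmos.GoneException;
import com.azure.data.cosmos.InvalidPartitionException;
import com.azure.data.cosmos.PartitionIsMigratingException;
import com.azure.data.cosmos.PartitionKeyRangeIsSplittingException;

// Illustrative decision-table sketch of the branches in shouldRetry above.
final class RefreshDecisionSketch {
    enum RefreshAction { FORCE_ADDRESS_REFRESH, FORCE_NAME_CACHE_REFRESH, FORCE_PK_RANGE_REFRESH, NONE }

    static RefreshAction refreshActionFor(Exception e) {
        if (e instanceof GoneException || e instanceof PartitionIsMigratingException) {
            return RefreshAction.FORCE_ADDRESS_REFRESH;
        }
        if (e instanceof InvalidPartitionException) {
            return RefreshAction.FORCE_NAME_CACHE_REFRESH;
        }
        if (e instanceof PartitionKeyRangeIsSplittingException) {
            return RefreshAction.FORCE_PK_RANGE_REFRESH;
        }
        return RefreshAction.NONE; // RetryWithException: do not refresh any caches
    }
}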
+ forceRefreshAddressCache = false; + } + return Mono.just(ShouldRetryResult.retryAfter(backoffTime, + Quadruple.with(forceRefreshAddressCache, true, timeout, currentRetryAttemptCount))); + } + + private void stopStopWatch(StopWatch stopwatch) { + synchronized (stopwatch) { + stopwatch.stop(); + } + } + + private void startStopWatch(StopWatch stopwatch) { + synchronized (stopwatch) { + stopwatch.start(); + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/HttpClientUtils.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/HttpClientUtils.java new file mode 100644 index 0000000000000..87e53b3647c40 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/HttpClientUtils.java @@ -0,0 +1,64 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.CosmosError; +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.RxDocumentServiceResponse; +import com.azure.data.cosmos.internal.http.HttpRequest; +import com.azure.data.cosmos.internal.http.HttpResponse; +import reactor.core.publisher.Mono; + +public class HttpClientUtils { + + static Mono parseResponseAsync(Mono httpResponse, HttpRequest httpRequest) { + return httpResponse.flatMap(response -> { + if (response.statusCode() < HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) { + + return ResponseUtils.toStoreResponse(response, httpRequest).map(RxDocumentServiceResponse::new); + + // TODO: to break the dependency between RxDocumentServiceResponse and StoreResponse + // we should factor out the RxDocumentServiceResponse(StoreResponse) constructor to a helper class + + } else { + return HttpClientUtils + .createDocumentClientException(response).flatMap(Mono::error); + } + }); + } + + private static Mono createDocumentClientException(HttpResponse httpResponse) { + Mono readStream = ResponseUtils.toString(httpResponse.body()); + + return readStream.map(body -> { + CosmosError cosmosError = BridgeInternal.createCosmosError(body); + + // TODO: we should set resource address in the Document Client Exception + return BridgeInternal.createCosmosClientException(httpResponse.statusCode(), cosmosError, + httpResponse.headers().toMap()); + }); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/HttpTransportClient.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/HttpTransportClient.java new file mode 100644 index 0000000000000..8e1cbdad9cdc5 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/HttpTransportClient.java @@ -0,0 +1,995 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.BadRequestException; +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.ConflictException; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.ForbiddenException; +import com.azure.data.cosmos.GoneException; +import com.azure.data.cosmos.InternalServerErrorException; +import com.azure.data.cosmos.InvalidPartitionException; +import com.azure.data.cosmos.LockedException; +import com.azure.data.cosmos.MethodNotAllowedException; +import com.azure.data.cosmos.NotFoundException; +import com.azure.data.cosmos.PartitionIsMigratingException; +import com.azure.data.cosmos.PartitionKeyRangeGoneException; +import com.azure.data.cosmos.PartitionKeyRangeIsSplittingException; +import com.azure.data.cosmos.PreconditionFailedException; +import com.azure.data.cosmos.RequestEntityTooLargeException; +import com.azure.data.cosmos.RequestRateTooLargeException; +import com.azure.data.cosmos.RequestTimeoutException; +import com.azure.data.cosmos.RetryWithException; +import com.azure.data.cosmos.ServiceUnavailableException; +import com.azure.data.cosmos.UnauthorizedException; +import com.azure.data.cosmos.internal.Configs; +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.Integers; +import com.azure.data.cosmos.internal.Lists; +import com.azure.data.cosmos.internal.Longs; +import com.azure.data.cosmos.internal.MutableVolatile; +import com.azure.data.cosmos.internal.OperationType; +import com.azure.data.cosmos.internal.PathsHelper; +import com.azure.data.cosmos.internal.RMResources; +import com.azure.data.cosmos.internal.ResourceType; +import com.azure.data.cosmos.internal.RuntimeConstants; +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import com.azure.data.cosmos.internal.Strings; +import com.azure.data.cosmos.internal.UserAgentContainer; +import com.azure.data.cosmos.internal.Utils; +import com.azure.data.cosmos.internal.http.HttpClient; +import com.azure.data.cosmos.internal.http.HttpClientConfig; +import com.azure.data.cosmos.internal.http.HttpHeaders; +import com.azure.data.cosmos.internal.http.HttpRequest; +import com.azure.data.cosmos.internal.http.HttpResponse; +import io.netty.handler.codec.http.HttpMethod; +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Mono; + +import java.net.URI; +import java.time.Instant; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; + +import static com.azure.data.cosmos.internal.Utils.trimBeginningAndEndingSlashes; +/* + * The following code only support Document Write without any error handling support. 
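Editor's note: invokeStoreAsync below converts low-level send failures into Cosmos exceptions: retriable network errors, and any failure on a read-only request, become a GoneException (safe to retry against a re-resolved replica address), while a failure on a write becomes a ServiceUnavailableException because the client cannot know whether the write reached the server. A minimal decision sketch, with isRetriable standing in for WebExceptionUtility.isWebExceptionRetriable and plain RuntimeExceptions standing in for the SDK exception types:

// Illustrative sketch of the failure-mapping decision in invokeStoreAsync below.
// The stand-in exceptions are placeholders for GoneException / ServiceUnavailableException.
final class TransportFailureSketch {
    static RuntimeException mapSendFailure(Exception cause, boolean isReadOnlyRequest, boolean isRetriable) {
        if (isRetriable || isReadOnlyRequest) {
            // Retrying is safe: the address will be re-resolved and the request re-sent.
            return new RuntimeException("gone: retry against a refreshed replica address", cause);
        }
        // A write may or may not have been applied; do not retry, surface as unavailable.
        return new RuntimeException("service unavailable: write outcome unknown", cause);
    }
}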
+ */ +public class HttpTransportClient extends TransportClient { + private final Logger logger = LoggerFactory.getLogger(HttpTransportClient.class); + private final HttpClient httpClient; + private final Map defaultHeaders; + private final Configs configs; + + HttpClient createHttpClient(int requestTimeout) { + // TODO: use one instance of SSL context everywhere + HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs); + httpClientConfig.withRequestTimeoutInMillis(requestTimeout * 1000); + httpClientConfig.withPoolSize(configs.getDirectHttpsMaxConnectionLimit()); + + return HttpClient.createFixed(httpClientConfig); + } + + public HttpTransportClient(Configs configs, int requestTimeout, UserAgentContainer userAgent) { + this.configs = configs; + this.httpClient = createHttpClient(requestTimeout); + + this.defaultHeaders = new HashMap<>(); + + // Set requested API version header for version enforcement. + this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION); + this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL, HttpConstants.HeaderValues.NoCache); + + if (userAgent == null) { + userAgent = new UserAgentContainer(); + } + + this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgent.getUserAgent()); + this.defaultHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON); + } + + @Override + public void close() { + httpClient.shutdown(); + } + + public Mono invokeStoreAsync( + URI physicalAddress, + RxDocumentServiceRequest request) { + + try { + ResourceOperation resourceOperation = new ResourceOperation(request.getOperationType(), request.getResourceType()); + // uuid correlation manager + UUID activityId = UUID.fromString(request.getActivityId()); + + if (resourceOperation.operationType == OperationType.Recreate) { + Map errorResponseHeaders = new HashMap<>(); + errorResponseHeaders.put(HttpConstants.HttpHeaders.REQUEST_VALIDATION_FAILURE, "1"); + + logger.error("Received Recreate request on Http client"); + throw new InternalServerErrorException(RMResources.InternalServerError, null, errorResponseHeaders, null); + } + + HttpRequest httpRequest = prepareHttpMessage(activityId, physicalAddress, resourceOperation, request); + + MutableVolatile sendTimeUtc = new MutableVolatile<>(); + + Mono httpResponseMono = this.httpClient + .send(httpRequest) + .doOnSubscribe(subscription -> { + sendTimeUtc.v = Instant.now(); + this.beforeRequest( + activityId, + httpRequest.uri(), + request.getResourceType(), + httpRequest.headers()); + }) + .onErrorResume(t -> { + Exception exception = Utils.as(t, Exception.class); + if (exception == null) { + logger.error("critical failure", t); + t.printStackTrace(); + assert false : "critical failure"; + return Mono.error(t); + } + + //Trace.CorrelationManager.ActivityId = activityId; + if (WebExceptionUtility.isWebExceptionRetriable(exception)) { + logger.debug("Received retriable exception {} " + + "sending the request to {}, will re-resolve the address " + + "send time UTC: {}", + exception, + physicalAddress, + sendTimeUtc); + + GoneException goneException = new GoneException( + String.format( + RMResources.ExceptionMessage, + RMResources.Gone), + exception, + null, + physicalAddress); + + return Mono.error(goneException); + } else if (request.isReadOnlyRequest()) { + logger.trace("Received exception {} on readonly request" + + "sending the request to {}, will reresolve the address " + + "send time UTC: {}", + exception, + physicalAddress, + sendTimeUtc); + 
+ GoneException goneException = new GoneException( + String.format( + RMResources.ExceptionMessage, + RMResources.Gone), + exception, + null, + physicalAddress); + + return Mono.error(goneException); + } else { + // We can't throw a GoneException here because it will cause retry and we don't + // know if the request failed before or after the message got sent to the server. + // So in order to avoid duplicating the request we will not retry. + // TODO: a possible solution for this is to add the ability to send a request to the server + // to check if the previous request was received or not and act accordingly. + ServiceUnavailableException serviceUnavailableException = new ServiceUnavailableException( + String.format( + RMResources.ExceptionMessage, + RMResources.ServiceUnavailable), + exception, + null, + physicalAddress.toString()); + serviceUnavailableException.responseHeaders().put(HttpConstants.HttpHeaders.REQUEST_VALIDATION_FAILURE, "1"); + serviceUnavailableException.responseHeaders().put(HttpConstants.HttpHeaders.WRITE_REQUEST_TRIGGER_ADDRESS_REFRESH, "1"); + return Mono.error(serviceUnavailableException); + }}) + .doOnSuccess(httpClientResponse -> { + Instant receivedTimeUtc = Instant.now(); + double durationInMilliSeconds = (receivedTimeUtc.toEpochMilli() - sendTimeUtc.v.toEpochMilli()); + this.afterRequest( + activityId, + httpClientResponse.statusCode(), + durationInMilliSeconds, + httpClientResponse.headers()); + }) + .doOnError(e -> { + Instant receivedTimeUtc = Instant.now(); + double durationInMilliSeconds = (receivedTimeUtc.toEpochMilli() - sendTimeUtc.v.toEpochMilli()); + this.afterRequest( + activityId, + 0, + durationInMilliSeconds, + null); + }); + + return httpResponseMono.flatMap(rsp -> processHttpResponse(request.getResourceAddress(), + httpRequest, activityId.toString(), rsp, physicalAddress)); + + } catch (Exception e) { + return Mono.error(e); + } + } + + private void beforeRequest(UUID activityId, URI uri, ResourceType resourceType, HttpHeaders requestHeaders) { + // TODO: perf counters + // https://msdata.visualstudio.com/CosmosDB/_workitems/edit/258624 + } + + private void afterRequest(UUID activityId, + int statusCode, + double durationInMilliSeconds, + HttpHeaders responseHeaders) { + // TODO: perf counters + // https://msdata.visualstudio.com/CosmosDB/_workitems/edit/258624 + } + + private static void addHeader(HttpHeaders requestHeaders, String headerName, RxDocumentServiceRequest request) { + String headerValue = request.getHeaders().get(headerName); + if (!Strings.isNullOrEmpty(headerValue)) { + requestHeaders.set(headerName, headerValue); + } + } + + private static void addHeader(HttpHeaders requestHeaders, String headerName, String headerValue) { + if (!Strings.isNullOrEmpty(headerValue)) { + requestHeaders.set(headerName, headerValue); + } + } + + private String getMatch(RxDocumentServiceRequest request, ResourceOperation resourceOperation) { + switch (resourceOperation.operationType) { + case Delete: + case ExecuteJavaScript: + case Replace: + case Update: + case Upsert: + return request.getHeaders().get(HttpConstants.HttpHeaders.IF_MATCH); + + case Read: + case ReadFeed: + return request.getHeaders().get(HttpConstants.HttpHeaders.IF_NONE_MATCH); + + default: + return null; + } + } + + private HttpRequest prepareHttpMessage( + UUID activityId, + URI physicalAddress, + ResourceOperation resourceOperation, + RxDocumentServiceRequest request) throws Exception { + + HttpRequest httpRequestMessage; + URI requestUri; + HttpMethod method; + + // The 
StreamContent created below will own and dispose its underlying stream, but we may need to reuse the stream on the + // RxDocumentServiceRequest for future requests. Hence we need to clone without incurring copy cost, so that when + // HttpRequestMessage -> StreamContent -> MemoryStream all get disposed, the original stream will be left open. + switch (resourceOperation.operationType) { + case Create: + requestUri = getResourceFeedUri(resourceOperation.resourceType, physicalAddress, request); + method = HttpMethod.POST; + assert request.getContent() != null; + httpRequestMessage = new HttpRequest(method, requestUri.toString(), physicalAddress.getPort()); + httpRequestMessage.withBody(request.getContent()); + break; + + case ExecuteJavaScript: + requestUri = getResourceEntryUri(resourceOperation.resourceType, physicalAddress, request); + method = HttpMethod.POST; + assert request.getContent() != null; + httpRequestMessage = new HttpRequest(method, requestUri.toString(), physicalAddress.getPort()); + httpRequestMessage.withBody(request.getContent()); + break; + + case Delete: + requestUri = getResourceEntryUri(resourceOperation.resourceType, physicalAddress, request); + method = HttpMethod.DELETE; + httpRequestMessage = new HttpRequest(method, requestUri.toString(), physicalAddress.getPort()); + break; + + case Read: + requestUri = getResourceEntryUri(resourceOperation.resourceType, physicalAddress, request); + method = HttpMethod.GET; + httpRequestMessage = new HttpRequest(method, requestUri.toString(), physicalAddress.getPort()); + break; + + case ReadFeed: + requestUri = getResourceFeedUri(resourceOperation.resourceType, physicalAddress, request); + method = HttpMethod.GET; + httpRequestMessage = new HttpRequest(method, requestUri.toString(), physicalAddress.getPort()); + break; + + case Replace: + requestUri = getResourceEntryUri(resourceOperation.resourceType, physicalAddress, request); + method = HttpMethod.PUT; + assert request.getContent() != null; + httpRequestMessage = new HttpRequest(method, requestUri.toString(), physicalAddress.getPort()); + httpRequestMessage.withBody(request.getContent()); + break; + + case Update: + requestUri = getResourceEntryUri(resourceOperation.resourceType, physicalAddress, request); + method = new HttpMethod("PATCH"); + assert request.getContent() != null; + httpRequestMessage = new HttpRequest(method, requestUri.toString(), physicalAddress.getPort()); + httpRequestMessage.withBody(request.getContent()); + break; + + case Query: + case SqlQuery: + requestUri = getResourceFeedUri(resourceOperation.resourceType, physicalAddress, request); + method = HttpMethod.POST; + assert request.getContent() != null; + httpRequestMessage = new HttpRequest(method, requestUri.toString(), physicalAddress.getPort()); + httpRequestMessage.withBody(request.getContent()); + HttpTransportClient.addHeader(httpRequestMessage.headers(), HttpConstants.HttpHeaders.CONTENT_TYPE, request); + break; + + case Upsert: + requestUri = getResourceFeedUri(resourceOperation.resourceType, physicalAddress, request); + method = HttpMethod.POST; + assert request.getContent() != null; + httpRequestMessage = new HttpRequest(method, requestUri.toString(), physicalAddress.getPort()); + httpRequestMessage.withBody(request.getContent()); + break; + + case Head: + requestUri = getResourceEntryUri(resourceOperation.resourceType, physicalAddress, request); + method = HttpMethod.HEAD; + httpRequestMessage = new HttpRequest(method, requestUri.toString(), physicalAddress.getPort()); + break; + + case 
HeadFeed: + requestUri = getResourceFeedUri(resourceOperation.resourceType, physicalAddress, request); + method = HttpMethod.HEAD; + httpRequestMessage = new HttpRequest(method, requestUri.toString(), physicalAddress.getPort()); + break; + + default: + assert false : "Unsupported operation type"; + throw new IllegalStateException(); + } + + Map documentServiceRequestHeaders = request.getHeaders(); + HttpHeaders httpRequestHeaders = httpRequestMessage.headers(); + + // add default headers + for(Map.Entry entry: defaultHeaders.entrySet()) { + HttpTransportClient.addHeader(httpRequestHeaders, entry.getKey(), entry.getValue()); + } + + HttpTransportClient.addHeader(httpRequestHeaders, HttpConstants.HttpHeaders.VERSION, request); + HttpTransportClient.addHeader(httpRequestHeaders, HttpConstants.HttpHeaders.USER_AGENT, request); + HttpTransportClient.addHeader(httpRequestHeaders, HttpConstants.HttpHeaders.PAGE_SIZE, request); + HttpTransportClient.addHeader(httpRequestHeaders, HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, request); + HttpTransportClient.addHeader(httpRequestHeaders, HttpConstants.HttpHeaders.PRE_TRIGGER_EXCLUDE, request); + HttpTransportClient.addHeader(httpRequestHeaders, HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, request); + HttpTransportClient.addHeader(httpRequestHeaders, HttpConstants.HttpHeaders.POST_TRIGGER_EXCLUDE, request); + HttpTransportClient.addHeader(httpRequestHeaders, HttpConstants.HttpHeaders.AUTHORIZATION, request); + HttpTransportClient.addHeader(httpRequestHeaders, HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, request); + HttpTransportClient.addHeader(httpRequestHeaders, HttpConstants.HttpHeaders.MIGRATE_COLLECTION_DIRECTIVE, request); + HttpTransportClient.addHeader(httpRequestHeaders, HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, request); + HttpTransportClient.addHeader(httpRequestHeaders, HttpConstants.HttpHeaders.SESSION_TOKEN, request); + HttpTransportClient.addHeader(httpRequestHeaders, HttpConstants.HttpHeaders.PREFER, request); + HttpTransportClient.addHeader(httpRequestHeaders, HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY, request); + HttpTransportClient.addHeader(httpRequestHeaders, HttpConstants.HttpHeaders.ENABLE_SCAN_IN_QUERY, request); + HttpTransportClient.addHeader(httpRequestHeaders, HttpConstants.HttpHeaders.EMIT_VERBOSE_TRACES_IN_QUERY, request); + HttpTransportClient.addHeader(httpRequestHeaders, HttpConstants.HttpHeaders.CAN_CHARGE, request); + HttpTransportClient.addHeader(httpRequestHeaders, HttpConstants.HttpHeaders.CAN_THROTTLE, request); + HttpTransportClient.addHeader(httpRequestHeaders, HttpConstants.HttpHeaders.ENABLE_LOW_PRECISION_ORDER_BY, request); + HttpTransportClient.addHeader(httpRequestHeaders, HttpConstants.HttpHeaders.ENABLE_LOGGING, request); + HttpTransportClient.addHeader(httpRequestHeaders, HttpConstants.HttpHeaders.IS_READ_ONLY_SCRIPT, request); + HttpTransportClient.addHeader(httpRequestHeaders, HttpConstants.HttpHeaders.CONTENT_SERIALIZATION_FORMAT, request); + HttpTransportClient.addHeader(httpRequestHeaders, HttpConstants.HttpHeaders.CONTINUATION, request.getContinuation()); + HttpTransportClient.addHeader(httpRequestHeaders, HttpConstants.HttpHeaders.ACTIVITY_ID, activityId.toString()); + HttpTransportClient.addHeader(httpRequestHeaders, HttpConstants.HttpHeaders.PARTITION_KEY, request); + HttpTransportClient.addHeader(httpRequestHeaders, HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID, request); + + String dateHeader = HttpUtils.getDateHeader(documentServiceRequestHeaders); + 
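Editor's note: prepareHttpMessage copies request headers one at a time through the addHeader helper shown earlier, which silently drops null or empty values so optional headers never reach the wire. A tiny self-contained sketch of that pattern, with illustrative header names:

import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative sketch of the null/empty-safe header propagation used by prepareHttpMessage.
final class HeaderCopySketch {
    static void addHeader(Map<String, String> target, String name, String value) {
        if (value != null && !value.isEmpty()) {
            target.put(name, value);
        }
    }

    public static void main(String[] args) {
        Map<String, String> wireHeaders = new LinkedHashMap<>();
        addHeader(wireHeaders, "x-ms-example-required", "value");
        addHeader(wireHeaders, "x-ms-example-optional", null); // silently skipped
        System.out.println(wireHeaders);                       // {x-ms-example-required=value}
    }
}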
HttpTransportClient.addHeader(httpRequestHeaders, HttpConstants.HttpHeaders.X_DATE, dateHeader); + HttpTransportClient.addHeader(httpRequestHeaders, "Match", this.getMatch(request, resourceOperation)); + HttpTransportClient.addHeader(httpRequestHeaders, HttpConstants.HttpHeaders.IF_MODIFIED_SINCE, request); + HttpTransportClient.addHeader(httpRequestHeaders, HttpConstants.HttpHeaders.A_IM, request); + if (!request.getIsNameBased()) { + HttpTransportClient.addHeader(httpRequestHeaders, WFConstants.BackendHeaders.RESOURCE_ID, request.getResourceId()); + } + + HttpTransportClient.addHeader(httpRequestHeaders, WFConstants.BackendHeaders.ENTITY_ID, request.entityId); + + String fanoutRequestHeader = request.getHeaders().get(WFConstants.BackendHeaders.IS_FANOUT_REQUEST); + HttpTransportClient.addHeader(httpRequestMessage.headers(), WFConstants.BackendHeaders.IS_FANOUT_REQUEST, fanoutRequestHeader); + + if (request.getResourceType() == ResourceType.DocumentCollection) { + HttpTransportClient.addHeader(httpRequestHeaders, WFConstants.BackendHeaders.COLLECTION_PARTITION_INDEX, documentServiceRequestHeaders.get(WFConstants.BackendHeaders.COLLECTION_PARTITION_INDEX)); + HttpTransportClient.addHeader(httpRequestHeaders, WFConstants.BackendHeaders.COLLECTION_SERVICE_INDEX, documentServiceRequestHeaders.get(WFConstants.BackendHeaders.COLLECTION_SERVICE_INDEX)); + } + + if (documentServiceRequestHeaders.get(WFConstants.BackendHeaders.BIND_REPLICA_DIRECTIVE) != null) { + HttpTransportClient.addHeader(httpRequestHeaders, WFConstants.BackendHeaders.BIND_REPLICA_DIRECTIVE, documentServiceRequestHeaders.get(WFConstants.BackendHeaders.BIND_REPLICA_DIRECTIVE)); + HttpTransportClient.addHeader(httpRequestHeaders, WFConstants.BackendHeaders.PRIMARY_MASTER_KEY, documentServiceRequestHeaders.get(WFConstants.BackendHeaders.PRIMARY_MASTER_KEY)); + HttpTransportClient.addHeader(httpRequestHeaders, WFConstants.BackendHeaders.SECONDARY_MASTER_KEY, documentServiceRequestHeaders.get(WFConstants.BackendHeaders.SECONDARY_MASTER_KEY)); + HttpTransportClient.addHeader(httpRequestHeaders, WFConstants.BackendHeaders.PRIMARY_READONLY_KEY, documentServiceRequestHeaders.get(WFConstants.BackendHeaders.PRIMARY_READONLY_KEY)); + HttpTransportClient.addHeader(httpRequestHeaders, WFConstants.BackendHeaders.SECONDARY_READONLY_KEY, documentServiceRequestHeaders.get(WFConstants.BackendHeaders.SECONDARY_READONLY_KEY)); + } + + if (documentServiceRequestHeaders.get(HttpConstants.HttpHeaders.CAN_OFFER_REPLACE_COMPLETE) != null) { + HttpTransportClient.addHeader(httpRequestHeaders, HttpConstants.HttpHeaders.CAN_OFFER_REPLACE_COMPLETE, documentServiceRequestHeaders.get(HttpConstants.HttpHeaders.CAN_OFFER_REPLACE_COMPLETE)); + } + + //Query + HttpTransportClient.addHeader(httpRequestHeaders, HttpConstants.HttpHeaders.IS_QUERY, request); + HttpTransportClient.addHeader(httpRequestHeaders, HttpConstants.HttpHeaders.QUERY, request); + + // Upsert + HttpTransportClient.addHeader(httpRequestHeaders, HttpConstants.HttpHeaders.IS_UPSERT, request); + + // SupportSpatialLegacyCoordinates + HttpTransportClient.addHeader(httpRequestHeaders, HttpConstants.HttpHeaders.SUPPORT_SPATIAL_LEGACY_COORDINATES, request); + + HttpTransportClient.addHeader(httpRequestHeaders, WFConstants.BackendHeaders.PARTITION_COUNT, request); + + HttpTransportClient.addHeader(httpRequestHeaders, WFConstants.BackendHeaders.COLLECTION_RID, request); + + // Filter by schema + HttpTransportClient.addHeader(httpRequestHeaders, HttpConstants.HttpHeaders.FILTER_BY_SCHEMA_RESOURCE_ID, 
request); + + // UsePolygonsSmallerThanAHemisphere + HttpTransportClient.addHeader(httpRequestHeaders, HttpConstants.HttpHeaders.USE_POLYGONS_SMALLER_THAN_AHEMISPHERE, request); + HttpTransportClient.addHeader(httpRequestHeaders, HttpConstants.HttpHeaders.GATEWAY_SIGNATURE, request); + + HttpTransportClient.addHeader(httpRequestHeaders, HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, request); + HttpTransportClient.addHeader(httpRequestHeaders, HttpConstants.HttpHeaders.POPULATE_QUERY_METRICS, request); + HttpTransportClient.addHeader(httpRequestHeaders, HttpConstants.HttpHeaders.FORCE_QUERY_SCAN, request); + HttpTransportClient.addHeader(httpRequestHeaders, HttpConstants.HttpHeaders.RESPONSE_CONTINUATION_TOKEN_LIMIT_IN_KB, request); + HttpTransportClient.addHeader(httpRequestHeaders, WFConstants.BackendHeaders.REMOTE_STORAGE_TYPE, request); + HttpTransportClient.addHeader(httpRequestHeaders, WFConstants.BackendHeaders.SHARE_THROUGHPUT, request); + + HttpTransportClient.addHeader(httpRequestHeaders, HttpConstants.HttpHeaders.POPULATE_PARTITION_STATISTICS, request); + HttpTransportClient.addHeader(httpRequestHeaders, HttpConstants.HttpHeaders.POPULATE_COLLECTION_THROUGHPUT_INFO, request); + + HttpTransportClient.addHeader(httpRequestHeaders, HttpConstants.HttpHeaders.REMAINING_TIME_IN_MS_ON_CLIENT_REQUEST, request); + HttpTransportClient.addHeader(httpRequestHeaders, HttpConstants.HttpHeaders.CLIENT_RETRY_ATTEMPT_COUNT, request); + + // target lsn for head requests. + HttpTransportClient.addHeader(httpRequestHeaders, HttpConstants.HttpHeaders.TARGET_LSN, request); + HttpTransportClient.addHeader(httpRequestHeaders, HttpConstants.HttpHeaders.TARGET_GLOBAL_COMMITTED_LSN, request); + + HttpTransportClient.addHeader(httpRequestHeaders, WFConstants.BackendHeaders.FEDERATION_ID_FOR_AUTH, request); + + HttpTransportClient.addHeader(httpRequestHeaders, WFConstants.BackendHeaders.FANOUT_OPERATION_STATE, request); + HttpTransportClient.addHeader(httpRequestHeaders, WFConstants.BackendHeaders.ALLOW_TENTATIVE_WRITES, request); + + HttpTransportClient.addHeader(httpRequestHeaders, CustomHeaders.HttpHeaders.EXCLUDE_SYSTEM_PROPERTIES, request); + + return httpRequestMessage; + } + + static URI getResourceFeedUri(ResourceType resourceType, URI physicalAddress, RxDocumentServiceRequest request) throws Exception { + switch (resourceType) { + case Attachment: + return getAttachmentFeedUri(physicalAddress, request); + case DocumentCollection: + return getCollectionFeedUri(physicalAddress, request); + case Conflict: + return getConflictFeedUri(physicalAddress, request); + case Database: + return getDatabaseFeedUri(physicalAddress); + case Document: + return getDocumentFeedUri(physicalAddress, request); + case Permission: + return getPermissionFeedUri(physicalAddress, request); + case StoredProcedure: + return getStoredProcedureFeedUri(physicalAddress, request); + case Trigger: + return getTriggerFeedUri(physicalAddress, request); + case User: + return getUserFeedUri(physicalAddress, request); + + case UserDefinedFunction: + return getUserDefinedFunctionFeedUri(physicalAddress, request); + case Schema: + return getSchemaFeedUri(physicalAddress, request); + case Offer: + return getOfferFeedUri(physicalAddress, request); + +// Other types: Replica, Module, ModuleCommand, Record, UserDefinedType not applicable to SDK. 
+ + default: + assert false : "Unexpected resource type: " + resourceType; + throw new NotFoundException(); + } + } + + private static URI getResourceEntryUri(ResourceType resourceType, URI physicalAddress, RxDocumentServiceRequest request) throws Exception { + switch (resourceType) { + case Attachment: + return getAttachmentEntryUri(physicalAddress, request); + case DocumentCollection: + return getCollectionEntryUri(physicalAddress, request); + case Conflict: + return getConflictEntryUri(physicalAddress, request); + case Database: + return getDatabaseEntryUri(physicalAddress, request); + case Document: + return getDocumentEntryUri(physicalAddress, request); + case Permission: + return getPermissionEntryUri(physicalAddress, request); + case StoredProcedure: + return getStoredProcedureEntryUri(physicalAddress, request); + case Trigger: + return getTriggerEntryUri(physicalAddress, request); + case User: + return getUserEntryUri(physicalAddress, request); + case UserDefinedFunction: + return getUserDefinedFunctionEntryUri(physicalAddress, request); + case Schema: + return getSchemaEntryUri(physicalAddress, request); + case Offer: + return getOfferEntryUri(physicalAddress, request); + +// Other types: Replica, Module, ModuleCommand, Record, UserDefinedType not applicable to SDK. + + default: + assert false: "Unexpected resource type: " + resourceType; + throw new IllegalStateException(); + } + } + + private static URI createURI(URI baseAddress, String resourcePath) { + return baseAddress.resolve(HttpUtils.urlEncode(trimBeginningAndEndingSlashes(resourcePath))); + } + + static URI getRootFeedUri(URI baseAddress) { + return baseAddress; + } + + private static URI getDatabaseFeedUri(URI baseAddress) { + return createURI(baseAddress, PathsHelper.generatePath(ResourceType.Database, StringUtils.EMPTY, true)); + } + + private static URI getDatabaseEntryUri(URI baseAddress, RxDocumentServiceRequest request) { + return createURI(baseAddress, PathsHelper.generatePath(ResourceType.Database, request, false)); + } + + private static URI getCollectionFeedUri(URI baseAddress, RxDocumentServiceRequest request) { + return createURI(baseAddress, PathsHelper.generatePath(ResourceType.DocumentCollection, request, true)); + } + + private static URI getStoredProcedureFeedUri(URI baseAddress, RxDocumentServiceRequest request) { + return createURI(baseAddress, PathsHelper.generatePath(ResourceType.StoredProcedure, request, true)); + } + + private static URI getTriggerFeedUri(URI baseAddress, RxDocumentServiceRequest request) { + return createURI(baseAddress, PathsHelper.generatePath(ResourceType.Trigger, request, true)); + } + + private static URI getUserDefinedFunctionFeedUri(URI baseAddress, RxDocumentServiceRequest request) { + return createURI(baseAddress, PathsHelper.generatePath(ResourceType.UserDefinedFunction, request, true)); + } + + private static URI getCollectionEntryUri(URI baseAddress, RxDocumentServiceRequest request) { + return createURI(baseAddress, PathsHelper.generatePath(ResourceType.DocumentCollection, request, false)); + } + + private static URI getStoredProcedureEntryUri(URI baseAddress, RxDocumentServiceRequest request) { + return createURI(baseAddress, PathsHelper.generatePath(ResourceType.StoredProcedure, request, false)); + } + + private static URI getTriggerEntryUri(URI baseAddress, RxDocumentServiceRequest request) { + return createURI(baseAddress, PathsHelper.generatePath(ResourceType.Trigger, request, false)); + } + + private static URI getUserDefinedFunctionEntryUri(URI baseAddress, 
RxDocumentServiceRequest request) { + return createURI(baseAddress, PathsHelper.generatePath(ResourceType.UserDefinedFunction, request, false)); + } + + private static URI getDocumentFeedUri(URI baseAddress, RxDocumentServiceRequest request) { + return createURI(baseAddress, PathsHelper.generatePath(ResourceType.Document, request, true)); + } + + private static URI getDocumentEntryUri(URI baseAddress, RxDocumentServiceRequest request) { + return createURI(baseAddress, PathsHelper.generatePath(ResourceType.Document, request, false)); + } + + private static URI getConflictFeedUri(URI baseAddress, RxDocumentServiceRequest request) { + return createURI(baseAddress, PathsHelper.generatePath(ResourceType.Conflict, request, true)); + } + + private static URI getConflictEntryUri(URI baseAddress, RxDocumentServiceRequest request) { + return createURI(baseAddress, PathsHelper.generatePath(ResourceType.Conflict, request, false)); + } + + private static URI getAttachmentFeedUri(URI baseAddress, RxDocumentServiceRequest request) { + return createURI(baseAddress, PathsHelper.generatePath(ResourceType.Attachment, request, true)); + } + + private static URI getAttachmentEntryUri(URI baseAddress, RxDocumentServiceRequest request) { + return createURI(baseAddress, PathsHelper.generatePath(ResourceType.Attachment, request, false)); + } + + private static URI getUserFeedUri(URI baseAddress, RxDocumentServiceRequest request) { + return createURI(baseAddress, PathsHelper.generatePath(ResourceType.User, request, true)); + } + + private static URI getUserEntryUri(URI baseAddress, RxDocumentServiceRequest request) { + return createURI(baseAddress, PathsHelper.generatePath(ResourceType.User, request, false)); + } + + private static URI getPermissionFeedUri(URI baseAddress, RxDocumentServiceRequest request) { + return createURI(baseAddress, PathsHelper.generatePath(ResourceType.Permission, request, true)); + } + + private static URI getPermissionEntryUri(URI baseAddress, RxDocumentServiceRequest request) { + return createURI(baseAddress, PathsHelper.generatePath(ResourceType.Permission, request, false)); + } + + private static URI getOfferFeedUri(URI baseAddress, RxDocumentServiceRequest request) { + return createURI(baseAddress, PathsHelper.generatePath(ResourceType.Offer, request, true)); + } + + private static URI getSchemaFeedUri(URI baseAddress, RxDocumentServiceRequest request) { + return createURI(baseAddress, PathsHelper.generatePath(ResourceType.Schema, request, true)); + } + + private static URI getSchemaEntryUri(URI baseAddress, RxDocumentServiceRequest request) { + return createURI(baseAddress, PathsHelper.generatePath(ResourceType.Schema, request, false)); + } + + private static URI getOfferEntryUri(URI baseAddress, RxDocumentServiceRequest request) { + return createURI(baseAddress, PathsHelper.generatePath(ResourceType.Offer, request, false)); + } + + static String getHeader(String[] names, String[] values, String name) { + for (int idx = 0; idx < names.length; idx++) { + if (Strings.areEqual(names[idx], name)) { + return values[idx]; + } + } + + return null; + } + + private Mono processHttpResponse(String resourceAddress, HttpRequest httpRequest, String activityId, HttpResponse response, URI physicalAddress) { + if (response == null) { + InternalServerErrorException exception = + new InternalServerErrorException( + String.format( + RMResources.ExceptionMessage, + RMResources.InvalidBackendResponse), + null, + physicalAddress); + exception.responseHeaders().put(HttpConstants.HttpHeaders.ACTIVITY_ID, + 
activityId); + exception.responseHeaders().put(HttpConstants.HttpHeaders.REQUEST_VALIDATION_FAILURE, "1"); + + return Mono.error(exception); + } + + // If the status code is < 300 or 304 NotModified (we treat not modified as success) then it means that it's a success code and shouldn't throw. + if (response.statusCode() < HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY || + response.statusCode() == HttpConstants.StatusCodes.NOT_MODIFIED) { + return ResponseUtils.toStoreResponse(response, httpRequest); + } + else { + return this.createErrorResponseFromHttpResponse(resourceAddress, activityId, httpRequest, response); + } + } + + private Mono createErrorResponseFromHttpResponse(String resourceAddress, String activityId, + HttpRequest request, + HttpResponse response) { + int statusCode = response.statusCode(); + Mono errorMessageObs = ErrorUtils.getErrorResponseAsync(response, request); + + return errorMessageObs.flatMap( + errorMessage -> { + long responseLSN = -1; + + List lsnValues = null; + String[] headerValues = response.headers().values(WFConstants.BackendHeaders.LSN); + if (headerValues != null) { + lsnValues = com.google.common.collect.Lists.newArrayList(headerValues); + } + + if (lsnValues != null) { + String temp = lsnValues.isEmpty() ? null : lsnValues.get(0); + responseLSN = Longs.tryParse(temp, responseLSN); + } + + String responsePartitionKeyRangeId = null; + List partitionKeyRangeIdValues = null; + headerValues = response.headers().values(WFConstants.BackendHeaders.PARTITION_KEY_RANGE_ID); + if (headerValues != null) { + partitionKeyRangeIdValues = com.google.common.collect.Lists.newArrayList(headerValues); + } + if (partitionKeyRangeIdValues != null) { + responsePartitionKeyRangeId = Lists.firstOrDefault(partitionKeyRangeIdValues, null); + } + + CosmosClientException exception; + + switch (statusCode) { + case HttpConstants.StatusCodes.UNAUTHORIZED: + exception = new UnauthorizedException( + String.format( + RMResources.ExceptionMessage, + Strings.isNullOrEmpty(errorMessage) ? RMResources.Unauthorized : errorMessage), + response.headers(), + request.uri()); + break; + + case HttpConstants.StatusCodes.FORBIDDEN: + exception = new ForbiddenException( + String.format( + RMResources.ExceptionMessage, + Strings.isNullOrEmpty(errorMessage) ? RMResources.Forbidden : errorMessage), + response.headers(), + request.uri()); + break; + + case HttpConstants.StatusCodes.NOTFOUND: + // HTTP.SYS returns NotFound (404) if the URI + // is not registered. This is really an indication that + // the replica which registered the URI is not + // available at the server. We detect this case by + // the presence of Content-Type header in the response + // and map it to HTTP Gone (410), which is the more + // appropriate response for this case. + if (response.body() != null && response.headers() != null && response.headers().value(HttpConstants.HttpHeaders.CONTENT_TYPE) != null && + !Strings.isNullOrEmpty(response.headers().value(HttpConstants.HttpHeaders.CONTENT_TYPE)) && + Strings.containsIgnoreCase(response.headers().value(HttpConstants.HttpHeaders.CONTENT_TYPE), RuntimeConstants.MediaTypes.TEXT_HTML)) { + // Have the request URL in the exception message for debugging purposes. 
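+                            // Informal sketch of the mapping decided above (header values are illustrative):
+                            //   404 + Content-Type: text/html        -> GoneException (410): the replica no longer owns this URI
+                            //   404 + any other/absent Content-Type  -> NotFoundException (404): the resource itself is missing
+                            // Keeping these distinct lets upstream retry logic re-resolve addresses on Gone instead of
+                            // surfacing a spurious "not found" to the caller.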
+ exception = new GoneException( + String.format( + RMResources.ExceptionMessage, + RMResources.Gone), + request.uri().toString()); + exception.responseHeaders().put(HttpConstants.HttpHeaders.ACTIVITY_ID, + activityId); + + break; + } else { + exception = new NotFoundException( + String.format( + RMResources.ExceptionMessage, + Strings.isNullOrEmpty(errorMessage) ? RMResources.NotFound : errorMessage), + response.headers(), + request.uri()); + break; + } + + case HttpConstants.StatusCodes.BADREQUEST: + exception = new BadRequestException( + String.format( + RMResources.ExceptionMessage, + Strings.isNullOrEmpty(errorMessage) ? RMResources.BadRequest : errorMessage), + response.headers(), + request.uri()); + break; + + case HttpConstants.StatusCodes.METHOD_NOT_ALLOWED: + exception = new MethodNotAllowedException( + String.format( + RMResources.ExceptionMessage, + Strings.isNullOrEmpty(errorMessage) ? RMResources.MethodNotAllowed : errorMessage), + null, + response.headers(), + request.uri().toString()); + break; + + case HttpConstants.StatusCodes.GONE: { + + // TODO: update perf counter + // https://msdata.visualstudio.com/CosmosDB/_workitems/edit/258624 + ErrorUtils.logGoneException(request.uri(), activityId); + + Integer nSubStatus = 0; + String valueSubStatus = response.headers().value(WFConstants.BackendHeaders.SUB_STATUS); + if (!Strings.isNullOrEmpty(valueSubStatus)) { + if ((nSubStatus = Integers.tryParse(valueSubStatus)) == null) { + exception = new InternalServerErrorException( + String.format( + RMResources.ExceptionMessage, + RMResources.InvalidBackendResponse), + response.headers(), + request.uri()); + break; + } + } + + if (nSubStatus == HttpConstants.SubStatusCodes.NAME_CACHE_IS_STALE) { + exception = new InvalidPartitionException( + String.format( + RMResources.ExceptionMessage, + Strings.isNullOrEmpty(errorMessage) ? RMResources.Gone : errorMessage), + response.headers(), + request.uri().toString()); + break; + } else if (nSubStatus == HttpConstants.SubStatusCodes.PARTITION_KEY_RANGE_GONE) { + exception = new PartitionKeyRangeGoneException( + String.format( + RMResources.ExceptionMessage, + Strings.isNullOrEmpty(errorMessage) ? RMResources.Gone : errorMessage), + response.headers(), + request.uri().toString()); + break; + } else if (nSubStatus == HttpConstants.SubStatusCodes.COMPLETING_SPLIT) { + exception = new PartitionKeyRangeIsSplittingException( + String.format( + RMResources.ExceptionMessage, + Strings.isNullOrEmpty(errorMessage) ? RMResources.Gone : errorMessage), + response.headers(), + request.uri().toString()); + break; + } else if (nSubStatus == HttpConstants.SubStatusCodes.COMPLETING_PARTITION_MIGRATION) { + exception = new PartitionIsMigratingException( + String.format( + RMResources.ExceptionMessage, + Strings.isNullOrEmpty(errorMessage) ? RMResources.Gone : errorMessage), + response.headers(), + request.uri().toString()); + break; + } else { + // Have the request URL in the exception message for debugging purposes. + exception = new GoneException( + String.format( + RMResources.ExceptionMessage, + RMResources.Gone), + response.headers(), + request.uri()); + + exception.responseHeaders().put(HttpConstants.HttpHeaders.ACTIVITY_ID, + activityId); + break; + } + } + + case HttpConstants.StatusCodes.CONFLICT: + exception = new ConflictException( + String.format( + RMResources.ExceptionMessage, + Strings.isNullOrEmpty(errorMessage) ? 
RMResources.EntityAlreadyExists : errorMessage), + response.headers(), + request.uri().toString()); + break; + + case HttpConstants.StatusCodes.PRECONDITION_FAILED: + exception = new PreconditionFailedException( + String.format( + RMResources.ExceptionMessage, + Strings.isNullOrEmpty(errorMessage) ? RMResources.PreconditionFailed : errorMessage), + response.headers(), + request.uri().toString()); + break; + + case HttpConstants.StatusCodes.REQUEST_ENTITY_TOO_LARGE: + exception = new RequestEntityTooLargeException( + String.format( + RMResources.ExceptionMessage, + String.format( + RMResources.RequestEntityTooLarge, + HttpConstants.HttpHeaders.PAGE_SIZE)), + response.headers(), + request.uri().toString()); + break; + + case HttpConstants.StatusCodes.LOCKED: + exception = new LockedException( + String.format( + RMResources.ExceptionMessage, + Strings.isNullOrEmpty(errorMessage) ? RMResources.Locked : errorMessage), + response.headers(), + request.uri().toString()); + break; + + case HttpConstants.StatusCodes.SERVICE_UNAVAILABLE: + exception = new ServiceUnavailableException( + String.format( + RMResources.ExceptionMessage, + Strings.isNullOrEmpty(errorMessage) ? RMResources.ServiceUnavailable : errorMessage), + response.headers(), + request.uri()); + break; + + case HttpConstants.StatusCodes.REQUEST_TIMEOUT: + exception = new RequestTimeoutException( + String.format( + RMResources.ExceptionMessage, + Strings.isNullOrEmpty(errorMessage) ? RMResources.RequestTimeout : errorMessage), + response.headers(), + request.uri()); + break; + + case HttpConstants.StatusCodes.RETRY_WITH: + exception = new RetryWithException( + String.format( + RMResources.ExceptionMessage, + Strings.isNullOrEmpty(errorMessage) ? RMResources.RetryWith : errorMessage), + response.headers(), + request.uri()); + break; + + case HttpConstants.StatusCodes.TOO_MANY_REQUESTS: + exception = + new RequestRateTooLargeException( + String.format( + RMResources.ExceptionMessage, + Strings.isNullOrEmpty(errorMessage) ? RMResources.TooManyRequests : errorMessage), + response.headers(), + request.uri()); + + List values = null; + headerValues = response.headers().values(HttpConstants.HttpHeaders.RETRY_AFTER_IN_MILLISECONDS); + if (headerValues != null) { + values = com.google.common.collect.Lists.newArrayList(headerValues); + } + if (values == null || values.isEmpty()) { + logger.warn("RequestRateTooLargeException being thrown without RetryAfter."); + } else { + exception.responseHeaders().put(HttpConstants.HttpHeaders.RETRY_AFTER_IN_MILLISECONDS, values.get(0)); + } + + break; + + case HttpConstants.StatusCodes.INTERNAL_SERVER_ERROR: + exception = new InternalServerErrorException( + String.format( + RMResources.ExceptionMessage, + Strings.isNullOrEmpty(errorMessage) ? RMResources.InternalServerError : errorMessage), + response.headers(), + request.uri()); + break; + + default: + logger.error("Unrecognized status code {} returned by backend. 
ActivityId {}", statusCode, activityId); + ErrorUtils.logException(request.uri(), activityId); + exception = new InternalServerErrorException( + String.format( + RMResources.ExceptionMessage, + RMResources.InvalidBackendResponse), + response.headers(), + request.uri()); + break; + } + + BridgeInternal.setLSN(exception, responseLSN); + BridgeInternal.setPartitionKeyRangeId(exception, responsePartitionKeyRangeId); + BridgeInternal.setResourceAddress(exception, resourceAddress); + BridgeInternal.setRequestHeaders(exception, HttpUtils.asMap(request.headers())); + + return Mono.error(exception); + } + ); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/HttpUtils.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/HttpUtils.java new file mode 100644 index 0000000000000..1d3bc513800f5 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/HttpUtils.java @@ -0,0 +1,118 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.internal.Constants.UrlEncodingInfo; +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.Strings; +import com.azure.data.cosmos.internal.http.HttpHeaders; +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.UnsupportedEncodingException; +import java.net.URI; +import java.net.URLDecoder; +import java.net.URLEncoder; +import java.util.AbstractMap; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; + +public class HttpUtils { + + private static Logger log = LoggerFactory.getLogger(HttpUtils.class); + + public static String urlEncode(String url) { + try { + return URLEncoder.encode(url, UrlEncodingInfo.UTF_8).replaceAll(UrlEncodingInfo.PLUS_SYMBOL_ESCAPED, UrlEncodingInfo.SINGLE_SPACE_URI_ENCODING); + } catch (UnsupportedEncodingException e) { + log.error("failed to encode {}", url, e); + throw new IllegalArgumentException("failed to encode url " + url, e); + } + } + + public static String urlDecode(String url) { + try { + return URLDecoder.decode(url.replaceAll(UrlEncodingInfo.PLUS_SYMBOL_ESCAPED, UrlEncodingInfo.PLUS_SYMBOL_URI_ENCODING), UrlEncodingInfo.UTF_8); + } catch (UnsupportedEncodingException e) { + log.error("failed to decode {}", url, e); + throw new IllegalArgumentException("failed to decode url " + url, e); + } + } + + public static URI toURI(String uri) { + try { + return new URI(uri); + } catch (Exception e) { + log.error("failed to parse {}", uri, e); + throw new IllegalArgumentException("failed to parse uri " + uri, e); + } + } + + public static Map asMap(HttpHeaders headers) { + if (headers == null) { + return new HashMap<>(); + } + HashMap map = new HashMap<>(headers.size()); + for (Entry entry : headers.toMap().entrySet()) { + if (entry.getKey().equals(HttpConstants.HttpHeaders.OWNER_FULL_NAME)) { + map.put(entry.getKey(), HttpUtils.urlDecode(entry.getValue())); + } else { + map.put(entry.getKey(), entry.getValue()); + } + } + return map; + } + + public static String getDateHeader(Map headerValues) { + if (headerValues == null) { + return StringUtils.EMPTY; + } + + // Since Date header is overridden by some proxies/http client libraries, we support + // an additional date header 'x-ms-date' and prefer that to the regular 'date' header. + String date = headerValues.get(HttpConstants.HttpHeaders.X_DATE); + if (Strings.isNullOrEmpty(date)) { + date = headerValues.get(HttpConstants.HttpHeaders.HTTP_DATE); + } + + return date != null ? 
date : StringUtils.EMPTY; + } + + public static List> unescape(Set> headers) { + List> result = new ArrayList<>(headers.size()); + for (Entry entry : headers) { + if (entry.getKey().equals(HttpConstants.HttpHeaders.OWNER_FULL_NAME)) { + String unescapedUrl = HttpUtils.urlDecode(entry.getValue()); + entry = new AbstractMap.SimpleEntry<>(entry.getKey(), unescapedUrl); + } + result.add(entry); + } + return result; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/IAddressCache.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/IAddressCache.java new file mode 100644 index 0000000000000..9a507d14f0305 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/IAddressCache.java @@ -0,0 +1,46 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import com.azure.data.cosmos.internal.routing.PartitionKeyRangeIdentity; +import reactor.core.publisher.Mono; + +public interface IAddressCache { + + /** + * Resolves physical addresses by either PartitionKeyRangeIdentity. + * + * + * @param request Request is needed only by GatewayAddressCache in the only case when request is name based and user has name based auth token. + * PartitionkeyRangeIdentity can be used to locate auth token in this case. + * @param partitionKeyRangeIdentity target partition key range Id + * @param forceRefreshPartitionAddresses Whether addresses need to be refreshed as previously resolved addresses were determined to be outdated. + * @return Physical addresses. 
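+     * <p>
+     * Hypothetical caller sketch (identifiers are illustrative only and not part of this interface's contract):
+     * <pre>
+     * cache.tryGetAddresses(request, new PartitionKeyRangeIdentity("collectionRid", "0"), false)
+     *      .map(addresses -> pickHealthyReplica(addresses));   // pickHealthyReplica is a made-up helper
+     * </pre>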
+ */ + Mono tryGetAddresses( + RxDocumentServiceRequest request, + PartitionKeyRangeIdentity partitionKeyRangeIdentity, + boolean forceRefreshPartitionAddresses); +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/IAddressResolver.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/IAddressResolver.java new file mode 100644 index 0000000000000..e8a783002b926 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/IAddressResolver.java @@ -0,0 +1,33 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import reactor.core.publisher.Mono; + +public interface IAddressResolver { + Mono resolveAsync( + RxDocumentServiceRequest request, + boolean forceRefreshPartitionAddresses); +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/IStoreClient.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/IStoreClient.java new file mode 100644 index 0000000000000..e06cd8c72e993 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/IStoreClient.java @@ -0,0 +1,56 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.internal.IRetryPolicy; +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import com.azure.data.cosmos.internal.RxDocumentServiceResponse; +import reactor.core.publisher.Mono; + +import java.util.function.Function; + +public interface IStoreClient { + + Mono processMessageAsync( + RxDocumentServiceRequest request, + IRetryPolicy retryPolicy, + Function> prepareRequestAsyncDelegate); + + default Mono processMessageAsync( + RxDocumentServiceRequest request, + Function> prepareRequestAsyncDelegate) { + return processMessageAsync(request, null, prepareRequestAsyncDelegate); + } + + default Mono processMessageAsync( + RxDocumentServiceRequest request, + IRetryPolicy retryPolicy) { + return processMessageAsync(request, retryPolicy, null); + } + + default Mono processMessageAsync( + RxDocumentServiceRequest request) { + return processMessageAsync(request, null, null); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/Protocol.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/Protocol.java new file mode 100644 index 0000000000000..326f48939b542 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/Protocol.java @@ -0,0 +1,46 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import org.apache.commons.text.WordUtils; + +public enum Protocol { + HTTPS, TCP; + + String scheme() { + switch (this) { + case HTTPS: + return "https"; + case TCP: + return "rntbd"; + default: + throw new IllegalStateException(); + } + } + + @Override + public String toString() { + return WordUtils.capitalizeFully(this.name()); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/QueryRequestPerformanceActivity.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/QueryRequestPerformanceActivity.java new file mode 100644 index 0000000000000..e0805afffc0ed --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/QueryRequestPerformanceActivity.java @@ -0,0 +1,29 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.directconnectivity; + +// TODO troubleshooting info +// https://msdata.visualstudio.com/CosmosDB/_workitems/edit/258624 +public class QueryRequestPerformanceActivity { +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/QuorumReader.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/QuorumReader.java new file mode 100644 index 0000000000000..923f70a6ab604 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/QuorumReader.java @@ -0,0 +1,810 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.directconnectivity; + + +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.GoneException; +import com.azure.data.cosmos.InternalServerErrorException; +import com.azure.data.cosmos.internal.Configs; +import com.azure.data.cosmos.internal.IAuthorizationTokenProvider; +import com.azure.data.cosmos.internal.JavaStreamUtils; +import com.azure.data.cosmos.internal.MutableVolatile; +import com.azure.data.cosmos.internal.Quadruple; +import com.azure.data.cosmos.internal.RMResources; +import com.azure.data.cosmos.internal.RequestChargeTracker; +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import org.apache.commons.lang3.tuple.Pair; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.time.Duration; +import java.util.Comparator; +import java.util.List; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import java.util.stream.Collectors; + +import static com.azure.data.cosmos.internal.Utils.ValueHolder; + +// +//================================================================================================================= +// STRONG read logic: +//================================================================================================================= +// +// ------------------- PerformPrimaryRead------------------------------------------------------------- +// | ^ | +// [RetryOnSecondary] | | +// | [QuorumNotSelected] | +// \/ | \/ +// Start-------------------------->SecondaryQuorumRead-------------[QuorumMet]-------------------------------->Result +// | ^ +// [QuorumSelected] | +// | | +// \/ | +// PrimaryReadBarrier------------------------------------------------------------- +// +//================================================================================================================= +// BOUNDED_STALENESS quorum read logic: +//================================================================================================================= +// +// ------------------- PerformPrimaryRead------------------------------------------------------------- +// | ^ | +// [RetryOnSecondary] | | +// | [QuorumNotSelected] | +// \/ | \/ +// Start-------------------------->SecondaryQuorumRead-------------[QuorumMet]-------------------------------->Result +// | ^ +// [QuorumSelected] | +// | | +// | | +// --------------------------------------------------------------------------- +// +/** + * QuorumReader wraps the client side quorum logic on top of the StoreReader + */ +public class QuorumReader { + private final static Logger logger = LoggerFactory.getLogger(QuorumReader.class); + + private final int maxNumberOfReadBarrierReadRetries; + private final int maxNumberOfPrimaryReadRetries; + private final int maxNumberOfReadQuorumRetries; + private final int delayBetweenReadBarrierCallsInMs; + + private final int maxBarrierRetriesForMultiRegion; + private final int barrierRetryIntervalInMsForMultiRegion; + + private final int maxShortBarrierRetriesForMultiRegion; + private final int shortBarrierRetryIntervalInMsForMultiRegion; + + private 
final StoreReader storeReader; + private final GatewayServiceConfigurationReader serviceConfigReader; + private final IAuthorizationTokenProvider authorizationTokenProvider; + + public QuorumReader( + Configs configs, + TransportClient transportClient, + AddressSelector addressSelector, + StoreReader storeReader, + GatewayServiceConfigurationReader serviceConfigReader, + IAuthorizationTokenProvider authorizationTokenProvider) { + this.storeReader = storeReader; + this.serviceConfigReader = serviceConfigReader; + this.authorizationTokenProvider = authorizationTokenProvider; + + this.maxNumberOfReadBarrierReadRetries = configs.getMaxNumberOfReadBarrierReadRetries(); + this.maxNumberOfPrimaryReadRetries = configs.getMaxNumberOfPrimaryReadRetries(); + this.maxNumberOfReadQuorumRetries = configs.getMaxNumberOfReadQuorumRetries(); + this.delayBetweenReadBarrierCallsInMs = configs.getDelayBetweenReadBarrierCallsInMs(); + this.maxBarrierRetriesForMultiRegion = configs.getMaxBarrierRetriesForMultiRegion(); + this.barrierRetryIntervalInMsForMultiRegion = configs.getBarrierRetryIntervalInMsForMultiRegion(); + this.maxShortBarrierRetriesForMultiRegion = configs.getMaxShortBarrierRetriesForMultiRegion(); + this.shortBarrierRetryIntervalInMsForMultiRegion = configs.getShortBarrierRetryIntervalInMsForMultiRegion(); + } + + public QuorumReader( + TransportClient transportClient, + AddressSelector addressSelector, + StoreReader storeReader, + GatewayServiceConfigurationReader serviceConfigReader, + IAuthorizationTokenProvider authorizationTokenProvider, + Configs configs) { + this(configs, transportClient, addressSelector, storeReader, serviceConfigReader, authorizationTokenProvider); + } + + public Mono readStrongAsync( + RxDocumentServiceRequest entity, + int readQuorumValue, + ReadMode readMode) { + final MutableVolatile shouldRetryOnSecondary = new MutableVolatile<>(false); + final MutableVolatile hasPerformedReadFromPrimary = new MutableVolatile<>(false); + + return Flux.defer( + // the following will be repeated till the repeat().takeUntil(.) condition is satisfied. 
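+                // Pattern note (informal): the chain below, Flux.defer(body).repeat(maxNumberOfReadQuorumRetries)
+                //     .takeUntil(x -> !shouldRetryOnSecondary.v),
+                // re-subscribes to the deferred read attempt until either a result is produced, the read no longer
+                // requests a retry on the secondaries (shouldRetryOnSecondary stays false), or the retry budget is
+                // exhausted; the concatWith(...) further below then supplies the terminal GoneException once every
+                // retry has been consumed.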
+ () -> { + if (entity.requestContext.timeoutHelper.isElapsed()) { + return Flux.error(new GoneException()); + } + + shouldRetryOnSecondary.v = false; + Mono secondaryQuorumReadResultObs = + this.readQuorumAsync(entity, readQuorumValue, false, readMode); + + return secondaryQuorumReadResultObs.flux().flatMap( + secondaryQuorumReadResult -> { + + switch (secondaryQuorumReadResult.quorumResult) { + case QuorumMet: + try { + return Flux.just(secondaryQuorumReadResult.getResponse()); + } catch (CosmosClientException e) { + return Flux.error(e); + } + + case QuorumSelected: + Mono barrierRequestObs = BarrierRequestHelper.createAsync( + entity, + this.authorizationTokenProvider, + secondaryQuorumReadResult.selectedLsn, + secondaryQuorumReadResult.globalCommittedSelectedLsn); + + return barrierRequestObs.flux().flatMap(barrierRequest -> { + Mono readBarrierObs = this.waitForReadBarrierAsync( + barrierRequest, + true /* include primary */, + readQuorumValue, + secondaryQuorumReadResult.selectedLsn, + secondaryQuorumReadResult.globalCommittedSelectedLsn, + readMode); + + return readBarrierObs.flux().flatMap( + readBarrier -> { + + if (readBarrier) { + try { + return Flux.just(secondaryQuorumReadResult.getResponse()); + } catch (Exception e) { + return Flux.error(e); + } + } + + // else barrier was not successful + logger.warn( + "QuorumSelected: Could not converge on the LSN {} GlobalCommittedLSN {} after primary read barrier with read quorum {} for strong read, Responses: {}", + secondaryQuorumReadResult.selectedLsn, + secondaryQuorumReadResult.globalCommittedSelectedLsn, + readQuorumValue, + String.join(";", secondaryQuorumReadResult.storeResponses) + ); + + entity.requestContext.quorumSelectedStoreResponse = secondaryQuorumReadResult.selectedResponse; + entity.requestContext.storeResponses = secondaryQuorumReadResult.storeResponses; + entity.requestContext.quorumSelectedLSN = secondaryQuorumReadResult.selectedLsn; + entity.requestContext.globalCommittedSelectedLSN = secondaryQuorumReadResult.globalCommittedSelectedLsn; + + return Flux.empty(); + } + ); + }); + + case QuorumNotSelected: + if (hasPerformedReadFromPrimary.v) { + logger.warn("QuorumNotSelected: Primary read already attempted. Quorum could not be selected after retrying on secondaries."); + return Flux.error(new GoneException(RMResources.ReadQuorumNotMet)); + } + + logger.warn("QuorumNotSelected: Quorum could not be selected with read quorum of {}", readQuorumValue); + Mono responseObs = this.readPrimaryAsync(entity, readQuorumValue, false); + + return responseObs.flux().flatMap( + response -> { + if (response.isSuccessful && response.shouldRetryOnSecondary) { + assert false : "QuorumNotSelected: PrimaryResult has both Successful and shouldRetryOnSecondary flags set"; + logger.error("PrimaryResult has both Successful and shouldRetryOnSecondary flags set"); + } else if (response.isSuccessful) { + logger.debug("QuorumNotSelected: ReadPrimary successful"); + try { + return Flux.just(response.getResponse()); + } catch (CosmosClientException e) { + return Flux.error(e); + } + } else if (response.shouldRetryOnSecondary) { + shouldRetryOnSecondary.v = true; + logger.warn("QuorumNotSelected: ReadPrimary did not succeed. 
Will retry on secondary."); + hasPerformedReadFromPrimary.v = true; + } else { + logger.warn("QuorumNotSelected: Could not get successful response from ReadPrimary"); + return Flux.error(new GoneException(String.format(RMResources.ReadQuorumNotMet, readQuorumValue))); + } + + return Flux.empty(); + + } + ); + + default: + logger.error("Unknown ReadQuorum result {}", secondaryQuorumReadResult.quorumResult.toString()); + return Flux.error(new InternalServerErrorException(RMResources.InternalServerError)); + } + + }); + }).repeat(maxNumberOfReadQuorumRetries) + .takeUntil(dummy -> !shouldRetryOnSecondary.v) + .concatWith(Flux.defer(() -> { + logger.warn("Could not complete read quorum with read quorum value of {}", readQuorumValue); + + return Flux.error(new GoneException( + String.format( + RMResources.ReadQuorumNotMet, + readQuorumValue))); + })) + .take(1) + .single(); + } + + private Mono readQuorumAsync( + RxDocumentServiceRequest entity, + int readQuorum, + boolean includePrimary, + ReadMode readMode) { + if (entity.requestContext.timeoutHelper.isElapsed()) { + return Mono.error(new GoneException()); + } + + return ensureQuorumSelectedStoreResponse(entity, readQuorum, includePrimary, readMode).flatMap( + res -> { + if (res.getLeft() != null) { + // no need for barrier + return Mono.just(res.getKey()); + } + + long readLsn = res.getValue().getValue0(); + long globalCommittedLSN = res.getValue().getValue1(); + StoreResult storeResult = res.getValue().getValue2(); + List storeResponses = res.getValue().getValue3(); + + // ReadBarrier required + Mono barrierRequestObs = BarrierRequestHelper.createAsync(entity, this.authorizationTokenProvider, readLsn, globalCommittedLSN); + return barrierRequestObs.flatMap( + barrierRequest -> { + Mono waitForObs = this.waitForReadBarrierAsync(barrierRequest, false, readQuorum, readLsn, globalCommittedLSN, readMode); + return waitForObs.flatMap( + waitFor -> { + if (!waitFor) { + return Mono.just(new ReadQuorumResult( + entity.requestContext.requestChargeTracker, + ReadQuorumResultKind.QuorumSelected, + readLsn, + globalCommittedLSN, + storeResult, + storeResponses)); + } + + return Mono.just(new ReadQuorumResult( + entity.requestContext.requestChargeTracker, + ReadQuorumResultKind.QuorumMet, + readLsn, + globalCommittedLSN, + storeResult, + storeResponses)); + } + ); + } + ); + } + ); + } + + private Mono>>> ensureQuorumSelectedStoreResponse(RxDocumentServiceRequest entity, int readQuorum, boolean includePrimary, ReadMode readMode) { + + if (entity.requestContext.quorumSelectedStoreResponse == null) { + Mono> responseResultObs = this.storeReader.readMultipleReplicaAsync( + entity, includePrimary, readQuorum, true /*required valid LSN*/, false, readMode); + + return responseResultObs.flatMap( + responseResult -> { + List storeResponses = responseResult.stream().map(response -> response.toString()).collect(Collectors.toList()); + int responseCount = (int) responseResult.stream().filter(response -> response.isValid).count(); + if (responseCount < readQuorum) { + return Mono.just(Pair.of(new ReadQuorumResult(entity.requestContext.requestChargeTracker, + ReadQuorumResultKind.QuorumNotSelected, + -1, -1, null, storeResponses), null)); + } + + //either request overrides consistency level with strong, or request does not override and account default consistency level is strong + boolean isGlobalStrongReadCandidate = + (ReplicatedResourceClient.isGlobalStrongEnabled() && this.serviceConfigReader.getDefaultConsistencyLevel() == ConsistencyLevel.STRONG) && + 
(entity.requestContext.originalRequestConsistencyLevel == null || entity.requestContext.originalRequestConsistencyLevel == ConsistencyLevel.STRONG); + + ValueHolder readLsn = new ValueHolder(-1); + ValueHolder globalCommittedLSN = new ValueHolder(-1); + ValueHolder storeResult = new ValueHolder(null); + + if (this.isQuorumMet( + responseResult, + readQuorum, + false, + isGlobalStrongReadCandidate, + readLsn, + globalCommittedLSN, + storeResult)) { + return Mono.just(Pair.of(new ReadQuorumResult( + entity.requestContext.requestChargeTracker, + ReadQuorumResultKind.QuorumMet, + readLsn.v, + globalCommittedLSN.v, + storeResult.v, + storeResponses), null)); + } + + // at this point, if refresh were necessary, we would have refreshed it in ReadMultipleReplicaAsync + // so set to false here to avoid further refrehses for this request. + entity.requestContext.forceRefreshAddressCache = false; + + Quadruple> state = Quadruple.with(readLsn.v, globalCommittedLSN.v, storeResult.v, storeResponses); + return Mono.just(Pair.of(null, state)); + } + ); + } else { + + ValueHolder readLsn = ValueHolder.initialize(entity.requestContext.quorumSelectedLSN); + ValueHolder globalCommittedLSN = ValueHolder.initialize(entity.requestContext.globalCommittedSelectedLSN); + ValueHolder storeResult = ValueHolder.initialize(entity.requestContext.quorumSelectedStoreResponse); + List storeResponses = entity.requestContext.storeResponses; + Quadruple> state = Quadruple.with(readLsn.v, globalCommittedLSN.v, storeResult.v, storeResponses); + + return Mono.just(Pair.of(null, state)); + } + } + + /** + * READ and get response from Primary + * + * @param entity + * @param readQuorum + * @param useSessionToken + * @return + */ + private Mono readPrimaryAsync( + RxDocumentServiceRequest entity, + int readQuorum, + boolean useSessionToken) { + if (entity.requestContext.timeoutHelper.isElapsed()) { + return Mono.error(new GoneException()); + } + + // We would have already refreshed address before reaching here. Avoid performing here. + entity.requestContext.forceRefreshAddressCache = false; + + Mono storeResultObs = this.storeReader.readPrimaryAsync( + entity, true /*required valid LSN*/, useSessionToken); + + return storeResultObs.flatMap( + storeResult -> { + if (!storeResult.isValid) { + try { + return Mono.error(storeResult.getException()); + } catch (InternalServerErrorException e) { + return Mono.error(e); + } + } + + if (storeResult.currentReplicaSetSize <= 0 || storeResult.lsn < 0 || storeResult.quorumAckedLSN < 0) { + String message = String.format( + "INVALID value received from response header. CurrentReplicaSetSize %d, StoreLSN %d, QuorumAckedLSN %d", + storeResult.currentReplicaSetSize, storeResult.lsn, storeResult.quorumAckedLSN); + + // might not be returned if primary is still building the secondary replicas (during churn) + logger.error(message); + + // throw exception instead of returning inconsistent result. + return Mono.error(new GoneException(String.format(RMResources.ReadQuorumNotMet, readQuorum))); + + } + + if (storeResult.currentReplicaSetSize > readQuorum) { + logger.warn( + "Unexpected response. 
Replica Set size is {} which is greater than min value {}", storeResult.currentReplicaSetSize, readQuorum); + return Mono.just(new ReadPrimaryResult(entity.requestContext.requestChargeTracker, /*isSuccessful */ false, + /* shouldRetryOnSecondary: */ true, /* response: */ null)); + } + + // To accommodate for store latency, where an LSN may be acked by not persisted in the store, we compare the quorum acked LSN and store LSN. + // In case of sync replication, the store LSN will follow the quorum committed LSN + // In case of async replication (if enabled for bounded staleness), the store LSN can be ahead of the quorum committed LSN if the primary is able write to faster than secondary acks. + // We pick higher of the 2 LSN and wait for the other to reach that LSN. + if (storeResult.lsn != storeResult.quorumAckedLSN) { + logger.warn("Store LSN {} and quorum acked LSN {} don't match", storeResult.lsn, storeResult.quorumAckedLSN); + long higherLsn = storeResult.lsn > storeResult.quorumAckedLSN ? storeResult.lsn : storeResult.quorumAckedLSN; + + Mono waitForLsnRequestObs = BarrierRequestHelper.createAsync(entity, this.authorizationTokenProvider, higherLsn, null); + return waitForLsnRequestObs.flatMap( + waitForLsnRequest -> { + Mono primaryWaitForLsnResponseObs = this.waitForPrimaryLsnAsync(waitForLsnRequest, higherLsn, readQuorum); + return primaryWaitForLsnResponseObs.map( + primaryWaitForLsnResponse -> { + if (primaryWaitForLsnResponse == PrimaryReadOutcome.QuorumNotMet) { + return new ReadPrimaryResult( + entity.requestContext.requestChargeTracker, /*(isSuccessful: */ false, /* shouldRetryOnSecondary: */ false, /* response: */null); + } else if (primaryWaitForLsnResponse == PrimaryReadOutcome.QuorumInconclusive) { + return new ReadPrimaryResult( + entity.requestContext.requestChargeTracker, /* isSuccessful: */ false, /* shouldRetryOnSecondary: */ + true, /* response: */ null); + } + + return new ReadPrimaryResult( + entity.requestContext.requestChargeTracker, /* isSuccessful: */ true, /* shouldRetryOnSecondary: */ false, /*response: */ storeResult); + } + ); + } + ); + } + + return Mono.just(new ReadPrimaryResult( + /* requestChargeTracker: */ entity.requestContext.requestChargeTracker, /* isSuccessful: */ true, /* shouldRetryOnSecondary:*/ false, + /*response: */ storeResult)); + } + ); + } + + private Mono waitForPrimaryLsnAsync( + RxDocumentServiceRequest barrierRequest, + long lsnToWaitFor, + int readQuorum) { + + return Flux.defer(() -> { + if (barrierRequest.requestContext.timeoutHelper.isElapsed()) { + return Flux.error(new GoneException()); + } + + // We would have already refreshed address before reaching here. Avoid performing here. + barrierRequest.requestContext.forceRefreshAddressCache = false; + + Mono storeResultObs = this.storeReader.readPrimaryAsync(barrierRequest, true /*required valid LSN*/, false); + + return storeResultObs.flux().flatMap( + storeResult -> { + if (!storeResult.isValid) { + try { + return Flux.error(storeResult.getException()); + } catch (InternalServerErrorException e) { + return Flux.error(e); + } + } + + if (storeResult.currentReplicaSetSize > readQuorum) { + logger.warn( + "Unexpected response. 
Replica Set size is {} which is greater than min value {}", storeResult.currentReplicaSetSize, readQuorum); + return Flux.just(PrimaryReadOutcome.QuorumInconclusive); + } + + // Java this will move to the repeat logic + if (storeResult.lsn < lsnToWaitFor || storeResult.quorumAckedLSN < lsnToWaitFor) { + logger.warn( + "Store LSN {} or quorum acked LSN {} are lower than expected LSN {}", storeResult.lsn, storeResult.quorumAckedLSN, lsnToWaitFor); + + return Flux.just(0L).delayElements(Duration.ofMillis(delayBetweenReadBarrierCallsInMs)).flatMap(dummy -> Flux.empty()); + } + + return Flux.just(PrimaryReadOutcome.QuorumMet); + } + ); + }).repeat(maxNumberOfPrimaryReadRetries) // Loop for store and quorum LSN to match + .defaultIfEmpty(PrimaryReadOutcome.QuorumNotMet) + .take(1) + .single(); + + } + + private Mono waitForReadBarrierAsync( + RxDocumentServiceRequest barrierRequest, + boolean allowPrimary, + final int readQuorum, + final long readBarrierLsn, + final long targetGlobalCommittedLSN, + ReadMode readMode) { + AtomicInteger readBarrierRetryCount = new AtomicInteger(maxNumberOfReadBarrierReadRetries); + AtomicInteger readBarrierRetryCountMultiRegion = new AtomicInteger(maxBarrierRetriesForMultiRegion); + + AtomicLong maxGlobalCommittedLsn = new AtomicLong(0); + + return Flux.defer(() -> { + + if (barrierRequest.requestContext.timeoutHelper.isElapsed()) { + return Flux.error(new GoneException()); + } + + Mono> responsesObs = this.storeReader.readMultipleReplicaAsync( + barrierRequest, allowPrimary, readQuorum, + true /*required valid LSN*/, false /*useSessionToken*/, readMode, false /*checkMinLSN*/, true /*forceReadAll*/); + + return responsesObs.flux().flatMap( + responses -> { + + long maxGlobalCommittedLsnInResponses = responses.size() > 0 ? responses.stream() + .mapToLong(response -> response.globalCommittedLSN).max().getAsLong() : 0; + + + if ((responses.stream().filter(response -> response.lsn >= readBarrierLsn).count() >= readQuorum) && + (!(targetGlobalCommittedLSN > 0) || maxGlobalCommittedLsnInResponses >= targetGlobalCommittedLSN)) { + return Flux.just(true); + } + + maxGlobalCommittedLsn.set(maxGlobalCommittedLsn.get() > maxGlobalCommittedLsnInResponses ? + maxGlobalCommittedLsn.get() : maxGlobalCommittedLsnInResponses); + + //only refresh on first barrier call, set to false for subsequent attempts. + barrierRequest.requestContext.forceRefreshAddressCache = false; + + if (readBarrierRetryCount.decrementAndGet() == 0) { + logger.debug("QuorumReader: waitForReadBarrierAsync - Last barrier for single-region requests. Responses: {}", + JavaStreamUtils.toString(responses, "; ")); + + // retries exhausted + return Flux.just(false); + + } else { + // delay + //await Task.Delay(QuorumReader.delayBetweenReadBarrierCallsInMs); + return Flux.empty(); + + } + } + ); + }).repeatWhen(obs -> obs.flatMap(aVoid -> Flux.just(0L).delayElements(Duration.ofMillis(delayBetweenReadBarrierCallsInMs)))) + .take(1) // Retry loop + .flatMap(barrierRequestSucceeded -> + Flux.defer(() -> { + + if (barrierRequestSucceeded) { + return Flux.just(true); + } + + // we will go into global strong read barrier mode for global strong requests after regular barrier calls have been exhausted. 
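+                        // Informal back-off sketch for the multi-region barrier loop that follows (all constants come
+                        // from Configs, so actual values are deployment-specific): the first
+                        // maxShortBarrierRetriesForMultiRegion re-reads wait shortBarrierRetryIntervalInMsForMultiRegion
+                        // between attempts, the remaining attempts wait the longer barrierRetryIntervalInMsForMultiRegion,
+                        // until maxBarrierRetriesForMultiRegion attempts have been consumed or enough replicas report the
+                        // target global committed LSN.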
+ if (targetGlobalCommittedLSN > 0) { + return Flux.defer(() -> { + + if (barrierRequest.requestContext.timeoutHelper.isElapsed()) { + return Flux.error(new GoneException()); + } + + Mono> responsesObs = this.storeReader.readMultipleReplicaAsync( + barrierRequest, allowPrimary, readQuorum, + true /*required valid LSN*/, false /*useSessionToken*/, readMode, false /*checkMinLSN*/, true /*forceReadAll*/); + + return responsesObs.flux().flatMap( + responses -> { + long maxGlobalCommittedLsnInResponses = responses.size() > 0 ? responses.stream() + .mapToLong(response -> response.globalCommittedLSN).max().getAsLong() : 0; + + if ((responses.stream().filter(response -> response.lsn >= readBarrierLsn).count() >= readQuorum) && + maxGlobalCommittedLsnInResponses >= targetGlobalCommittedLSN) { + return Flux.just(true); + } + + maxGlobalCommittedLsn.set(maxGlobalCommittedLsn.get() > maxGlobalCommittedLsnInResponses ? + maxGlobalCommittedLsn.get() : maxGlobalCommittedLsnInResponses); + + //trace on last retry. + if (readBarrierRetryCountMultiRegion.getAndDecrement() == 0) { + logger.debug("QuorumReader: waitForReadBarrierAsync - Last barrier for mult-region strong requests. Responses: {}", + JavaStreamUtils.toString(responses, "; ")); + return Flux.just(false); + } else { + return Flux.empty(); + } + } + ); + + }).repeatWhen(obs -> obs.flatMap(aVoid -> { + + if ((maxBarrierRetriesForMultiRegion - readBarrierRetryCountMultiRegion.get()) > maxShortBarrierRetriesForMultiRegion) { + return Flux.just(0L).delayElements(Duration.ofMillis(barrierRetryIntervalInMsForMultiRegion)); + } else { + return Flux.just(0L).delayElements(Duration.ofMillis(shortBarrierRetryIntervalInMsForMultiRegion)); + } + + }) + // stop predicate, simulating while loop + ).take(1); + } + + return Flux.empty(); + })). + concatWith( + Flux.defer(() -> { + logger.debug("QuorumReader: waitForReadBarrierAsync - TargetGlobalCommittedLsn: {}, MaxGlobalCommittedLsn: {}.", targetGlobalCommittedLSN, maxGlobalCommittedLsn); + return Flux.just(false); + }) + ).take(1).single(); + } + + private boolean isQuorumMet( + List readResponses, + int readQuorum, + boolean isPrimaryIncluded, + boolean isGlobalStrongRead, + ValueHolder readLsn, + ValueHolder globalCommittedLSN, + ValueHolder selectedResponse) { + long maxLsn = 0; + long minLsn = Long.MAX_VALUE; + int replicaCountMaxLsn = 0; + List validReadResponses = readResponses.stream().filter(response -> response.isValid).collect(Collectors.toList()); + int validResponsesCount = validReadResponses.size(); + + if (validResponsesCount == 0) { + readLsn.v = 0l; + globalCommittedLSN.v = -1l; + selectedResponse.v = null; + + return false; + } + + assert !validReadResponses.isEmpty(); + long numberOfReadRegions = validReadResponses.stream().map(res -> res.numberOfReadRegions).max(Comparator.naturalOrder()).get(); + boolean checkForGlobalStrong = isGlobalStrongRead && numberOfReadRegions > 0; + + // Pick any R replicas in the response and check if they are at the same LSN + for (StoreResult response : validReadResponses) { + if (response.lsn == maxLsn) { + replicaCountMaxLsn++; + } else if (response.lsn > maxLsn) { + replicaCountMaxLsn = 1; + maxLsn = response.lsn; + } + + if (response.lsn < minLsn) { + minLsn = response.lsn; + } + } + + final long maxLsnFinal = maxLsn; + selectedResponse.v = validReadResponses.stream().filter(s -> s.lsn == maxLsnFinal).findFirst().get(); + + readLsn.v = selectedResponse.v.itemLSN == -1 ? 
+ maxLsn : Math.min(selectedResponse.v.itemLSN, maxLsn); + globalCommittedLSN.v = checkForGlobalStrong ? readLsn.v : -1l; + + long maxGlobalCommittedLSN = validReadResponses.stream().mapToLong(res -> res.globalCommittedLSN).max().getAsLong(); + + logger.debug("QuorumReader: MaxLSN {} ReplicaCountMaxLSN {} bCheckGlobalStrong {} MaxGlobalCommittedLSN {} NumberOfReadRegions {} SelectedResponseItemLSN {}", + maxLsn, replicaCountMaxLsn, checkForGlobalStrong, maxGlobalCommittedLSN, numberOfReadRegions, selectedResponse.v.itemLSN); + + // quorum is met if one of the following conditions are satisfied: + // 1. readLsn is greater than zero + // AND the number of responses that have the same LSN as the selected response is greater than or equal to the read quorum + // AND if applicable, the max GlobalCommittedLSN of all responses is greater than or equal to the lsn of the selected response. + + // 2. if the request is a point-read request, + // AND there are more than one response in the readResponses + // AND the LSN of the returned resource of the selected response is less than or equal to the minimum lsn of the all the responses, + // AND if applicable, the LSN of the returned resource of the selected response is less than or equal to the minimum globalCommittedLsn of all the responses. + // This means that the returned resource is old enough to have been committed by at least all the received responses, + // which should be larger than or equal to the read quorum, which therefore means we have strong consistency. + boolean isQuorumMet = false; + + if ((readLsn.v > 0 && replicaCountMaxLsn >= readQuorum) && + (!checkForGlobalStrong || maxGlobalCommittedLSN >= maxLsn)) { + isQuorumMet = true; + } + + if (!isQuorumMet && validResponsesCount >= readQuorum && selectedResponse.v.itemLSN != -1 && + (minLsn != Long.MAX_VALUE && selectedResponse.v.itemLSN <= minLsn) && + (!checkForGlobalStrong || (selectedResponse.v.itemLSN <= maxGlobalCommittedLSN))) { + isQuorumMet = true; + } + + return isQuorumMet; + } + + private enum ReadQuorumResultKind { + QuorumMet, + QuorumSelected, + QuorumNotSelected + } + + private abstract class ReadResult { + private final StoreResult response; + private final RequestChargeTracker requestChargeTracker; + + protected ReadResult(RequestChargeTracker requestChargeTracker, StoreResult response) { + this.requestChargeTracker = requestChargeTracker; + this.response = response; + } + + public StoreResponse getResponse() throws CosmosClientException { + if (!this.isValidResult()) { + logger.error("getResponse called for invalid result"); + throw new InternalServerErrorException(RMResources.InternalServerError); + } + + return this.response.toResponse(requestChargeTracker); + } + + protected abstract boolean isValidResult(); + } + + private class ReadQuorumResult extends ReadResult { + public ReadQuorumResult( + RequestChargeTracker requestChargeTracker, + ReadQuorumResultKind QuorumResult, + long selectedLsn, + long globalCommittedSelectedLsn, + StoreResult selectedResponse, + List storeResponses) { + super(requestChargeTracker, selectedResponse); + + this.quorumResult = QuorumResult; + this.selectedLsn = selectedLsn; + this.globalCommittedSelectedLsn = globalCommittedSelectedLsn; + this.selectedResponse = selectedResponse; + this.storeResponses = storeResponses; + } + + public final ReadQuorumResultKind quorumResult; + + /** + * Response selected to lock on the LSN. 
This is the response with the highest LSN + */ + public final StoreResult selectedResponse; + + /** + * ALL store responses from Quorum READ. + */ + public final List storeResponses; + + public final long selectedLsn; + + public final long globalCommittedSelectedLsn; + + protected boolean isValidResult() { + return this.quorumResult == ReadQuorumResultKind.QuorumMet || this.quorumResult == ReadQuorumResultKind.QuorumSelected; + } + } + + private class ReadPrimaryResult extends ReadResult { + public final boolean shouldRetryOnSecondary; + public final boolean isSuccessful; + + public ReadPrimaryResult(RequestChargeTracker requestChargeTracker, boolean isSuccessful, boolean shouldRetryOnSecondary, StoreResult response) { + super(requestChargeTracker, response); + this.isSuccessful = isSuccessful; + this.shouldRetryOnSecondary = shouldRetryOnSecondary; + } + + protected boolean isValidResult() { + return isSuccessful; + } + } + + private enum PrimaryReadOutcome { + QuorumNotMet, // Primary LSN is not committed. + QuorumInconclusive, // Secondary replicas are available. Must read R secondary's to deduce current quorum. + QuorumMet, + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/ReadMode.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/ReadMode.java new file mode 100644 index 0000000000000..2368f4b5d5668 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/ReadMode.java @@ -0,0 +1,32 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.directconnectivity; + +public enum ReadMode { + Primary, // Test hook + Strong, + BoundedStaleness, + Any +} + diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/ReplicatedResourceClient.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/ReplicatedResourceClient.java new file mode 100644 index 0000000000000..ff9df5aa91b55 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/ReplicatedResourceClient.java @@ -0,0 +1,195 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.internal.ISessionContainer; +import com.azure.data.cosmos.internal.BackoffRetryUtility; +import com.azure.data.cosmos.internal.Configs; +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.IAuthorizationTokenProvider; +import com.azure.data.cosmos.internal.OperationType; +import com.azure.data.cosmos.internal.Quadruple; +import com.azure.data.cosmos.internal.ReplicatedResourceClientUtils; +import com.azure.data.cosmos.internal.ResourceType; +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Mono; + +import java.time.Duration; +import java.util.function.BiFunction; +import java.util.function.Function; + +/** + * ReplicatedResourceClient uses the ConsistencyReader to make requests to + * backend + */ +public class ReplicatedResourceClient { + private final Logger logger = LoggerFactory.getLogger(ReplicatedResourceClient.class); + private static final int GONE_AND_RETRY_WITH_TIMEOUT_IN_SECONDS = 30; + private static final int STRONG_GONE_AND_RETRY_WITH_RETRY_TIMEOUT_SECONDS = 60; + private static final int MIN_BACKOFF_FOR_FAILLING_BACK_TO_OTHER_REGIONS_FOR_READ_REQUESTS_IN_SECONDS = 1; + + private final AddressSelector addressSelector; + private final ConsistencyReader consistencyReader; + private final ConsistencyWriter consistencyWriter; + private final Protocol protocol; + private final TransportClient transportClient; + private final boolean enableReadRequestsFallback; + private final GatewayServiceConfigurationReader 
serviceConfigReader;
+    private final Configs configs;
+
+    public ReplicatedResourceClient(
+            Configs configs,
+            AddressSelector addressSelector,
+            ISessionContainer sessionContainer,
+            TransportClient transportClient,
+            GatewayServiceConfigurationReader serviceConfigReader,
+            IAuthorizationTokenProvider authorizationTokenProvider,
+            boolean enableReadRequestsFallback,
+            boolean useMultipleWriteLocations) {
+        this.configs = configs;
+        this.protocol = configs.getProtocol();
+        this.addressSelector = addressSelector;
+        if (protocol != Protocol.HTTPS && protocol != Protocol.TCP) {
+            throw new IllegalArgumentException("protocol");
+        }
+
+        this.transportClient = transportClient;
+        this.serviceConfigReader = serviceConfigReader;
+
+        this.consistencyReader = new ConsistencyReader(configs,
+            this.addressSelector,
+            sessionContainer,
+            transportClient,
+            serviceConfigReader,
+            authorizationTokenProvider);
+        this.consistencyWriter = new ConsistencyWriter(
+            this.addressSelector,
+            sessionContainer,
+            transportClient,
+            authorizationTokenProvider,
+            serviceConfigReader,
+            useMultipleWriteLocations);
+        this.enableReadRequestsFallback = enableReadRequestsFallback;
+    }
+
+    public static boolean isReadingFromMaster(ResourceType resourceType, OperationType operationType) {
+        return ReplicatedResourceClientUtils.isReadingFromMaster(resourceType, operationType);
+    }
+
+    public static boolean isMasterResource(ResourceType resourceType) {
+        return ReplicatedResourceClientUtils.isMasterResource(resourceType);
+    }
+
+    public static boolean isGlobalStrongEnabled() {
+        return true;
+    }
+
+    public Mono<StoreResponse> invokeAsync(RxDocumentServiceRequest request,
+                                           Function<RxDocumentServiceRequest, Mono<RxDocumentServiceRequest>> prepareRequestAsyncDelegate) {
+        BiFunction<Quadruple<Boolean, Boolean, Duration, Integer>, RxDocumentServiceRequest, Mono<StoreResponse>> mainFuncDelegate = (
+                Quadruple<Boolean, Boolean, Duration, Integer> forceRefreshAndTimeout,
+                RxDocumentServiceRequest documentServiceRequest) -> {
+            documentServiceRequest.getHeaders().put(HttpConstants.HttpHeaders.CLIENT_RETRY_ATTEMPT_COUNT,
+                forceRefreshAndTimeout.getValue3().toString());
+            documentServiceRequest.getHeaders().put(HttpConstants.HttpHeaders.REMAINING_TIME_IN_MS_ON_CLIENT_REQUEST,
+                Long.toString(forceRefreshAndTimeout.getValue2().toMillis()));
+            return invokeAsync(request, new TimeoutHelper(forceRefreshAndTimeout.getValue2()),
+                forceRefreshAndTimeout.getValue1(), forceRefreshAndTimeout.getValue0());
+
+        };
+        Function<Quadruple<Boolean, Boolean, Duration, Integer>, Mono<StoreResponse>> funcDelegate = (
+                Quadruple<Boolean, Boolean, Duration, Integer> forceRefreshAndTimeout) -> {
+            if (prepareRequestAsyncDelegate != null) {
+                return prepareRequestAsyncDelegate.apply(request).flatMap(responseReq -> mainFuncDelegate.apply(forceRefreshAndTimeout, responseReq));
+            } else {
+                return mainFuncDelegate.apply(forceRefreshAndTimeout, request);
+            }
+
+        };
+
+        Function<Quadruple<Boolean, Boolean, Duration, Integer>, Mono<StoreResponse>> inBackoffFuncDelegate = null;
+
+        // we will enable fallback to other regions if the following conditions are met:
+        // 1. request is a read operation AND
+        // 2. enableReadRequestsFallback is set to true.
(can only ever be true if + // direct mode, on client) + if (request.isReadOnlyRequest() && this.enableReadRequestsFallback) { + if (request.requestContext.cosmosResponseDiagnostics == null) { + request.requestContext.cosmosResponseDiagnostics = BridgeInternal.createCosmosResponseDiagnostics(); + } + RxDocumentServiceRequest freshRequest = request.clone(); + inBackoffFuncDelegate = (Quadruple forceRefreshAndTimeout) -> { + RxDocumentServiceRequest readRequestClone = freshRequest.clone(); + + if (prepareRequestAsyncDelegate != null) { + return prepareRequestAsyncDelegate.apply(readRequestClone).flatMap(responseReq -> { + logger.trace("Executing inBackoffAlternateCallbackMethod on readRegionIndex {}", forceRefreshAndTimeout.getValue3()); + responseReq.requestContext.RouteToLocation(forceRefreshAndTimeout.getValue3(), true); + return invokeAsync(responseReq, new TimeoutHelper(forceRefreshAndTimeout.getValue2()), + forceRefreshAndTimeout.getValue1(), + forceRefreshAndTimeout.getValue0()); + }); + } else { + logger.trace("Executing inBackoffAlternateCallbackMethod on readRegionIndex {}", forceRefreshAndTimeout.getValue3()); + readRequestClone.requestContext.RouteToLocation(forceRefreshAndTimeout.getValue3(), true); + return invokeAsync(readRequestClone, new TimeoutHelper(forceRefreshAndTimeout.getValue2()), + forceRefreshAndTimeout.getValue1(), + forceRefreshAndTimeout.getValue0()); + } + + }; + } + + int retryTimeout = this.serviceConfigReader.getDefaultConsistencyLevel() == ConsistencyLevel.STRONG ? + ReplicatedResourceClient.STRONG_GONE_AND_RETRY_WITH_RETRY_TIMEOUT_SECONDS : + ReplicatedResourceClient.GONE_AND_RETRY_WITH_TIMEOUT_IN_SECONDS; + + return BackoffRetryUtility.executeAsync(funcDelegate, new GoneAndRetryWithRetryPolicy(request, retryTimeout), + inBackoffFuncDelegate, Duration.ofSeconds( + ReplicatedResourceClient.MIN_BACKOFF_FOR_FAILLING_BACK_TO_OTHER_REGIONS_FOR_READ_REQUESTS_IN_SECONDS)); + } + + private Mono invokeAsync(RxDocumentServiceRequest request, TimeoutHelper timeout, + boolean isInRetry, boolean forceRefresh) { + + if (request.getOperationType().equals(OperationType.ExecuteJavaScript)) { + if (request.isReadOnlyScript()) { + return this.consistencyReader.readAsync(request, timeout, isInRetry, forceRefresh); + } else { + return this.consistencyWriter.writeAsync(request, timeout, forceRefresh); + } + } else if (request.getOperationType().isWriteOperation()) { + return this.consistencyWriter.writeAsync(request, timeout, forceRefresh); + } else if (request.isReadOnlyRequest()) { + return this.consistencyReader.readAsync(request, timeout, isInRetry, forceRefresh); + } else { + throw new IllegalArgumentException( + String.format("Unexpected operation type %s", request.getOperationType())); + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/RequestHelper.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/RequestHelper.java new file mode 100644 index 0000000000000..ad18b133b6d0b --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/RequestHelper.java @@ -0,0 +1,57 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, 
and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.BadRequestException; +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.RMResources; +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import com.azure.data.cosmos.internal.Strings; +import org.apache.commons.lang3.EnumUtils; + +public class RequestHelper { + public static ConsistencyLevel GetConsistencyLevelToUse(GatewayServiceConfigurationReader serviceConfigReader, + RxDocumentServiceRequest request) throws CosmosClientException { + ConsistencyLevel consistencyLevelToUse = serviceConfigReader.getDefaultConsistencyLevel(); + + String requestConsistencyLevelHeaderValue = request.getHeaders().get(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL); + + if (!Strings.isNullOrEmpty(requestConsistencyLevelHeaderValue)) { + ConsistencyLevel requestConsistencyLevel = EnumUtils.getEnum(ConsistencyLevel.class, Strings.fromCamelCaseToUpperCase(requestConsistencyLevelHeaderValue)); + if (requestConsistencyLevel == null) { + throw new BadRequestException( + String.format( + RMResources.InvalidHeaderValue, + requestConsistencyLevelHeaderValue, + HttpConstants.HttpHeaders.CONSISTENCY_LEVEL)); + } + + consistencyLevelToUse = requestConsistencyLevel; + } + + return consistencyLevelToUse; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/ResourceOperation.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/ResourceOperation.java new file mode 100644 index 0000000000000..230fa1d6ae0b7 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/ResourceOperation.java @@ -0,0 +1,39 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.internal.OperationType; +import com.azure.data.cosmos.internal.ResourceType; + +public class ResourceOperation { + public final OperationType operationType; + public final ResourceType resourceType; + + public ResourceOperation( + OperationType operationType, + ResourceType resourceType) { + this.operationType = operationType; + this.resourceType = resourceType; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/ResponseUtils.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/ResponseUtils.java new file mode 100644 index 0000000000000..6cecf893b76bc --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/ResponseUtils.java @@ -0,0 +1,80 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.internal.http.HttpHeaders; +import com.azure.data.cosmos.internal.http.HttpRequest; +import com.azure.data.cosmos.internal.http.HttpResponse; +import io.netty.buffer.ByteBuf; +import io.netty.handler.codec.http.HttpMethod; +import org.apache.commons.lang3.StringUtils; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.charset.StandardCharsets; + +class ResponseUtils { + private final static int INITIAL_RESPONSE_BUFFER_SIZE = 1024; + + public static Mono toString(Flux contentObservable) { + return contentObservable + .reduce( + new ByteArrayOutputStream(INITIAL_RESPONSE_BUFFER_SIZE), + (out, bb) -> { + try { + bb.readBytes(out, bb.readableBytes()); + return out; + } catch (IOException e) { + throw new RuntimeException(e); + } + }) + .map(out -> new String(out.toByteArray(), StandardCharsets.UTF_8)); + } + + static Mono toStoreResponse(HttpResponse httpClientResponse, HttpRequest httpRequest) { + + HttpHeaders httpResponseHeaders = httpClientResponse.headers(); + + Mono contentObservable; + + if (httpRequest.httpMethod() == HttpMethod.DELETE) { + // for delete we don't expect any body + contentObservable = Mono.just(StringUtils.EMPTY); + } else { + contentObservable = toString(httpClientResponse.body()); + } + + return contentObservable.flatMap(content -> { + try { + // transforms to Mono + StoreResponse rsp = new StoreResponse(httpClientResponse.statusCode(), HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()), content); + return Mono.just(rsp); + } catch (Exception e) { + return Mono.error(e); + } + }); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/RntbdTransportClient.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/RntbdTransportClient.java new file mode 100644 index 0000000000000..28cb645ee9f5b --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/RntbdTransportClient.java @@ -0,0 +1,381 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.internal.Configs; +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import com.azure.data.cosmos.internal.UserAgentContainer; +import com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdEndpoint; +import com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdMetrics; +import com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdObjectMapper; +import com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdRequestArgs; +import com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdRequestRecord; +import com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdServiceEndpoint; +import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.databind.SerializerProvider; +import com.fasterxml.jackson.databind.annotation.JsonSerialize; +import com.fasterxml.jackson.databind.ser.std.StdSerializer; +import io.netty.handler.ssl.SslContext; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Mono; +import reactor.core.publisher.SignalType; + +import java.io.IOException; +import java.net.URI; +import java.time.Duration; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; + +import static com.google.common.base.Preconditions.checkArgument; +import static com.google.common.base.Preconditions.checkNotNull; +import static com.google.common.base.Preconditions.checkState; + +@JsonSerialize(using = RntbdTransportClient.JsonSerializer.class) +public final class RntbdTransportClient extends TransportClient { + + // region Fields + + private static final AtomicLong instanceCount = new AtomicLong(); + private static final Logger logger = LoggerFactory.getLogger(RntbdTransportClient.class); + private static final String namePrefix = RntbdTransportClient.class.getSimpleName() + '-'; + + private final AtomicBoolean closed = new AtomicBoolean(); + private final RntbdEndpoint.Provider endpointProvider; + private final RntbdMetrics metrics; + private final String name; + + // endregion + + // region Constructors + + RntbdTransportClient(final RntbdEndpoint.Provider endpointProvider) { + this.name = RntbdTransportClient.namePrefix + RntbdTransportClient.instanceCount.incrementAndGet(); + this.endpointProvider = endpointProvider; + this.metrics = new RntbdMetrics(this.name); + } + + RntbdTransportClient(final Options options, final SslContext sslContext) { + this(new RntbdServiceEndpoint.Provider(options, sslContext)); + } + + RntbdTransportClient(final Configs configs, final int requestTimeoutInSeconds, final UserAgentContainer userAgent) { + this(new Options.Builder(requestTimeoutInSeconds).userAgent(userAgent).build(), configs.getSslContext()); + } + + // endregion + + // region Methods + + @Override + public void close() { + + logger.debug("\n [{}] CLOSE", this); + + if (this.closed.compareAndSet(false, true)) { + this.endpointProvider.close(); + this.metrics.close(); + return; + } + + logger.debug("\n [{}]\n already closed", this); + } + + @Override + public Mono invokeStoreAsync(final URI physicalAddress, final RxDocumentServiceRequest request) { + + checkNotNull(physicalAddress, "physicalAddress"); + checkNotNull(request, "request"); + this.throwIfClosed(); + + final RntbdRequestArgs requestArgs = new RntbdRequestArgs(request, physicalAddress); + + if (logger.isDebugEnabled()) { + requestArgs.traceOperation(logger, null, "invokeStoreAsync"); + logger.debug("\n 
[{}]\n {}\n INVOKE_STORE_ASYNC", this, requestArgs); + } + + final RntbdEndpoint endpoint = this.endpointProvider.get(physicalAddress); + this.metrics.incrementRequestCount(); + + final RntbdRequestRecord requestRecord = endpoint.request(requestArgs); + + requestRecord.whenComplete((response, error) -> { + this.metrics.incrementResponseCount(); + if (error != null) { + this.metrics.incrementErrorResponseCount(); + } + }); + + return Mono.fromFuture(requestRecord).doFinally(signal -> { + if (signal == SignalType.CANCEL) { + requestRecord.cancel(false); + } + }); + } + + @Override + public String toString() { + return RntbdObjectMapper.toJson(this); + } + + private void throwIfClosed() { + checkState(!this.closed.get(), "%s is closed", this); + } + + // endregion + + // region Types + + static final class JsonSerializer extends StdSerializer { + + public JsonSerializer() { + this(null); + } + + public JsonSerializer(Class type) { + super(type); + } + + @Override + public void serialize(RntbdTransportClient value, JsonGenerator generator, SerializerProvider provider) throws IOException { + + generator.writeStartObject(); + + generator.writeArrayFieldStart(value.name); + + value.endpointProvider.list().forEach(endpoint -> { + try { + generator.writeObject(endpoint); + } catch (IOException error) { + logger.error("failed to serialize {} due to ", endpoint.getName(), error); + } + }); + + generator.writeEndArray(); + + generator.writeObjectField("config", value.endpointProvider.config()); + generator.writeObjectField("metrics", value.metrics); + generator.writeEndObject(); + } + } + + public static final class Options { + + // region Fields + + private final String certificateHostNameOverride; + private final int maxChannelsPerEndpoint; + private final int maxRequestsPerChannel; + private final Duration connectionTimeout; + private final int partitionCount; + private final Duration receiveHangDetectionTime; + private final Duration requestTimeout; + private final Duration sendHangDetectionTime; + private final UserAgentContainer userAgent; + + // endregion + + // region Constructors + + private Options(Builder builder) { + + this.certificateHostNameOverride = builder.certificateHostNameOverride; + this.maxChannelsPerEndpoint = builder.maxChannelsPerEndpoint; + this.maxRequestsPerChannel = builder.maxRequestsPerChannel; + this.connectionTimeout = builder.connectionTimeout == null ? 
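+            // No explicit connection timeout was configured on the builder: fall back to the request timeout.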
builder.requestTimeout : builder.connectionTimeout; + this.partitionCount = builder.partitionCount; + this.requestTimeout = builder.requestTimeout; + this.receiveHangDetectionTime = builder.receiveHangDetectionTime; + this.sendHangDetectionTime = builder.sendHangDetectionTime; + this.userAgent = builder.userAgent; + } + + // endregion + + // region Accessors + + public String getCertificateHostNameOverride() { + return this.certificateHostNameOverride; + } + + public int getMaxChannelsPerEndpoint() { + return this.maxChannelsPerEndpoint; + } + + public int getMaxRequestsPerChannel() { + return this.maxRequestsPerChannel; + } + + public Duration getConnectionTimeout() { + return this.connectionTimeout; + } + + public int getPartitionCount() { + return this.partitionCount; + } + + public Duration getReceiveHangDetectionTime() { + return this.receiveHangDetectionTime; + } + + public Duration getRequestTimeout() { + return this.requestTimeout; + } + + public Duration getSendHangDetectionTime() { + return this.sendHangDetectionTime; + } + + public UserAgentContainer getUserAgent() { + return this.userAgent; + } + + // endregion + + // region Methods + + @Override + public String toString() { + return RntbdObjectMapper.toJson(this); + } + + // endregion + + // region Types + + public static class Builder { + + // region Fields + + private static final UserAgentContainer DEFAULT_USER_AGENT_CONTAINER = new UserAgentContainer(); + private static final Duration SIXTY_FIVE_SECONDS = Duration.ofSeconds(65L); + private static final Duration TEN_SECONDS = Duration.ofSeconds(10L); + + // Required parameters + + private String certificateHostNameOverride = null; + + // Optional parameters + + private int maxChannelsPerEndpoint = 10; + private int maxRequestsPerChannel = 30; + private Duration connectionTimeout = null; + private int partitionCount = 1; + private Duration receiveHangDetectionTime = SIXTY_FIVE_SECONDS; + private Duration requestTimeout; + private Duration sendHangDetectionTime = TEN_SECONDS; + private UserAgentContainer userAgent = DEFAULT_USER_AGENT_CONTAINER; + + // endregion + + // region Constructors + + public Builder(Duration requestTimeout) { + this.requestTimeout(requestTimeout); + } + + public Builder(int requestTimeoutInSeconds) { + this(Duration.ofSeconds(requestTimeoutInSeconds)); + } + + // endregion + + // region Methods + + public Options build() { + return new Options(this); + } + + public Builder certificateHostNameOverride(final String value) { + this.certificateHostNameOverride = value; + return this; + } + + public Builder connectionTimeout(final Duration value) { + checkArgument(value == null || value.compareTo(Duration.ZERO) > 0, "value: %s", value); + this.connectionTimeout = value; + return this; + } + + public Builder maxRequestsPerChannel(final int value) { + checkArgument(value > 0, "value: %s", value); + this.maxRequestsPerChannel = value; + return this; + } + + public Builder maxChannelsPerEndpoint(final int value) { + checkArgument(value > 0, "value: %s", value); + this.maxChannelsPerEndpoint = value; + return this; + } + + public Builder partitionCount(final int value) { + checkArgument(value > 0, "value: %s", value); + this.partitionCount = value; + return this; + } + + public Builder receiveHangDetectionTime(final Duration value) { + + checkNotNull(value, "value: null"); + checkArgument(value.compareTo(Duration.ZERO) > 0, "value: %s", value); + + this.receiveHangDetectionTime = value; + return this; + } + + public Builder requestTimeout(final Duration value) 
{ + + checkNotNull(value, "value: null"); + checkArgument(value.compareTo(Duration.ZERO) > 0, "value: %s", value); + + this.requestTimeout = value; + return this; + } + + public Builder sendHangDetectionTime(final Duration value) { + + checkNotNull(value, "value: null"); + checkArgument(value.compareTo(Duration.ZERO) > 0, "value: %s", value); + + this.sendHangDetectionTime = value; + return this; + } + + public Builder userAgent(final UserAgentContainer value) { + checkNotNull(value, "value: null"); + this.userAgent = value; + return this; + } + + // endregion + } + + // endregion + } + + // endregion +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/ServerProperties.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/ServerProperties.java new file mode 100644 index 0000000000000..bf79099d9e495 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/ServerProperties.java @@ -0,0 +1,43 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ + +package com.azure.data.cosmos.internal.directconnectivity; + +final public class ServerProperties { + + final private String agent, version; + + public ServerProperties(String agent, String version) { + this.agent = agent; + this.version = version; + } + + public String getAgent() { + return this.agent; + } + + public String getVersion() { + return this.version; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/ServerStoreModel.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/ServerStoreModel.java new file mode 100644 index 0000000000000..2c0731145e505 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/ServerStoreModel.java @@ -0,0 +1,72 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.directconnectivity; + + +import com.azure.data.cosmos.BadRequestException; +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.RMResources; +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import com.azure.data.cosmos.internal.RxDocumentServiceResponse; +import com.azure.data.cosmos.internal.RxStoreModel; +import com.azure.data.cosmos.internal.Strings; +import org.apache.commons.lang3.EnumUtils; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +public class ServerStoreModel implements RxStoreModel { + private final StoreClient storeClient; + + public ServerStoreModel(StoreClient storeClient) { + this.storeClient = storeClient; + } + + public Flux processMessage(RxDocumentServiceRequest request) { + String requestConsistencyLevelHeaderValue = request.getHeaders().get(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL); + + request.requestContext.originalRequestConsistencyLevel = null; + + if (!Strings.isNullOrEmpty(requestConsistencyLevelHeaderValue)) { + ConsistencyLevel requestConsistencyLevel; + + if ((requestConsistencyLevel = EnumUtils.getEnum(ConsistencyLevel.class, Strings.fromCamelCaseToUpperCase(requestConsistencyLevelHeaderValue))) == null) { + return Flux.error(new BadRequestException( + String.format( + RMResources.InvalidHeaderValue, + requestConsistencyLevelHeaderValue, + HttpConstants.HttpHeaders.CONSISTENCY_LEVEL))); + } + + request.requestContext.originalRequestConsistencyLevel = requestConsistencyLevel; + } + + if (ReplicatedResourceClient.isMasterResource(request.getResourceType())) { + request.getHeaders().put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, ConsistencyLevel.STRONG.toString()); + } + + Mono response = this.storeClient.processMessageAsync(request); + return response.flux(); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/ServiceConfig.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/ServiceConfig.java new file mode 100644 index 0000000000000..c3c4c5c31bd7d --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/ServiceConfig.java @@ -0,0 +1,38 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.directconnectivity; + +class ServiceConfig { + final static ServiceConfig instance = new ServiceConfig(); + public SystemReplicationPolicy systemReplicationPolicy = new SystemReplicationPolicy(); + public SystemReplicationPolicy userReplicationPolicy = new SystemReplicationPolicy(); + + public static ServiceConfig getInstance() { + return instance; + } + + class SystemReplicationPolicy { + public static final int MaxReplicaSetSize = 4; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/StoreClient.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/StoreClient.java new file mode 100644 index 0000000000000..f12f4d6f17e8e --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/StoreClient.java @@ -0,0 +1,238 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.InternalServerErrorException; +import com.azure.data.cosmos.internal.BackoffRetryUtility; +import com.azure.data.cosmos.internal.Configs; +import com.azure.data.cosmos.internal.Exceptions; +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.IAuthorizationTokenProvider; +import com.azure.data.cosmos.internal.IRetryPolicy; +import com.azure.data.cosmos.internal.ISessionToken; +import com.azure.data.cosmos.internal.OperationType; +import com.azure.data.cosmos.internal.RMResources; +import com.azure.data.cosmos.internal.ResourceType; +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import com.azure.data.cosmos.internal.RxDocumentServiceResponse; +import com.azure.data.cosmos.internal.SessionContainer; +import com.azure.data.cosmos.internal.SessionTokenHelper; +import com.azure.data.cosmos.internal.Strings; +import com.azure.data.cosmos.internal.Utils; +import org.apache.commons.lang3.math.NumberUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Mono; + +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.Callable; +import java.util.function.Function; + +/** + * Instantiated to issue direct connectivity requests to the backend on: + * - GATEWAY (for gateway mode clients) + * - Client (for direct mode clients) + * StoreClient uses the ReplicatedResourceClient to make requests to the backend. + */ +public class StoreClient implements IStoreClient { + private final Logger logger = LoggerFactory.getLogger(StoreClient.class); + private final GatewayServiceConfigurationReader serviceConfigurationReader; + + private final SessionContainer sessionContainer; + private final ReplicatedResourceClient replicatedResourceClient; + private final TransportClient transportClient; + private final String ZERO_PARTITION_KEY_RANGE = "0"; + + public StoreClient( + Configs configs, + IAddressResolver addressResolver, + SessionContainer sessionContainer, + GatewayServiceConfigurationReader serviceConfigurationReader, IAuthorizationTokenProvider userTokenProvider, + TransportClient transportClient, + boolean useMultipleWriteLocations) { + this.transportClient = transportClient; + this.sessionContainer = sessionContainer; + this.serviceConfigurationReader = serviceConfigurationReader; + this.replicatedResourceClient = new ReplicatedResourceClient( + configs, + new AddressSelector(addressResolver, configs.getProtocol()), + sessionContainer, + this.transportClient, + serviceConfigurationReader, + userTokenProvider, + false, + useMultipleWriteLocations); + } + + @Override + public Mono processMessageAsync(RxDocumentServiceRequest request, IRetryPolicy retryPolicy, Function> prepareRequestAsyncDelegate) { + if (request == null) { + throw new NullPointerException("request"); + } + + Callable> storeResponseDelegate = () -> this.replicatedResourceClient.invokeAsync(request, prepareRequestAsyncDelegate); + + Mono storeResponse; + try { + storeResponse = retryPolicy != null + ? 
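+                // A retry policy was supplied: run the store call through BackoffRetryUtility; otherwise invoke the delegate once, directly.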
BackoffRetryUtility.executeRetry(storeResponseDelegate, retryPolicy) + : storeResponseDelegate.call(); + } catch (Exception e) { + return Mono.error(e); + } + + storeResponse = storeResponse.doOnError(e -> { + try { + CosmosClientException exception = Utils.as(e, CosmosClientException.class); + + if (exception == null) { + return; + } + + exception = BridgeInternal.setCosmosResponseDiagnostics(exception, request.requestContext.cosmosResponseDiagnostics); + + handleUnsuccessfulStoreResponse(request, exception); + } catch (Throwable throwable) { + logger.error("Unexpected failure in handling orig [{}]", e.getMessage(), e); + logger.error("Unexpected failure in handling orig [{}] : new [{}]", e.getMessage(), throwable.getMessage(), throwable); + } + } + ); + + return storeResponse.flatMap(sr -> { + try { + return Mono.just(this.completeResponse(sr, request)); + } catch (Exception e) { + return Mono.error(e); + } + }); + } + + private void handleUnsuccessfulStoreResponse(RxDocumentServiceRequest request, CosmosClientException exception) { + this.updateResponseHeader(request, exception.responseHeaders()); + if ((!ReplicatedResourceClient.isMasterResource(request.getResourceType())) && + (Exceptions.isStatusCode(exception, HttpConstants.StatusCodes.PRECONDITION_FAILED) || Exceptions.isStatusCode(exception, HttpConstants.StatusCodes.CONFLICT) || + (Exceptions.isStatusCode(exception, HttpConstants.StatusCodes.NOTFOUND) && + !Exceptions.isSubStatusCode(exception, HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) { + this.captureSessionToken(request, exception.responseHeaders()); + } + } + + private RxDocumentServiceResponse completeResponse( + StoreResponse storeResponse, + RxDocumentServiceRequest request) throws InternalServerErrorException { + if (storeResponse.getResponseHeaderNames().length != storeResponse.getResponseHeaderValues().length) { + throw new InternalServerErrorException(RMResources.InvalidBackendResponse); + } + + Map headers = new HashMap<>(storeResponse.getResponseHeaderNames().length); + for (int idx = 0; idx < storeResponse.getResponseHeaderNames().length; idx++) { + String name = storeResponse.getResponseHeaderNames()[idx]; + String value = storeResponse.getResponseHeaderValues()[idx]; + + headers.put(name, value); + } + + this.updateResponseHeader(request, headers); + this.captureSessionToken(request, headers); + storeResponse.setCosmosResponseDiagnostics(request.requestContext.cosmosResponseDiagnostics); + return new RxDocumentServiceResponse(storeResponse); + } + + private long getLSN(Map headers) { + long defaultValue = -1; + String value = headers.get(WFConstants.BackendHeaders.LSN); + + if (!Strings.isNullOrEmpty(value)) { + return NumberUtils.toLong(value, defaultValue); + + } + + return defaultValue; + } + + private void updateResponseHeader(RxDocumentServiceRequest request, Map headers) { + String requestConsistencyLevel = request.getHeaders().get(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL); + + boolean sessionConsistency = + this.serviceConfigurationReader.getDefaultConsistencyLevel() == ConsistencyLevel.SESSION || + (!Strings.isNullOrEmpty(requestConsistencyLevel) + && Strings.areEqualIgnoreCase(requestConsistencyLevel, ConsistencyLevel.SESSION.toString())); + + long storeLSN = this.getLSN(headers); + if (storeLSN == -1) { + return; + } + + String partitionKeyRangeId = headers.get(WFConstants.BackendHeaders.PARTITION_KEY_RANGE_ID); + + if (Strings.isNullOrEmpty(partitionKeyRangeId)) { + String inputSession = 
request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN); + if (!Strings.isNullOrEmpty(inputSession) + && inputSession.indexOf(ISessionToken.PARTITION_KEY_RANGE_SESSION_SEPARATOR) >= 1) { + partitionKeyRangeId = inputSession.substring(0, + inputSession.indexOf(ISessionToken.PARTITION_KEY_RANGE_SESSION_SEPARATOR)); + } else { + partitionKeyRangeId = ZERO_PARTITION_KEY_RANGE; + } + } + + ISessionToken sessionToken = null; + String sessionTokenResponseHeader = headers.get(HttpConstants.HttpHeaders.SESSION_TOKEN); + if (!Strings.isNullOrEmpty(sessionTokenResponseHeader)) { + sessionToken = SessionTokenHelper.parse(sessionTokenResponseHeader); + } + + if (sessionToken != null) { + headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, String.format( + "%s:%s", + partitionKeyRangeId, + sessionToken.convertToString())); + } + + headers.remove(WFConstants.BackendHeaders.PARTITION_KEY_RANGE_ID); + } + + private void captureSessionToken(RxDocumentServiceRequest request, Map headers) { + if (request.getResourceType() == ResourceType.DocumentCollection + && request.getOperationType() == OperationType.Delete) { + String resourceId; + if (request.getIsNameBased()) { + resourceId = headers.get(HttpConstants.HttpHeaders.OWNER_ID); + } else { + resourceId = request.getResourceId(); + } + this.sessionContainer.clearTokenByResourceId(resourceId); + } else { + this.sessionContainer.setSessionToken(request, headers); + } + } + + // TODO RNTBD support + // https://msdata.visualstudio.com/CosmosDB/SDK/_workitems/edit/262496 +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/StoreClientFactory.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/StoreClientFactory.java new file mode 100644 index 0000000000000..6888f4908506d --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/StoreClientFactory.java @@ -0,0 +1,93 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.internal.Configs; +import com.azure.data.cosmos.internal.IAuthorizationTokenProvider; +import com.azure.data.cosmos.internal.SessionContainer; +import com.azure.data.cosmos.internal.UserAgentContainer; + +// TODO: DANOBLE: no support for ICommunicationEventSource ask Ji +// Links: +// https://msdata.visualstudio.com/CosmosDB/SDK/_workitems/edit/262496 + +public class StoreClientFactory implements AutoCloseable { + private final Configs configs; + private final int maxConcurrentConnectionOpenRequests; + private final int requestTimeoutInSeconds; + private final Protocol protocol; + private final TransportClient transportClient; + private volatile boolean isClosed; + + public StoreClientFactory( + Configs configs, + int requestTimeoutInSeconds, + int maxConcurrentConnectionOpenRequests, + UserAgentContainer userAgent) { + + this.configs = configs; + this.protocol = configs.getProtocol(); + this.requestTimeoutInSeconds = requestTimeoutInSeconds; + this.maxConcurrentConnectionOpenRequests = maxConcurrentConnectionOpenRequests; + + if (protocol == Protocol.HTTPS) { + this.transportClient = new HttpTransportClient(configs, requestTimeoutInSeconds, userAgent); + } else if (protocol == Protocol.TCP){ + this.transportClient = new RntbdTransportClient(configs, requestTimeoutInSeconds, userAgent); + } else { + throw new IllegalArgumentException(String.format("protocol: %s", this.protocol)); + } + } + + public void close() throws Exception { + this.transportClient.close(); + this.isClosed = true; + } + + // TODO wew don't have support for the following yet + // TODO enableReadRequestsFallback ask Ji + // TODO useFallbackClient ask Ji + public StoreClient createStoreClient( + IAddressResolver addressResolver, + SessionContainer sessionContainer, + GatewayServiceConfigurationReader serviceConfigurationReader, + IAuthorizationTokenProvider authorizationTokenProvider, + boolean useMultipleWriteLocations) { + this.throwIfClosed(); + + return new StoreClient(configs, + addressResolver, + sessionContainer, + serviceConfigurationReader, + authorizationTokenProvider, + this.transportClient, + useMultipleWriteLocations); + } + + private void throwIfClosed() { + if (isClosed) { + throw new IllegalStateException("storeClient already closed!"); + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/StoreReader.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/StoreReader.java new file mode 100644 index 0000000000000..ec2415fc96ea2 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/StoreReader.java @@ -0,0 +1,893 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.BadRequestException; +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.GoneException; +import com.azure.data.cosmos.internal.ISessionContainer; +import com.azure.data.cosmos.InternalServerErrorException; +import com.azure.data.cosmos.PartitionIsMigratingException; +import com.azure.data.cosmos.PartitionKeyRangeGoneException; +import com.azure.data.cosmos.PartitionKeyRangeIsSplittingException; +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.ISessionToken; +import com.azure.data.cosmos.internal.Integers; +import com.azure.data.cosmos.internal.MutableVolatile; +import com.azure.data.cosmos.internal.OperationType; +import com.azure.data.cosmos.internal.RMResources; +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import com.azure.data.cosmos.internal.SessionTokenHelper; +import com.azure.data.cosmos.internal.Strings; +import com.azure.data.cosmos.internal.Utils; +import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.tuple.Pair; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.Exceptions; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; +import reactor.core.scheduler.Schedulers; + +import java.net.URI; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; + +import static com.azure.data.cosmos.internal.Exceptions.isSubStatusCode; + +public class StoreReader { + private final Logger logger = LoggerFactory.getLogger(StoreReader.class); + private final TransportClient transportClient; + private final AddressSelector addressSelector; + private final ISessionContainer sessionContainer; + private String lastReadAddress; + + public StoreReader( + TransportClient transportClient, + AddressSelector addressSelector, + ISessionContainer sessionContainer) { + this.transportClient = transportClient; + this.addressSelector = addressSelector; + this.sessionContainer = sessionContainer; + } + + public Mono> readMultipleReplicaAsync( + RxDocumentServiceRequest entity, + boolean includePrimary, + int replicaCountToRead, + boolean requiresValidLsn, + boolean useSessionToken, + ReadMode readMode) { + return readMultipleReplicaAsync(entity, includePrimary, replicaCountToRead, requiresValidLsn, useSessionToken, readMode, false, false); + } + + /** + * Makes requests to multiple replicas at once and returns responses + * @param entity RxDocumentServiceRequest + * @param includePrimary flag to indicate whether to indicate primary replica in the reads + * @param replicaCountToRead number of replicas to read from + * @param requiresValidLsn flag to indicate whether a valid lsn is required to consider a response as valid + * @param useSessionToken flag to 
indicate whether to use session token + * @param readMode READ mode + * @param checkMinLSN set minimum required session lsn + * @param forceReadAll reads from all available replicas to gather result from readsToRead number of replicas + * @return ReadReplicaResult which indicates the LSN and whether Quorum was Met / Not Met etc + */ + public Mono> readMultipleReplicaAsync( + RxDocumentServiceRequest entity, + boolean includePrimary, + int replicaCountToRead, + boolean requiresValidLsn, + boolean useSessionToken, + ReadMode readMode, + boolean checkMinLSN, + boolean forceReadAll) { + + if (entity.requestContext.timeoutHelper.isElapsed()) { + return Mono.error(new GoneException()); + } + + String originalSessionToken = entity.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN); + + if (entity.requestContext.cosmosResponseDiagnostics == null) { + entity.requestContext.cosmosResponseDiagnostics = BridgeInternal.createCosmosResponseDiagnostics(); + } + + Mono readQuorumResultObs = this.readMultipleReplicasInternalAsync( + entity, includePrimary, replicaCountToRead, requiresValidLsn, useSessionToken, readMode, checkMinLSN, forceReadAll); + + return readQuorumResultObs.flatMap(readQuorumResult -> { + if (entity.requestContext.performLocalRefreshOnGoneException && + readQuorumResult.retryWithForceRefresh && + !entity.requestContext.forceRefreshAddressCache) { + if (entity.requestContext.timeoutHelper.isElapsed()) { + return Mono.error(new GoneException()); + } + + entity.requestContext.forceRefreshAddressCache = true; + + return this.readMultipleReplicasInternalAsync( + entity, includePrimary, replicaCountToRead, requiresValidLsn, useSessionToken, readMode, false /*checkMinLSN*/, forceReadAll) + .map(r -> r.responses); + } else { + return Mono.just(readQuorumResult.responses); + } + }).flux().doAfterTerminate(() -> SessionTokenHelper.setOriginalSessionToken(entity, originalSessionToken)).single(); + } + + private Flux earlyResultIfNotEnoughReplicas(List replicaAddresses, + RxDocumentServiceRequest request, + int replicaCountToRead) { + if (replicaAddresses.size() < replicaCountToRead) { + // if not enough replicas, return ReadReplicaResult + if (!request.requestContext.forceRefreshAddressCache) { + return Flux.just(new ReadReplicaResult(true /*retryWithForceRefresh*/, Collections.emptyList())); + } else { + return Flux.just(new ReadReplicaResult(false /*retryWithForceRefresh*/, Collections.emptyList())); + } + } else { + // if there are enough replicas, move on + return Flux.empty(); + } + } + + private Flux toStoreResult(RxDocumentServiceRequest request, + Pair, URI> storeRespAndURI, + ReadMode readMode, + boolean requiresValidLsn) { + + return storeRespAndURI.getLeft() + .flatMap(storeResponse -> { + try { + StoreResult storeResult = this.createStoreResult( + storeResponse, + null, requiresValidLsn, + readMode != ReadMode.Strong, + storeRespAndURI.getRight()); + + BridgeInternal.getContactedReplicas(request.requestContext.cosmosResponseDiagnostics).add(storeRespAndURI.getRight()); + return Flux.just(storeResult); + } catch (Exception e) { + // RxJava1 doesn't allow throwing checked exception from Observable operators + return Flux.error(e); + } + } + ).onErrorResume(t -> { + + try { + logger.debug("Exception {} is thrown while doing readMany", t); + Exception storeException = Utils.as(t, Exception.class); + if (storeException == null) { + return Flux.error(t); + } + +// Exception storeException = readTask.Exception != null ? 
readTask.Exception.InnerException : null; + StoreResult storeResult = this.createStoreResult( + null, + storeException, requiresValidLsn, + readMode != ReadMode.Strong, + null); + if (storeException instanceof TransportException) { + BridgeInternal.getFailedReplicas(request.requestContext.cosmosResponseDiagnostics).add(storeRespAndURI.getRight()); + } + return Flux.just(storeResult); + } catch (Exception e) { + // RxJava1 doesn't allow throwing checked exception from Observable operators + return Flux.error(e); + } + }); + } + + private Flux> readFromReplicas(List resultCollector, + List resolveApiResults, + final AtomicInteger replicasToRead, + RxDocumentServiceRequest entity, + boolean includePrimary, + int replicaCountToRead, + boolean requiresValidLsn, + boolean useSessionToken, + ReadMode readMode, + boolean checkMinLSN, + boolean forceReadAll, + final MutableVolatile requestSessionToken, + final MutableVolatile hasGoneException, + boolean enforceSessionCheck, + final MutableVolatile shortCircut) { + if (entity.requestContext.timeoutHelper.isElapsed()) { + return Flux.error(new GoneException()); + } + List, URI>> readStoreTasks = new ArrayList<>(); + int uriIndex = StoreReader.generateNextRandom(resolveApiResults.size()); + + while (resolveApiResults.size() > 0) { + uriIndex = uriIndex % resolveApiResults.size(); + URI uri = resolveApiResults.get(uriIndex); + Pair, URI> res; + try { + res = this.readFromStoreAsync(resolveApiResults.get(uriIndex), + entity); + + } catch (Exception e) { + res = Pair.of(Mono.error(e), uri); + } + + readStoreTasks.add(Pair.of(res.getLeft().flux(), res.getRight())); + resolveApiResults.remove(uriIndex); + + + if (!forceReadAll && readStoreTasks.size() == replicasToRead.get()) { + break; + } + } + + replicasToRead.set(readStoreTasks.size() >= replicasToRead.get() ? 0 : replicasToRead.get() - readStoreTasks.size()); + + + List> storeResult = readStoreTasks + .stream() + .map(item -> toStoreResult(entity, item, readMode, requiresValidLsn)) + .collect(Collectors.toList()); + Flux allStoreResults = Flux.merge(storeResult); + + return allStoreResults.collectList().onErrorResume(e -> { + if (Exceptions.isMultiple(e)) { + logger.info("Captured composite exception"); + List exceptions = Exceptions.unwrapMultiple(e); + assert !exceptions.isEmpty(); + return Mono.error(exceptions.get(0)); + } + + return Mono.error(e); + }).map(newStoreResults -> { + for (StoreResult srr : newStoreResults) { + + entity.requestContext.requestChargeTracker.addCharge(srr.requestCharge); + BridgeInternal.recordResponse(entity.requestContext.cosmosResponseDiagnostics, entity, srr); + if (srr.isValid) { + + try { + + if (requestSessionToken.v == null + || (srr.sessionToken != null && requestSessionToken.v.isValid(srr.sessionToken)) + || (!enforceSessionCheck && !srr.isNotFoundException)) { + resultCollector.add(srr); + } + + } catch (Exception e) { + // TODO: what to do on exception? 
+ } + } + + hasGoneException.v = hasGoneException.v || (srr.isGoneException && !srr.isInvalidPartitionException); + + if (resultCollector.size() >= replicaCountToRead) { + if (hasGoneException.v && !entity.requestContext.performedBackgroundAddressRefresh) { + this.startBackgroundAddressRefresh(entity); + entity.requestContext.performedBackgroundAddressRefresh = true; + } + + shortCircut.v = new ReadReplicaResult(false, resultCollector); + replicasToRead.set(0); + return resultCollector; + } + + // Remaining replicas + replicasToRead.set(replicaCountToRead - resultCollector.size()); + } + return resultCollector; + }).flux(); + } + + private ReadReplicaResult createReadReplicaResult(List responseResult, + int replicaCountToRead, + int resolvedAddressCount, + boolean hasGoneException, + RxDocumentServiceRequest entity) throws CosmosClientException { + if (responseResult.size() < replicaCountToRead) { + logger.debug("Could not get quorum number of responses. " + + "ValidResponsesReceived: {} ResponsesExpected: {}, ResolvedAddressCount: {}, ResponsesString: {}", + responseResult.size(), + replicaCountToRead, + resolvedAddressCount, + String.join(";", responseResult.stream().map(r -> r.toString()).collect(Collectors.toList()))); + + if (hasGoneException) { + if (!entity.requestContext.performLocalRefreshOnGoneException) { + // If we are not supposed to act upon GoneExceptions here, just throw them + throw new GoneException(); + } else if (!entity.requestContext.forceRefreshAddressCache) { + // We could not obtain valid read quorum number of responses even when we went through all the secondary addresses + // Attempt force refresh and start over again. + return new ReadReplicaResult(true, responseResult); + } + } + } + + return new ReadReplicaResult(false, responseResult); + } + + /** + * Makes requests to multiple replicas at once and returns responses + * @param entity DocumentServiceRequest + * @param includePrimary flag to indicate whether to indicate primary replica in the reads + * @param replicaCountToRead number of replicas to read from + * @param requiresValidLsn flag to indicate whether a valid lsn is required to consider a response as valid + * @param useSessionToken flag to indicate whether to use session token + * @param readMode READ mode + * @param checkMinLSN set minimum required session lsn + * @param forceReadAll will read from all available replicas to put together result from readsToRead number of replicas + * @return ReadReplicaResult which indicates the LSN and whether Quorum was Met / Not Met etc + */ + private Mono readMultipleReplicasInternalAsync(RxDocumentServiceRequest entity, + boolean includePrimary, + int replicaCountToRead, + boolean requiresValidLsn, + boolean useSessionToken, + ReadMode readMode, + boolean checkMinLSN, + boolean forceReadAll) { + if (entity.requestContext.timeoutHelper.isElapsed()) { + return Mono.error(new GoneException()); + } + + String requestedCollectionId = null; + + if (entity.forceNameCacheRefresh) { + requestedCollectionId = entity.requestContext.resolvedCollectionRid; + } + + Mono> resolveApiResultsObs = this.addressSelector.resolveAllUriAsync( + entity, + includePrimary, + entity.requestContext.forceRefreshAddressCache); + + if (!StringUtils.isEmpty(requestedCollectionId) && !StringUtils.isEmpty(entity.requestContext.resolvedCollectionRid)) { + if (!requestedCollectionId.equals(entity.requestContext.resolvedCollectionRid)) { + this.sessionContainer.clearTokenByResourceId(requestedCollectionId); + } + } + + return 
resolveApiResultsObs.flux() + .map(list -> Collections.synchronizedList(new ArrayList<>(list))) + .flatMap( + resolveApiResults -> { + try { + MutableVolatile requestSessionToken = new MutableVolatile<>(); + if (useSessionToken) { + SessionTokenHelper.setPartitionLocalSessionToken(entity, this.sessionContainer); + if (checkMinLSN) { + requestSessionToken.v = entity.requestContext.sessionToken; + } + } else { + entity.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); + } + + Flux y = earlyResultIfNotEnoughReplicas(resolveApiResults, entity, replicaCountToRead); + return y.switchIfEmpty( + Flux.defer(() -> { + + List storeResultList = Collections.synchronizedList(new ArrayList<>()); + AtomicInteger replicasToRead = new AtomicInteger(replicaCountToRead); + + // string clientVersion = entity.Headers[HttpConstants.HttpHeaders.Version]; + // enforceSessionCheck = string.IsNullOrEmpty(clientVersion) ? false : VersionUtility.IsLaterThan(clientVersion, HttpConstants.Versions.v2016_05_30); + // TODO: enforceSessionCheck is true, replace with true + boolean enforceSessionCheck = true; + + MutableVolatile hasGoneException = new MutableVolatile(false); + MutableVolatile shortCircuitResult = new MutableVolatile(); + + return Flux.defer(() -> + readFromReplicas( + storeResultList, + resolveApiResults, + replicasToRead, + entity, + includePrimary, + replicaCountToRead, + requiresValidLsn, + useSessionToken, + readMode, + checkMinLSN, + forceReadAll, + requestSessionToken, + hasGoneException, + enforceSessionCheck, + shortCircuitResult)) + // repeat().takeUntil() simulate a while loop pattern + .repeat() + .takeUntil(x -> { + // Loop until we have the read quorum number of valid responses or if we have read all the replicas + if (replicasToRead.get() > 0 && resolveApiResults.size() > 0) { + // take more from the source observable + return false; + } else { + // enough result + return true; + } + }) + .thenMany( + Flux.defer(() -> { + try { + // TODO: some fields which get updated need to be thread-safe + return Flux.just(createReadReplicaResult(storeResultList, replicaCountToRead, resolveApiResults.size(), hasGoneException.v, entity)); + } catch (Exception e) { + return Flux.error(e); + } + } + )); + })); + } catch (Exception e) { + return Flux.error(e); + } + } + ).single(); + } + + public Mono readPrimaryAsync( + RxDocumentServiceRequest entity, + boolean requiresValidLsn, + boolean useSessionToken) { + if (entity.requestContext.timeoutHelper.isElapsed()) { + return Mono.error(new GoneException()); + } + + String originalSessionToken = entity.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN); + if (entity.requestContext.cosmosResponseDiagnostics == null) { + entity.requestContext.cosmosResponseDiagnostics = BridgeInternal.createCosmosResponseDiagnostics(); + } + + return this.readPrimaryInternalAsync( + entity, requiresValidLsn, useSessionToken).flatMap( + readQuorumResult -> { + + if (entity.requestContext.performLocalRefreshOnGoneException && + readQuorumResult.retryWithForceRefresh && + !entity.requestContext.forceRefreshAddressCache) { + if (entity.requestContext.timeoutHelper.isElapsed()) { + return Mono.error(new GoneException()); + } + + entity.requestContext.forceRefreshAddressCache = true; + return this.readPrimaryInternalAsync(entity, requiresValidLsn, useSessionToken); + } else { + return Mono.just(readQuorumResult); + } + } + ).flatMap(readQuorumResult -> { + + // RxJava1 doesn't allow throwing Typed Exception from Observable.map(.) 
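The repeat().takeUntil() chain above stands in for a while loop: re-subscribe to the deferred read until enough replica responses have been collected or no addresses remain. A standalone illustration of just that Reactor pattern, with made-up names and counts:

    import reactor.core.publisher.Flux;

    import java.util.List;
    import java.util.concurrent.CopyOnWriteArrayList;
    import java.util.concurrent.atomic.AtomicInteger;

    public final class RepeatTakeUntilSample {
        public static void main(String[] args) {
            List<Integer> results = new CopyOnWriteArrayList<>();
            AtomicInteger replicasToRead = new AtomicInteger(3);

            Flux.defer(() -> {
                    // one "replica read" per subscription
                    results.add(results.size());
                    replicasToRead.decrementAndGet();
                    return Flux.just(results);
                })
                .repeat()                                    // re-subscribe, i.e. loop
                .takeUntil(r -> replicasToRead.get() <= 0)   // stop once enough responses arrived
                .blockLast();

            System.out.println(results); // [0, 1, 2]
        }
    }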
+ // this is a design flaw which was fixed in RxJava2. + + // as our core is built on top of RxJava1 here we had to use Observable.flatMap(.) not map(.) + // once we switch to RxJava2 we can move to Observable.map(.) + // https://github.com/ReactiveX/RxJava/wiki/What's-different-in-2.0#functional-interfaces + if (readQuorumResult.responses.size() == 0) { + return Mono.error(new GoneException(RMResources.Gone)); + } + + return Mono.just(readQuorumResult.responses.get(0)); + + }).doOnEach(arg -> { + try { + SessionTokenHelper.setOriginalSessionToken(entity, originalSessionToken); + } catch (Throwable throwable) { + logger.error("Unexpected failure in handling orig [{}]: new [{}]", arg, throwable.getMessage(), throwable); + } + } + ); + } + + private Mono readPrimaryInternalAsync( + RxDocumentServiceRequest entity, + boolean requiresValidLsn, + boolean useSessionToken) { + if (entity.requestContext.timeoutHelper.isElapsed()) { + return Mono.error(new GoneException()); + } + + Mono primaryUriObs = this.addressSelector.resolvePrimaryUriAsync( + entity, + entity.requestContext.forceRefreshAddressCache); + + Mono storeResultObs = primaryUriObs.flatMap( + primaryUri -> { + try { + if (useSessionToken) { + SessionTokenHelper.setPartitionLocalSessionToken(entity, this.sessionContainer); + } else { + // Remove whatever session token can be there in headers. + // We don't need it. If it is global - backend will not undersand it. + // But there's no point in producing partition local sesison token. + entity.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); + } + + + Pair, URI> storeResponseObsAndUri = this.readFromStoreAsync(primaryUri, entity); + + return storeResponseObsAndUri.getLeft().flatMap( + storeResponse -> { + + try { + StoreResult storeResult = this.createStoreResult( + storeResponse != null ? storeResponse : null, + null, requiresValidLsn, + true, + storeResponse != null ? 
storeResponseObsAndUri.getRight() : null); + return Mono.just(storeResult); + } catch (CosmosClientException e) { + return Mono.error(e); + } + } + + ); + + } catch (CosmosClientException e) { + // RxJava1 doesn't allow throwing checked exception from Observable:map + return Mono.error(e); + } + + } + ).onErrorResume(t -> { + logger.debug("Exception {} is thrown while doing READ Primary", t); + + Exception storeTaskException = Utils.as(t, Exception.class); + if (storeTaskException == null) { + return Mono.error(t); + } + + try { + StoreResult storeResult = this.createStoreResult( + null, + storeTaskException, requiresValidLsn, + true, + null); + return Mono.just(storeResult); + } catch (CosmosClientException e) { + // RxJava1 doesn't allow throwing checked exception from Observable operators + return Mono.error(e); + } + }); + + return storeResultObs.map(storeResult -> { + BridgeInternal.recordResponse(entity.requestContext.cosmosResponseDiagnostics, entity, storeResult); + entity.requestContext.requestChargeTracker.addCharge(storeResult.requestCharge); + + if (storeResult.isGoneException && !storeResult.isInvalidPartitionException) { + return new ReadReplicaResult(true, Collections.emptyList()); + } + + return new ReadReplicaResult(false, Collections.singletonList(storeResult)); + }); + } + + private Pair, URI> readFromStoreAsync( + URI physicalAddress, + RxDocumentServiceRequest request) throws CosmosClientException { + + if (request.requestContext.timeoutHelper.isElapsed()) { + throw new GoneException(); + } + + //QueryRequestPerformanceActivity activity = null; + // TODO: ifNoneMatch and maxPageSize are not used in the .Net code. check with Ji + String ifNoneMatch = request.getHeaders().get(HttpConstants.HttpHeaders.IF_NONE_MATCH); + String continuation = null; + String maxPageSize = null; + + // TODO: is this needed + this.lastReadAddress = physicalAddress.toString(); + + if (request.getOperationType() == OperationType.ReadFeed || + request.getOperationType() == OperationType.Query) { + continuation = request.getHeaders().get(HttpConstants.HttpHeaders.CONTINUATION); + maxPageSize = request.getHeaders().get(HttpConstants.HttpHeaders.PAGE_SIZE); + + if (continuation != null && continuation.contains(";")) { + String[] parts = StringUtils.split(continuation, ';'); + if (parts.length < 3) { + throw new BadRequestException(String.format( + RMResources.InvalidHeaderValue, + continuation, + HttpConstants.HttpHeaders.CONTINUATION)); + } + + continuation = parts[0]; + } + + request.setContinuation(continuation); + + // TODO: troubleshooting + // https://msdata.visualstudio.com/CosmosDB/_workitems/edit/258624 + //activity = CustomTypeExtensions.StartActivity(request); + } + + switch (request.getOperationType()) { + case Read: + case Head: { + Mono storeResponseObs = this.transportClient.invokeResourceOperationAsync( + physicalAddress, + request); + + return Pair.of(storeResponseObs, physicalAddress); + + } + + case ReadFeed: + case HeadFeed: + case Query: + case SqlQuery: + case ExecuteJavaScript: { + Mono storeResponseObs = StoreReader.completeActivity(this.transportClient.invokeResourceOperationAsync( + physicalAddress, + request), null); + // TODO activity); + // https://msdata.visualstudio.com/CosmosDB/_workitems/edit/258624 + return Pair.of(storeResponseObs, physicalAddress); + } + + default: + throw new IllegalStateException(String.format("Unexpected operation type {%s}", request.getOperationType())); + } + } + + + private static Mono completeActivity(Mono task, Object activity) { + // 
TODO: client statistics + // https://msdata.visualstudio.com/CosmosDB/_workitems/edit/258624 + return task; + } + + StoreResult createStoreResult(StoreResponse storeResponse, + Exception responseException, + boolean requiresValidLsn, + boolean useLocalLSNBasedHeaders, + URI storePhysicalAddress) throws CosmosClientException { + + if (responseException == null) { + String headerValue = null; + long quorumAckedLSN = -1; + int currentReplicaSetSize = -1; + int currentWriteQuorum = -1; + long globalCommittedLSN = -1; + int numberOfReadRegions = -1; + long itemLSN = -1; + if ((headerValue = storeResponse.getHeaderValue( + useLocalLSNBasedHeaders ? WFConstants.BackendHeaders.QUORUM_ACKED_LOCAL_LSN : WFConstants.BackendHeaders.QUORUM_ACKED_LSN)) != null) { + quorumAckedLSN = Long.parseLong(headerValue); + } + + if ((headerValue = storeResponse.getHeaderValue(WFConstants.BackendHeaders.CURRENT_REPLICA_SET_SIZE)) != null) { + currentReplicaSetSize = Integer.parseInt(headerValue); + } + + if ((headerValue = storeResponse.getHeaderValue(WFConstants.BackendHeaders.CURRENT_WRITE_QUORUM)) != null) { + currentWriteQuorum = Integer.parseInt(headerValue); + } + + double requestCharge = 0; + if ((headerValue = storeResponse.getHeaderValue(HttpConstants.HttpHeaders.REQUEST_CHARGE)) != null) { + requestCharge = Double.parseDouble(headerValue); + } + + if ((headerValue = storeResponse.getHeaderValue(WFConstants.BackendHeaders.NUMBER_OF_READ_REGIONS)) != null) { + numberOfReadRegions = Integer.parseInt(headerValue); + } + + if ((headerValue = storeResponse.getHeaderValue(WFConstants.BackendHeaders.GLOBAL_COMMITTED_LSN)) != null) { + globalCommittedLSN = Long.parseLong(headerValue); + } + + if ((headerValue = storeResponse.getHeaderValue( + useLocalLSNBasedHeaders ? WFConstants.BackendHeaders.ITEM_LOCAL_LSN : WFConstants.BackendHeaders.ITEM_LSN)) != null) { + itemLSN = Long.parseLong(headerValue); + } + + long lsn = -1; + if (useLocalLSNBasedHeaders) { + if ((headerValue = storeResponse.getHeaderValue(WFConstants.BackendHeaders.LOCAL_LSN)) != null) { + lsn = Long.parseLong(headerValue); + } + } else { + lsn = storeResponse.getLSN(); + } + + ISessionToken sessionToken = null; + // SESSION token response header is introduced from version HttpConstants.Versions.v2018_06_18 onwards. 
+ // Previously it was only a request header + if ((headerValue = storeResponse.getHeaderValue(HttpConstants.HttpHeaders.SESSION_TOKEN)) != null) { + sessionToken = SessionTokenHelper.parse(headerValue); + } + + return new StoreResult( + /* storeResponse: */storeResponse, + /* exception: */ null, + /* partitionKeyRangeId: */ storeResponse.getPartitionKeyRangeId(), + /* lsn: */ lsn, + /* quorumAckedLsn: */ quorumAckedLSN, + /* requestCharge: */ requestCharge, + /* currentReplicaSetSize: */ currentReplicaSetSize, + /* currentWriteQuorum: */ currentWriteQuorum, + /* isValid: */true, + /* storePhysicalAddress: */ storePhysicalAddress, + /* globalCommittedLSN: */ globalCommittedLSN, + /* numberOfReadRegions: */ numberOfReadRegions, + /* itemLSN: */ itemLSN, + /* sessionToken: */ sessionToken); + } else { + CosmosClientException cosmosClientException = Utils.as(responseException, CosmosClientException.class); + if (cosmosClientException != null) { + StoreReader.verifyCanContinueOnException(cosmosClientException); + long quorumAckedLSN = -1; + int currentReplicaSetSize = -1; + int currentWriteQuorum = -1; + long globalCommittedLSN = -1; + int numberOfReadRegions = -1; + String headerValue = cosmosClientException.responseHeaders().get(useLocalLSNBasedHeaders ? WFConstants.BackendHeaders.QUORUM_ACKED_LOCAL_LSN : WFConstants.BackendHeaders.QUORUM_ACKED_LSN); + if (!Strings.isNullOrEmpty(headerValue)) { + quorumAckedLSN = Long.parseLong(headerValue); + } + + headerValue = cosmosClientException.responseHeaders().get(WFConstants.BackendHeaders.CURRENT_REPLICA_SET_SIZE); + if (!Strings.isNullOrEmpty(headerValue)) { + currentReplicaSetSize = Integer.parseInt(headerValue); + } + + headerValue = cosmosClientException.responseHeaders().get(WFConstants.BackendHeaders.CURRENT_WRITE_QUORUM); + if (!Strings.isNullOrEmpty(headerValue)) { + currentReplicaSetSize = Integer.parseInt(headerValue); + } + + double requestCharge = 0; + headerValue = cosmosClientException.responseHeaders().get(HttpConstants.HttpHeaders.REQUEST_CHARGE); + if (!Strings.isNullOrEmpty(headerValue)) { + requestCharge = Double.parseDouble(headerValue); + } + + headerValue = cosmosClientException.responseHeaders().get(WFConstants.BackendHeaders.NUMBER_OF_READ_REGIONS); + if (!Strings.isNullOrEmpty(headerValue)) { + numberOfReadRegions = Integer.parseInt(headerValue); + } + + headerValue = cosmosClientException.responseHeaders().get(WFConstants.BackendHeaders.GLOBAL_COMMITTED_LSN); + if (!Strings.isNullOrEmpty(headerValue)) { + globalCommittedLSN = Integer.parseInt(headerValue); + } + + long lsn = -1; + if (useLocalLSNBasedHeaders) { + headerValue = cosmosClientException.responseHeaders().get(WFConstants.BackendHeaders.LOCAL_LSN); + if (!Strings.isNullOrEmpty(headerValue)) { + lsn = Long.parseLong(headerValue); + } + } else { + lsn = BridgeInternal.getLSN(cosmosClientException); + } + + ISessionToken sessionToken = null; + + // SESSION token response header is introduced from version HttpConstants.Versions.v2018_06_18 onwards. 
+ // Previously it was only a request header + headerValue = cosmosClientException.responseHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN); + if (!Strings.isNullOrEmpty(headerValue)) { + sessionToken = SessionTokenHelper.parse(headerValue); + } + + return new StoreResult( + /* storeResponse: */ (StoreResponse) null, + /* exception: */ cosmosClientException, + /* partitionKeyRangeId: */BridgeInternal.getPartitionKeyRangeId(cosmosClientException), + /* lsn: */ lsn, + /* quorumAckedLsn: */ quorumAckedLSN, + /* requestCharge: */ requestCharge, + /* currentReplicaSetSize: */ currentReplicaSetSize, + /* currentWriteQuorum: */ currentWriteQuorum, + /* isValid: */!requiresValidLsn + || ((cosmosClientException.statusCode() != HttpConstants.StatusCodes.GONE || isSubStatusCode(cosmosClientException, HttpConstants.SubStatusCodes.NAME_CACHE_IS_STALE)) + && lsn >= 0), + // TODO: verify where exception.RequestURI is supposed to be set in .Net + /* storePhysicalAddress: */ storePhysicalAddress == null ? BridgeInternal.getRequestUri(cosmosClientException) : storePhysicalAddress, + /* globalCommittedLSN: */ globalCommittedLSN, + /* numberOfReadRegions: */ numberOfReadRegions, + /* itemLSN: */ -1, + sessionToken); + } else { + logger.error("Unexpected exception {} received while reading from store.", responseException.getMessage(), responseException); + return new StoreResult( + /* storeResponse: */ null, + /* exception: */ new InternalServerErrorException(RMResources.InternalServerError), + /* partitionKeyRangeId: */ (String) null, + /* lsn: */ -1, + /* quorumAckedLsn: */ -1, + /* requestCharge: */ 0, + /* currentReplicaSetSize: */ 0, + /* currentWriteQuorum: */ 0, + /* isValid: */ false, + /* storePhysicalAddress: */ storePhysicalAddress, + /* globalCommittedLSN: */-1, + /* numberOfReadRegions: */ 0, + /* itemLSN: */ -1, + /* sessionToken: */ null); + } + } + } + + void startBackgroundAddressRefresh(RxDocumentServiceRequest request) { + this.addressSelector.resolveAllUriAsync(request, true, true) + .publishOn(Schedulers.elastic()) + .subscribe( + r -> { + }, + e -> logger.warn( + "Background refresh of the addresses failed with {}", e.getMessage(), e) + ); + } + + private static int generateNextRandom(int maxValue) { + // The benefit of using ThreadLocalRandom.current() over Random is + // avoiding the synchronization contention due to multi-threading. 
+ return ThreadLocalRandom.current().nextInt(maxValue); + } + + static void verifyCanContinueOnException(CosmosClientException ex) throws CosmosClientException { + if (ex instanceof PartitionKeyRangeGoneException) { + throw ex; + } + + if (ex instanceof PartitionKeyRangeIsSplittingException) { + throw ex; + } + + if (ex instanceof PartitionIsMigratingException) { + throw ex; + } + + String value = ex.responseHeaders().get(HttpConstants.HttpHeaders.REQUEST_VALIDATION_FAILURE); + if (Strings.isNullOrWhiteSpace(value)) { + return; + } + + Integer result = Integers.tryParse(value); + if (result != null && result == 1) { + throw ex; + } + + return; + } + + private class ReadReplicaResult { + public ReadReplicaResult(boolean retryWithForceRefresh, List responses) { + this.retryWithForceRefresh = retryWithForceRefresh; + this.responses = responses; + } + + public final boolean retryWithForceRefresh; + public final List responses; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/StoreResponse.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/StoreResponse.java new file mode 100644 index 0000000000000..0639c6bb109c3 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/StoreResponse.java @@ -0,0 +1,151 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.CosmosResponseDiagnostics; +import com.azure.data.cosmos.internal.HttpConstants; +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.InputStream; +import java.util.List; +import java.util.Map.Entry; + +/** + * Used internally to represents a response from the store. 
+ */ +public class StoreResponse { + final static Logger LOGGER = LoggerFactory.getLogger(StoreResponse.class); + final private int status; + final private String[] responseHeaderNames; + final private String[] responseHeaderValues; + final private InputStream httpEntityStream; + final private String content; + + private CosmosResponseDiagnostics cosmosResponseDiagnostics; + + public StoreResponse(int status, List> headerEntries, InputStream inputStream) { + this(status, headerEntries, null, inputStream); + } + + public StoreResponse(int status, List> headerEntries, String content) { + this(status, headerEntries, content, null); + } + + private StoreResponse( + int status, + List> headerEntries, + String content, + InputStream inputStream) { + responseHeaderNames = new String[headerEntries.size()]; + responseHeaderValues = new String[headerEntries.size()]; + + int i = 0; + + for(Entry headerEntry: headerEntries) { + responseHeaderNames[i] = headerEntry.getKey(); + responseHeaderValues[i] = headerEntry.getValue(); + i++; + } + + this.status = status; + + this.content = content; + this.httpEntityStream = inputStream; + } + + public int getStatus() { + return status; + } + + public String[] getResponseHeaderNames() { + return responseHeaderNames; + } + + public String[] getResponseHeaderValues() { + return responseHeaderValues; + } + + public String getResponseBody() { + return this.content; + } + + public InputStream getResponseStream() { + // Some operation type doesn't have a response stream so this can be null + return this.httpEntityStream; + } + + public long getLSN() { + String lsnString = this.getHeaderValue(WFConstants.BackendHeaders.LSN); + if (StringUtils.isNotEmpty(lsnString)) { + return Long.parseLong(lsnString); + } + + return -1; + } + + public String getPartitionKeyRangeId() { + return this.getHeaderValue(WFConstants.BackendHeaders.PARTITION_KEY_RANGE_ID); + } + + public String getContinuation() { + return this.getHeaderValue(HttpConstants.HttpHeaders.CONTINUATION); + } + + public String getHeaderValue(String attribute) { + if (this.responseHeaderValues == null || this.responseHeaderNames.length != this.responseHeaderValues.length) { + return null; + } + + for (int i = 0; i < responseHeaderNames.length; i++) { + if (responseHeaderNames[i].equalsIgnoreCase(attribute)) { + return responseHeaderValues[i]; + } + } + + return null; + } + + public CosmosResponseDiagnostics getCosmosResponseDiagnostics() { + return cosmosResponseDiagnostics; + } + + void setCosmosResponseDiagnostics(CosmosResponseDiagnostics cosmosResponseDiagnostics) { + this.cosmosResponseDiagnostics = cosmosResponseDiagnostics; + } + + int getSubStatusCode() { + int subStatusCode = HttpConstants.SubStatusCodes.UNKNOWN; + String subStatusCodeString = this.getHeaderValue(WFConstants.BackendHeaders.SUB_STATUS); + if (StringUtils.isNotEmpty(subStatusCodeString)) { + try { + subStatusCode = Integer.parseInt(subStatusCodeString); + } catch (NumberFormatException e) { + // If value cannot be parsed as Integer, return Unknown. 
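A quick sketch of how callers read backend headers through StoreResponse; the header names follow WFConstants.BackendHeaders as used above, and the values are made up:

    import java.util.AbstractMap;
    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;

    List<Map.Entry<String, String>> headers = new ArrayList<>();
    headers.add(new AbstractMap.SimpleEntry<>("lsn", "42"));
    headers.add(new AbstractMap.SimpleEntry<>("x-ms-substatus", "1002"));

    StoreResponse response = new StoreResponse(200, headers, "{}");
    long lsn = response.getLSN();                  // 42
    String raw = response.getHeaderValue("LSN");   // "42" -- header lookup is case-insensitive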
+ } + } + return subStatusCode; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/StoreResult.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/StoreResult.java new file mode 100644 index 0000000000000..e48054103547c --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/StoreResult.java @@ -0,0 +1,179 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.InternalServerErrorException; +import com.azure.data.cosmos.internal.Exceptions; +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.ISessionToken; +import com.azure.data.cosmos.internal.RMResources; +import com.azure.data.cosmos.internal.RequestChargeTracker; +import com.azure.data.cosmos.internal.Strings; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.net.URI; + +public class StoreResult { + private final static Logger logger = LoggerFactory.getLogger(StoreResult.class); + + private final StoreResponse storeResponse; + private final CosmosClientException exception; + + final public long lsn; + final public String partitionKeyRangeId; + final public long quorumAckedLSN; + final public long globalCommittedLSN; + final public long numberOfReadRegions; + final public long itemLSN; + final public ISessionToken sessionToken; + final public double requestCharge; + final public int currentReplicaSetSize; + final public int currentWriteQuorum; + final public boolean isValid; + final public boolean isGoneException; + final public boolean isNotFoundException; + final public boolean isInvalidPartitionException; + final public URI storePhysicalAddress; + + public StoreResult( + StoreResponse storeResponse, + CosmosClientException exception, + String partitionKeyRangeId, + long lsn, + long quorumAckedLsn, + double requestCharge, + int currentReplicaSetSize, + int currentWriteQuorum, + boolean isValid, + URI storePhysicalAddress, + long globalCommittedLSN, + int numberOfReadRegions, + long itemLSN, + ISessionToken sessionToken) { + this.storeResponse = storeResponse; + this.exception = exception; + this.partitionKeyRangeId = partitionKeyRangeId; + this.lsn = lsn; + 
this.quorumAckedLSN = quorumAckedLsn; + this.requestCharge = requestCharge; + this.currentReplicaSetSize = currentReplicaSetSize; + this.currentWriteQuorum = currentWriteQuorum; + this.isValid = isValid; + this.isGoneException = this.exception != null && this.exception.statusCode() == HttpConstants.StatusCodes.GONE; + this.isNotFoundException = this.exception != null && this.exception.statusCode() == HttpConstants.StatusCodes.NOTFOUND; + this.isInvalidPartitionException = this.exception != null + && Exceptions.isNameCacheStale(this.exception); + this.storePhysicalAddress = storePhysicalAddress; + this.globalCommittedLSN = globalCommittedLSN; + this.numberOfReadRegions = numberOfReadRegions; + this.itemLSN = itemLSN; + this.sessionToken = sessionToken; + } + + public CosmosClientException getException() throws InternalServerErrorException { + if (this.exception == null) { + String message = "Exception should be available but found none"; + assert false : message; + logger.error(message); + throw new InternalServerErrorException(RMResources.InternalServerError); + } + + return exception; + } + + public StoreResponse toResponse() throws CosmosClientException { + return toResponse(null); + } + + public StoreResponse toResponse(RequestChargeTracker requestChargeTracker) throws CosmosClientException { + if (!this.isValid) { + if (this.exception == null) { + logger.error("Exception not set for invalid response"); + throw new InternalServerErrorException(RMResources.InternalServerError); + } + + throw this.exception; + } + + if (requestChargeTracker != null && this.isValid) { + StoreResult.setRequestCharge(this.storeResponse, this.exception, requestChargeTracker.getTotalRequestCharge()); + } + + if (this.exception != null) { + throw exception; + } + + return this.storeResponse; + } + + private static void setRequestCharge(StoreResponse response, CosmosClientException cosmosClientException, double totalRequestCharge) { + if (cosmosClientException != null) { + cosmosClientException.responseHeaders().put(HttpConstants.HttpHeaders.REQUEST_CHARGE, + Double.toString(totalRequestCharge)); + } + // Set total charge as final charge for the response. + else if (response.getResponseHeaderNames() != null) { + for (int i = 0; i < response.getResponseHeaderNames().length; ++i) { + if (Strings.areEqualIgnoreCase( + response.getResponseHeaderNames()[i], + HttpConstants.HttpHeaders.REQUEST_CHARGE)) { + response.getResponseHeaderValues()[i] = Double.toString(totalRequestCharge); + break; + } + } + } + } + + @Override + public String toString() { + int statusCode = 0; + int subStatusCode = HttpConstants.SubStatusCodes.UNKNOWN; + + if (this.storeResponse != null) { + statusCode = this.storeResponse.getStatus(); + subStatusCode = this.storeResponse.getSubStatusCode(); + } else if (this.exception != null) { + statusCode = this.exception.statusCode(); + subStatusCode = this.exception.subStatusCode(); + } + + return "storePhysicalAddress: " + this.storePhysicalAddress + + ", lsn: " + this.lsn + + ", globalCommittedLsn: " + this.globalCommittedLSN + + ", partitionKeyRangeId: " + this.partitionKeyRangeId + + ", isValid: " + this.isValid + + ", statusCode: " + statusCode + + ", subStatusCode: " + subStatusCode + + ", isGone: " + this.isGoneException + + ", isNotFound: " + this.isNotFoundException + + ", isInvalidPartition: " + this.isInvalidPartitionException + + ", requestCharge: " + this.requestCharge + + ", itemLSN: " + this.itemLSN + + ", sessionToken: " + (this.sessionToken != null ? 
this.sessionToken.convertToString() : null) + + ", exception: " + BridgeInternal.getInnerErrorMessage(this.exception); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/TimeoutHelper.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/TimeoutHelper.java new file mode 100644 index 0000000000000..c497f87dc20ae --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/TimeoutHelper.java @@ -0,0 +1,62 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.GoneException; +import com.azure.data.cosmos.RequestTimeoutException; + +import java.time.Duration; +import java.time.Instant; + +public class TimeoutHelper { + private final Instant startTime; + private final Duration timeOut; + + public TimeoutHelper(Duration timeOut) { + this.startTime = Instant.now(); + this.timeOut = timeOut; + } + + public boolean isElapsed() { + Duration elapsed = Duration.ofMillis(Instant.now().toEpochMilli() - startTime.toEpochMilli()); + return elapsed.compareTo(this.timeOut) >= 0; + } + + public Duration getRemainingTime() { + Duration elapsed = Duration.ofMillis(Instant.now().toEpochMilli() - startTime.toEpochMilli()); + return this.timeOut.minus(elapsed); + } + + public void throwTimeoutIfElapsed() throws RequestTimeoutException { + if (this.isElapsed()) { + throw new RequestTimeoutException(); + } + } + + public void throwGoneIfElapsed() throws GoneException { + if (this.isElapsed()) { + throw new GoneException(); + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/TransportClient.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/TransportClient.java new file mode 100644 index 0000000000000..2af1c428b5528 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/TransportClient.java @@ -0,0 +1,41 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, 
sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import reactor.core.publisher.Mono; + +import java.net.URI; + +public abstract class TransportClient implements AutoCloseable { + + // Uses requests's ResourceOperation to determine the operation + public Mono invokeResourceOperationAsync(URI physicalAddress, RxDocumentServiceRequest request) { + return this.invokeStoreAsync(physicalAddress, request); + } + + protected abstract Mono invokeStoreAsync( + URI physicalAddress, + RxDocumentServiceRequest request); +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/TransportException.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/TransportException.java new file mode 100644 index 0000000000000..306a820700c42 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/TransportException.java @@ -0,0 +1,49 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +package com.azure.data.cosmos.internal.directconnectivity; + +// TODO: DANOBLE: Use a TransportException derivative wherever CorruptFrameException is thrown in RntbdTransportClient +// * Continue to throw IllegalArgumentException, IllegalStateException, and NullPointerException. +// * Continue to complete all pending requests with a GoneException. 
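The TransportClient abstraction above is the seam between the consistency layer and the wire protocol, so a fake can be substituted in tests. A hypothetical stub (class name and wiring are illustrative, not part of the library):

    import com.azure.data.cosmos.internal.RxDocumentServiceRequest;
    import reactor.core.publisher.Mono;

    import java.net.URI;

    // Resolves every store call with one canned StoreResponse.
    class FixedResponseTransportClient extends TransportClient {
        private final StoreResponse cannedResponse;

        FixedResponseTransportClient(StoreResponse cannedResponse) {
            this.cannedResponse = cannedResponse;
        }

        @Override
        protected Mono<StoreResponse> invokeStoreAsync(URI physicalAddress, RxDocumentServiceRequest request) {
            return Mono.just(cannedResponse);
        }

        @Override
        public void close() {
            // nothing to release
        }
    }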
+// Customers should then expect to see these causes for GoneException errors originating in RntbdTransportClient: +// - TransportException +// - ReadTimeoutException +// - WriteTimeoutException +// These causes for GoneException errors will be logged as issues because they indicate a problem in the +// RntbdTransportClient code: +// - IllegalArgumentException +// - IllegalStateException +// - NullPointerException +// Any other exceptions caught by the RntbdTransportClient code will also be logged as issues because they +// indicate something unexpected happened. +// NOTES: +// We throw a derivative in one place: RntbdContextException in RntbdContext.decode. This is a special case +// that is handled by RntbdRequestManager.userEventTriggered. + +public class TransportException extends RuntimeException { + public TransportException(String message, Throwable cause) { + super(message, cause, /* enableSuppression */ true, /* writableStackTrace */ false); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/WFConstants.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/WFConstants.java new file mode 100644 index 0000000000000..d99a09217abdd --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/WFConstants.java @@ -0,0 +1,98 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.directconnectivity; + +public class WFConstants { + public static class BackendHeaders { + public static final String RESOURCE_ID = "x-docdb-resource-id"; + public static final String OWNER_ID = "x-docdb-owner-id"; + public static final String ENTITY_ID = "x-docdb-entity-id"; + public static final String DATABASE_ENTITY_MAX_COUNT = "x-ms-database-entity-max-count"; + public static final String DATABASE_ENTITY_CURRENT_COUNT = "x-ms-database-entity-current-count"; + public static final String COLLECTION_ENTITY_MAX_COUNT = "x-ms-collection-entity-max-count"; + public static final String COLLECTION_ENTITY_CURRENT_COUNT = "x-ms-collection-entity-current-count"; + public static final String USER_ENTITY_MAX_COUNT = "x-ms-user-entity-max-count"; + public static final String USER_ENTITY_CURRENT_COUNT = "x-ms-user-entity-current-count"; + public static final String PERMISSION_ENTITY_MAX_COUNT = "x-ms-permission-entity-max-count"; + public static final String PERMISSION_ENTITY_CURRENT_COUNT = "x-ms-permission-entity-current-count"; + public static final String ROOT_ENTITY_MAX_COUNT = "x-ms-root-entity-max-count"; + public static final String ROOT_ENTITY_CURRENT_COUNT = "x-ms-root-entity-current-count"; + public static final String RESOURCE_SCHEMA_NAME = "x-ms-resource-schema-name"; + public static final String LSN = "lsn"; + public static final String QUORUM_ACKED_LSN = "x-ms-quorum-acked-lsn"; + public static final String QUORUM_ACKED_LLSN = "x-ms-cosmos-quorum-acked-llsn"; + public static final String CURRENT_WRITE_QUORUM = "x-ms-current-write-quorum"; + public static final String CURRENT_REPLICA_SET_SIZE = "x-ms-current-replica-set-size"; + public static final String COLLECTION_PARTITION_INDEX = "collection-partition-index"; + public static final String COLLECTION_SERVICE_INDEX = "collection-service-index"; + public static final String STATUS = "Status"; + public static final String ACTIVITY_ID = "ActivityId"; + public static final String IS_FANOUT_REQUEST = "x-ms-is-fanout-request"; + public static final String PRIMARY_MASTER_KEY = "x-ms-primary-master-key"; + public static final String SECONDARY_MASTER_KEY = "x-ms-secondary-master-key"; + public static final String PRIMARY_READONLY_KEY = "x-ms-primary-readonly-key"; + public static final String SECONDARY_READONLY_KEY = "x-ms-secondary-readonly-key"; + public static final String BIND_REPLICA_DIRECTIVE = "x-ms-bind-replica"; + public static final String DATABASE_ACCOUNT_ID = "x-ms-database-account-id"; + public static final String REQUEST_VALIDATION_FAILURE = "x-ms-request-validation-failure"; + public static final String SUB_STATUS = "x-ms-substatus"; + public static final String PARTITION_KEY_RANGE_ID = "x-ms-documentdb-partitionkeyrangeid"; + public static final String BIND_MIN_EFFECTIVE_PARTITION_KEY = "x-ms-documentdb-bindmineffectivepartitionkey"; + public static final String BIND_MAX_EFFECTIVE_PARTITION_KEY = "x-ms-documentdb-bindmaxeffectivepartitionkey"; + public static final String BIND_PARTITION_KEY_RANGE_ID = "x-ms-documentdb-bindpartitionkeyrangeid"; + public static final String BIND_PARTITION_KEY_RANGE_RID_PREFIX = "x-ms-documentdb-bindpartitionkeyrangeridprefix"; + public static final String MINIMUM_ALLOWED_CLIENT_VERSION = "x-ms-documentdb-minimumallowedclientversion"; + public static final String PARTITION_COUNT = "x-ms-documentdb-partitioncount"; + public static final String COLLECTION_RID = "x-ms-documentdb-collection-rid"; + public static final String XP_ROLE = "x-ms-xp-role"; + 
public static final String HAS_TENTATIVE_WRITES = "x-ms-cosmosdb-has-tentative-writes"; + public static final String IS_RU_PER_MINUTE_USED = "x-ms-documentdb-is-ru-per-minute-used"; + public static final String QUERY_METRICS = "x-ms-documentdb-query-metrics"; + public static final String GLOBAL_COMMITTED_LSN = "x-ms-global-Committed-lsn"; + public static final String NUMBER_OF_READ_REGIONS = "x-ms-number-of-read-regions"; + public static final String OFFER_REPLACE_PENDING = "x-ms-offer-replace-pending"; + public static final String ITEM_LSN = "x-ms-item-lsn"; + public static final String REMOTE_STORAGE_TYPE = "x-ms-remote-storage-type"; + public static final String RESTORE_STATE = "x-ms-restore-state"; + public static final String COLLECTION_SECURITY_IDENTIFIER = "x-ms-collection-security-identifier"; + public static final String RESTORE_PARAMS = "x-ms-restore-params"; + public static final String SHARE_THROUGHPUT = "x-ms-share-throughput"; + public static final String PARTITION_RESOURCE_FILTER = "x-ms-partition-resource-filter"; + public static final String FEDERATION_ID_FOR_AUTH = "x-ms-federation-for-auth"; + public static final String FORCE_QUERY_SCAN = "x-ms-documentdb-force-query-scan"; + public static final String ENABLE_DYNAMIC_RID_RANGE_ALLOCATION = "x-ms-enable-dynamic-rid-range-allocation"; + public static final String EXCLUDE_SYSTEM_PROPERTIES = "x-ms-exclude-system-properties"; + public static final String LOCAL_LSN = "x-ms-cosmos-llsn"; + public static final String QUORUM_ACKED_LOCAL_LSN = "x-ms-cosmos-quorum-acked-llsn"; + public static final String ITEM_LOCAL_LSN = "x-ms-cosmos-item-llsn"; + public static final String BINARY_ID = "x-ms-binary-id"; + public static final String TIME_TO_LIVE_IN_SECONDS = "x-ms-time-to-live-in-seconds"; + public static final String EFFECTIVE_PARTITION_KEY = "x-ms-effective-partition-key"; + public static final String BINARY_PASSTHROUGH_REQUEST = "x-ms-binary-passthrough-request"; + public static final String FANOUT_OPERATION_STATE = "x-ms-fanout-operation-state"; + public static final String CONTENT_SERIALIZATION_FORMAT = "x-ms-documentdb-content-serialization-format"; + public static final String ALLOW_TENTATIVE_WRITES = "x-ms-cosmos-allow-tentative-writes"; + public static final String IS_USER_REQUEST = "x-ms-cosmos-internal-is-user-request"; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/WebExceptionUtility.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/WebExceptionUtility.java new file mode 100644 index 0000000000000..5363972807f46 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/WebExceptionUtility.java @@ -0,0 +1,97 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.internal.Utils; +import io.netty.channel.ChannelException; + +import javax.net.ssl.SSLHandshakeException; +import javax.net.ssl.SSLPeerUnverifiedException; +import java.io.IOException; +import java.net.ConnectException; +import java.net.NoRouteToHostException; +import java.net.UnknownHostException; + +public class WebExceptionUtility { + public static boolean isWebExceptionRetriable(Exception ex) { + Exception iterator = ex; + + while (iterator != null) { + if (WebExceptionUtility.isWebExceptionRetriableInternal(iterator)) { + return true; + } + + Throwable t = iterator.getCause(); + iterator = Utils.as(t, Exception.class); + } + + return false; + } + + private static boolean isWebExceptionRetriableInternal(Exception ex) { + + IOException webEx = Utils.as(ex, IOException.class); + if (webEx == null) { + return false; + } + + // any network failure for which we are certain the request hasn't reached the service endpoint. + if (webEx instanceof ConnectException || + webEx instanceof UnknownHostException || + webEx instanceof SSLHandshakeException || + webEx instanceof NoRouteToHostException || + webEx instanceof SSLPeerUnverifiedException) { + return true; + } + + return false; + } + + public static boolean isNetworkFailure(Exception ex) { + Exception iterator = ex; + + while (iterator != null) { + if (WebExceptionUtility.isNetworkFailureInternal(iterator)) { + return true; + } + + Throwable t = iterator.getCause(); + iterator = Utils.as(t, Exception.class); + } + + return false; + } + + private static boolean isNetworkFailureInternal(Exception ex) { + if (ex instanceof IOException) { + return true; + } + + if (ex instanceof ChannelException) { + return true; + } + + return false; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdClientChannelHandler.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdClientChannelHandler.java new file mode 100644 index 0000000000000..a91f7859fefdc --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdClientChannelHandler.java @@ -0,0 +1,139 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
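Given the checks in WebExceptionUtility above, a connect-time failure counts as retriable (the request never reached the endpoint), while a generic I/O error only counts as a network failure. Illustrative values:

    import java.io.IOException;
    import java.net.ConnectException;

    boolean retriable      = WebExceptionUtility.isWebExceptionRetriable(new ConnectException("refused"));   // true
    boolean notRetriable   = WebExceptionUtility.isWebExceptionRetriable(new IOException("reset"));          // false
    boolean networkFailure = WebExceptionUtility.isNetworkFailure(new IOException("reset"));                 // true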
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +package com.azure.data.cosmos.internal.directconnectivity.rntbd; + +import io.netty.channel.Channel; +import io.netty.channel.ChannelInitializer; +import io.netty.channel.ChannelPipeline; +import io.netty.channel.EventLoop; +import io.netty.channel.pool.ChannelPool; +import io.netty.channel.pool.ChannelPoolHandler; +import io.netty.handler.logging.LoggingHandler; +import io.netty.handler.ssl.SslHandler; +import io.netty.handler.timeout.ReadTimeoutHandler; +import io.netty.handler.timeout.WriteTimeoutHandler; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.net.ssl.SSLEngine; +import java.util.concurrent.TimeUnit; + +import static com.google.common.base.Preconditions.checkNotNull; + +public class RntbdClientChannelHandler extends ChannelInitializer implements ChannelPoolHandler { + + private static Logger logger = LoggerFactory.getLogger(RntbdClientChannelHandler.class); + private final RntbdEndpoint.Config config; + + RntbdClientChannelHandler(final RntbdEndpoint.Config config) { + checkNotNull(config, "config"); + this.config = config; + } + + /** + * Called by {@link ChannelPool#acquire} after a {@link Channel} is acquired + *
<p>
+ * This method is called within the {@link EventLoop} of the {@link Channel}. + * + * @param channel a channel that was just acquired + */ + @Override + public void channelAcquired(final Channel channel) { + logger.trace("{} CHANNEL ACQUIRED", channel); + } + + /** + * Called by {@link ChannelPool#release} after a {@link Channel} is created + *
<p>
+ * This method is called within the {@link EventLoop} of the {@link Channel}. + * + * @param channel a channel that was just created + */ + @Override + public void channelCreated(final Channel channel) { + logger.trace("{} CHANNEL CREATED", channel); + this.initChannel(channel); + } + + /** + * Called by {@link ChannelPool#release} after a {@link Channel} is released + *
<p>
+ * This method is called within the {@link EventLoop} of the {@link Channel}. + * + * @param channel a channel that was just released + */ + @Override + public void channelReleased(final Channel channel) { + logger.trace("{} CHANNEL RELEASED", channel); + } + + /** + * Called by @{ChannelPipeline} initializer after the current channel is registered to an event loop. + *
<p>
+ * This method constructs this pipeline: + *

<pre>{@code
+     * ChannelPipeline {
+     *     (ReadTimeoutHandler#0 = io.netty.handler.timeout.ReadTimeoutHandler),
+     *     (SslHandler#0 = io.netty.handler.ssl.SslHandler),
+     *     (RntbdContextNegotiator#0 = com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdContextNegotiator),
+     *     (RntbdResponseDecoder#0 = com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdResponseDecoder),
+     *     (RntbdRequestEncoder#0 = com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdRequestEncoder),
+     *     (WriteTimeoutHandler#0 = io.netty.handler.timeout.WriteTimeoutHandler),
+     *     (RntbdRequestManager#0 = com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdRequestManager),
+     * }
+     * }</pre>
+ * + * @param channel a channel that was just registered with an event loop + */ + @Override + protected void initChannel(final Channel channel) { + + checkNotNull(channel); + + final RntbdRequestManager requestManager = new RntbdRequestManager(this.config.getMaxRequestsPerChannel()); + final long readerIdleTime = this.config.getReceiveHangDetectionTime(); + final long writerIdleTime = this.config.getSendHangDetectionTime(); + final ChannelPipeline pipeline = channel.pipeline(); + + pipeline.addFirst( + new RntbdContextNegotiator(requestManager, this.config.getUserAgent()), + new RntbdResponseDecoder(), + new RntbdRequestEncoder(), + new WriteTimeoutHandler(writerIdleTime, TimeUnit.NANOSECONDS), + requestManager + ); + + if (this.config.getWireLogLevel() != null) { + pipeline.addFirst(new LoggingHandler(this.config.getWireLogLevel())); + } + + final SSLEngine sslEngine = this.config.getSslContext().newEngine(channel.alloc()); + + pipeline.addFirst( + new ReadTimeoutHandler(readerIdleTime, TimeUnit.NANOSECONDS), + new SslHandler(sslEngine) + ); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdClientChannelPool.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdClientChannelPool.java new file mode 100644 index 0000000000000..132b3fc16b481 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdClientChannelPool.java @@ -0,0 +1,266 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
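initChannel above issues several ChannelPipeline.addFirst calls, and the order documented in its javadoc follows from Netty's rule that each addFirst call prepends its handlers as a group while preserving their given order. A standalone sketch of that rule using placeholder handlers (the handler choices are arbitrary, not the RNTBD ones):

import io.netty.channel.ChannelDuplexHandler;
import io.netty.channel.ChannelPipeline;
import io.netty.channel.embedded.EmbeddedChannel;
import io.netty.handler.logging.LoggingHandler;
import io.netty.handler.timeout.ReadTimeoutHandler;

public final class AddFirstOrderingSketch {
    public static void main(String[] args) {
        EmbeddedChannel channel = new EmbeddedChannel();
        ChannelPipeline pipeline = channel.pipeline();

        pipeline.addFirst(new LoggingHandler(), new ChannelDuplexHandler());       // first group
        pipeline.addFirst(new ReadTimeoutHandler(30), new ChannelDuplexHandler()); // second group

        // Each addFirst call prepends its handlers as a group, keeping their given order, so the
        // second group now precedes the first head-to-tail. This is why ReadTimeoutHandler and
        // SslHandler sit at the head of the RNTBD pipeline even though initChannel adds them last.
        System.out.println(pipeline.names());

        channel.finishAndReleaseAll();
    }
}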
+ * + */ + +package com.azure.data.cosmos.internal.directconnectivity.rntbd; + +import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.databind.SerializerProvider; +import com.fasterxml.jackson.databind.annotation.JsonSerialize; +import com.fasterxml.jackson.databind.ser.std.StdSerializer; +import io.netty.bootstrap.Bootstrap; +import io.netty.channel.Channel; +import io.netty.channel.pool.ChannelHealthChecker; +import io.netty.channel.pool.FixedChannelPool; +import io.netty.util.concurrent.Future; +import io.netty.util.concurrent.Promise; +import org.apache.commons.lang3.reflect.FieldUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.lang.reflect.Field; +import java.net.SocketAddress; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; + +import static com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdReporter.reportIssue; +import static com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdReporter.reportIssueUnless; +import static com.google.common.base.Preconditions.checkState; + +@JsonSerialize(using = RntbdClientChannelPool.JsonSerializer.class) +public final class RntbdClientChannelPool extends FixedChannelPool { + + // region Fields + + private static final Logger logger = LoggerFactory.getLogger(RntbdClientChannelPool.class); + private static final AtomicReference pendingAcquireCount = new AtomicReference<>(); + + private final AtomicInteger availableChannelCount; + private final AtomicBoolean closed; + private final int maxChannels; + private final int maxRequestsPerChannel; + + // endregion + + // region Methods + + /** + * Initializes a newly created {@link RntbdClientChannelPool} object + * + * @param bootstrap the {@link Bootstrap} that is used for connections + * @param config the {@link RntbdEndpoint.Config} that is used for the channel pool instance created + */ + RntbdClientChannelPool(final Bootstrap bootstrap, final RntbdEndpoint.Config config) { + + super(bootstrap, new RntbdClientChannelHandler(config), ChannelHealthChecker.ACTIVE, null, + -1L, config.getMaxChannelsPerEndpoint(), Integer.MAX_VALUE, true + ); + + this.maxRequestsPerChannel = config.getMaxRequestsPerChannel(); + this.maxChannels = config.getMaxChannelsPerEndpoint(); + this.availableChannelCount = new AtomicInteger(); + this.closed = new AtomicBoolean(); + } + + @Override + public Future acquire(Promise promise) { + this.throwIfClosed(); + return super.acquire(promise); + } + + @Override + public Future release(Channel channel, Promise promise) { + this.throwIfClosed(); + return super.release(channel, promise); + } + + @Override + public void close() { + if (this.closed.compareAndSet(false, true)) { + this.availableChannelCount.set(0); + super.close(); + } + } + + public int availableChannelCount() { + return this.availableChannelCount.get(); + } + + public int maxChannels() { + return this.maxChannels; + } + + public int maxRequestsPerChannel() { + return this.maxRequestsPerChannel; + } + + public int pendingAcquisitionCount() { + + Field field = pendingAcquireCount.get(); + + if (field == null) { + synchronized (pendingAcquireCount) { + field = pendingAcquireCount.get(); + if (field == null) { + field = FieldUtils.getDeclaredField(FixedChannelPool.class, "pendingAcquireCount", true); + pendingAcquireCount.set(field); + } + } + } + + try { + return (int)FieldUtils.readField(field, this); + } catch 
(IllegalAccessException error) { + reportIssue(logger, this, "could not access field due to ", error); + } + + return -1; + } + + /** + * Poll a {@link Channel} out of internal storage to reuse it + *
<p>
+ * Maintainers: Implementations of this method must be thread-safe and this type's base class, {@link FixedChannelPool}, + * ensures thread safety. It does this by calling this method serially on a single-threaded EventExecutor. As a + * result this method need not (and should not) be synchronized. + * + * @return a value of {@code null}, if no {@link Channel} is ready to be reused + * + * @see #acquire(Promise) + */ + @Override + protected Channel pollChannel() { + + final Channel first = super.pollChannel(); + + if (first == null) { + return null; + } + + if (this.closed.get()) { + return first; // because we're being called following a call to close (from super.close) + } + + if (this.isInactiveOrServiceableChannel(first)) { + return this.decrementAvailableChannelCountAndAccept(first); + } + + super.offerChannel(first); // because we need a non-null sentinel to stop the search for a channel + + for (Channel next = super.pollChannel(); next != first; super.offerChannel(next), next = super.pollChannel()) { + if (this.isInactiveOrServiceableChannel(next)) { + return this.decrementAvailableChannelCountAndAccept(next); + } + } + + super.offerChannel(first); // because we choose not to check any channel more than once in a single call + return null; + } + + /** + * Offer a {@link Channel} back to the internal storage + *
<p>
+ * Maintainers: Implementations of this method must be thread-safe. + * + * @param channel the {@link Channel} to return to internal storage + * @return {@code true}, if the {@link Channel} could be added to internal storage; otherwise {@code false} + */ + @Override + protected boolean offerChannel(final Channel channel) { + if (super.offerChannel(channel)) { + this.availableChannelCount.incrementAndGet(); + return true; + } + return false; + } + + public SocketAddress remoteAddress() { + return this.bootstrap().config().remoteAddress(); + } + + @Override + public String toString() { + return "RntbdClientChannelPool(" + RntbdObjectMapper.toJson(this) + ")"; + } + + // endregion + + // region Privates + + private Channel decrementAvailableChannelCountAndAccept(final Channel first) { + this.availableChannelCount.decrementAndGet(); + return first; + } + + private boolean isInactiveOrServiceableChannel(final Channel channel) { + + if (!channel.isActive()) { + return true; + } + + final RntbdRequestManager requestManager = channel.pipeline().get(RntbdRequestManager.class); + + if (requestManager == null) { + reportIssueUnless(!channel.isActive(), logger, this, "{} active with no request manager", channel); + return true; // inactive + } + + return requestManager.isServiceable(this.maxRequestsPerChannel); + } + + private void throwIfClosed() { + checkState(!this.closed.get(), "%s is closed", this); + } + + // endregion + + // region Types + + static final class JsonSerializer extends StdSerializer { + + public JsonSerializer() { + this(null); + } + + public JsonSerializer(Class type) { + super(type); + } + + @Override + public void serialize(RntbdClientChannelPool value, JsonGenerator generator, SerializerProvider provider) throws IOException { + generator.writeStartObject(); + generator.writeStringField("remoteAddress", value.remoteAddress().toString()); + generator.writeNumberField("maxChannels", value.maxChannels()); + generator.writeNumberField("maxRequestsPerChannel", value.maxRequestsPerChannel()); + generator.writeObjectFieldStart("state"); + generator.writeBooleanField("isClosed", value.closed.get()); + generator.writeNumberField("acquiredChannelCount", value.acquiredChannelCount()); + generator.writeNumberField("availableChannelCount", value.availableChannelCount()); + generator.writeNumberField("pendingAcquisitionCount", value.pendingAcquisitionCount()); + generator.writeEndObject(); + generator.writeEndObject(); + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdConstants.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdConstants.java new file mode 100644 index 0000000000000..a6bc01c32f95a --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdConstants.java @@ -0,0 +1,751 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions 
of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +package com.azure.data.cosmos.internal.directconnectivity.rntbd; + +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Sets; + +import java.util.EnumSet; +import java.util.stream.Collector; + +final class RntbdConstants { + + static final int CurrentProtocolVersion = 0x00000001; + + private RntbdConstants() { + } + + public enum RntbdConsistencyLevel { + + Strong((byte)0x00), + BoundedStaleness((byte)0x01), + Session((byte)0x02), + Eventual((byte)0x03), + ConsistentPrefix((byte)0x04), + + Invalid((byte)0xFF); + + private final byte id; + + RntbdConsistencyLevel(final byte id) { + this.id = id; + } + + public byte id() { + return this.id; + } + } + + public enum RntbdContentSerializationFormat { + + JsonText((byte)0x00), + CosmosBinary((byte)0x01), + + Invalid((byte)0xFF); + + private final byte id; + + RntbdContentSerializationFormat(final byte id) { + this.id = id; + } + + public byte id() { + return this.id; + } + } + + @SuppressWarnings("UnstableApiUsage") + enum RntbdContextHeader implements RntbdHeader { + + ProtocolVersion((short)0x0000, RntbdTokenType.ULong, false), + ClientVersion((short)0x0001, RntbdTokenType.SmallString, false), + ServerAgent((short)0x0002, RntbdTokenType.SmallString, true), + ServerVersion((short)0x0003, RntbdTokenType.SmallString, true), + IdleTimeoutInSeconds((short)0x0004, RntbdTokenType.ULong, false), + UnauthenticatedTimeoutInSeconds((short)0x0005, RntbdTokenType.ULong, false); + + public static final ImmutableMap map; + public static final ImmutableSet set = Sets.immutableEnumSet(EnumSet.allOf(RntbdContextHeader.class)); + + static { + final Collector> collector = ImmutableMap.toImmutableMap(RntbdContextHeader::id, h -> h); + map = set.stream().collect(collector); + } + + private final short id; + private final boolean isRequired; + private final RntbdTokenType type; + + RntbdContextHeader(final short id, final RntbdTokenType type, final boolean isRequired) { + this.id = id; + this.type = type; + this.isRequired = isRequired; + } + + public boolean isRequired() { + return this.isRequired; + } + + public short id() { + return this.id; + } + + public RntbdTokenType type() { + return this.type; + } + } + + enum RntbdContextRequestHeader implements RntbdHeader { + + ProtocolVersion((short)0x0000, RntbdTokenType.ULong, true), + ClientVersion((short)0x0001, RntbdTokenType.SmallString, true), + UserAgent((short)0x0002, RntbdTokenType.SmallString, true); + + public static final ImmutableMap map; + public static final ImmutableSet set = Sets.immutableEnumSet(EnumSet.allOf(RntbdContextRequestHeader.class)); + + static { + final Collector> collector = ImmutableMap.toImmutableMap(h -> h.id(), h -> h); + map = set.stream().collect(collector); + } + + private final short id; + private final boolean isRequired; + private final RntbdTokenType type; + + RntbdContextRequestHeader(final short id, final RntbdTokenType type, final boolean isRequired) { + this.id = id; + this.type = type; 
+ this.isRequired = isRequired; + } + + public boolean isRequired() { + return this.isRequired; + } + + public short id() { + return this.id; + } + + public RntbdTokenType type() { + return this.type; + } + } + + public enum RntbdEnumerationDirection { + + Invalid((byte)0x00), + + Forward((byte)0x01), + Reverse((byte)0x02); + + private final byte id; + + RntbdEnumerationDirection(final byte id) { + this.id = id; + } + + public byte id() { + return this.id; + } + } + + public enum RntbdFanoutOperationState { + + Started((byte)0x01), + Completed((byte)0x02); + + private final byte id; + + RntbdFanoutOperationState(final byte id) { + this.id = id; + } + + public byte id() { + return this.id; + } + } + + enum RntbdIndexingDirective { + + Default((byte)0x00), + Include((byte)0x01), + Exclude((byte)0x02), + Invalid((byte)0xFF); + + private final byte id; + + RntbdIndexingDirective(final byte id) { + this.id = id; + } + + public static RntbdIndexingDirective fromId(final byte id) { + switch (id) { + case (byte)0x00: + return Default; + case (byte)0x01: + return Include; + case (byte)0x02: + return Exclude; + case (byte)0xFF: + return Invalid; + } + throw new IllegalArgumentException("id"); + } + + public byte id() { + return this.id; + } + } + + public enum RntbdMigrateCollectionDirective { + + Thaw((byte)0x00), + Freeze((byte)0x01), + + Invalid((byte)0xFF); + + private final byte id; + + RntbdMigrateCollectionDirective(final byte id) { + this.id = id; + } + + public byte id() { + return this.id; + } + } + + enum RntbdOperationType { + + Connection((short)0x0000), + Create((short)0x0001), + Update((short)0x0002), + Read((short)0x0003), + ReadFeed((short)0x0004), + Delete((short)0x0005), + Replace((short)0x0006), + // Obsolete and now undefined: JPathQuery((short)0x0007), + ExecuteJavaScript((short)0x0008), + SQLQuery((short)0x0009), + Pause((short)0x000A), + Resume((short)0x000B), + Stop((short)0x000C), + Recycle((short)0x000D), + Crash((short)0x000E), + Query((short)0x000F), + ForceConfigRefresh((short)0x0010), + Head((short)0x0011), + HeadFeed((short)0x0012), + Upsert((short)0x0013), + Recreate((short)0x0014), + Throttle((short)0x0015), + GetSplitPoint((short)0x0016), + PreCreateValidation((short)0x0017), + BatchApply((short)0x0018), + AbortSplit((short)0x0019), + CompleteSplit((short)0x001A), + OfferUpdateOperation((short)0x001B), + OfferPreGrowValidation((short)0x001C), + BatchReportThroughputUtilization((short)0x001D), + CompletePartitionMigration((short)0x001E), + AbortPartitionMigration((short)0x001F), + PreReplaceValidation((short)0x0020), + AddComputeGatewayRequestCharges((short)0x0021), + MigratePartition((short)0x0022); + + private final short id; + + RntbdOperationType(final short id) { + this.id = id; + } + + public static RntbdOperationType fromId(final short id) throws IllegalArgumentException { + + switch (id) { + case 0x0000: + return RntbdOperationType.Connection; + case 0x0001: + return RntbdOperationType.Create; + case 0x0002: + return RntbdOperationType.Update; + case 0x0003: + return RntbdOperationType.Read; + case 0x0004: + return RntbdOperationType.ReadFeed; + case 0x0005: + return RntbdOperationType.Delete; + case 0x0006: + return RntbdOperationType.Replace; + // Obsolete and now undefined: case 0x0007: return RntbdOperationType.JPathQuery; + case 0x0008: + return RntbdOperationType.ExecuteJavaScript; + case 0x0009: + return RntbdOperationType.SQLQuery; + case 0x000A: + return RntbdOperationType.Pause; + case 0x000B: + return RntbdOperationType.Resume; + case 0x000C: + 
return RntbdOperationType.Stop; + case 0x000D: + return RntbdOperationType.Recycle; + case 0x000E: + return RntbdOperationType.Crash; + case 0x000F: + return RntbdOperationType.Query; + case 0x0010: + return RntbdOperationType.ForceConfigRefresh; + case 0x0011: + return RntbdOperationType.Head; + case 0x0012: + return RntbdOperationType.HeadFeed; + case 0x0013: + return RntbdOperationType.Upsert; + case 0x0014: + return RntbdOperationType.Recreate; + case 0x0015: + return RntbdOperationType.Throttle; + case 0x0016: + return RntbdOperationType.GetSplitPoint; + case 0x0017: + return RntbdOperationType.PreCreateValidation; + case 0x0018: + return RntbdOperationType.BatchApply; + case 0x0019: + return RntbdOperationType.AbortSplit; + case 0x001A: + return RntbdOperationType.CompleteSplit; + case 0x001B: + return RntbdOperationType.OfferUpdateOperation; + case 0x001C: + return RntbdOperationType.OfferPreGrowValidation; + case 0x001D: + return RntbdOperationType.BatchReportThroughputUtilization; + case 0x001E: + return RntbdOperationType.CompletePartitionMigration; + case 0x001F: + return RntbdOperationType.AbortPartitionMigration; + case 0x0020: + return RntbdOperationType.PreReplaceValidation; + case 0x0021: + return RntbdOperationType.AddComputeGatewayRequestCharges; + case 0x0022: + return RntbdOperationType.MigratePartition; + } + throw new IllegalArgumentException("id"); + } + + public short id() { + return this.id; + } + } + + public enum RntbdReadFeedKeyType { + + Invalid((byte)0x00), + ResourceId((byte)0x01), + EffectivePartitionKey((byte)0x02); + + private final byte id; + + RntbdReadFeedKeyType(final byte id) { + this.id = id; + } + + public byte id() { + return this.id; + } + } + + public enum RntbdRemoteStorageType { + + Invalid((byte)0x00), + NotSpecified((byte)0x01), + Standard((byte)0x02), + Premium((byte)0x03); + + private final byte id; + + RntbdRemoteStorageType(final byte id) { + this.id = id; + } + + public byte id() { + return this.id; + } + } + + public enum RntbdRequestHeader implements RntbdHeader { + + ResourceId((short)0x0000, RntbdTokenType.Bytes, false), + AuthorizationToken((short)0x0001, RntbdTokenType.String, false), + PayloadPresent((short)0x0002, RntbdTokenType.Byte, true), + Date((short)0x0003, RntbdTokenType.SmallString, false), + PageSize((short)0x0004, RntbdTokenType.ULong, false), + SessionToken((short)0x0005, RntbdTokenType.String, false), + ContinuationToken((short)0x0006, RntbdTokenType.String, false), + IndexingDirective((short)0x0007, RntbdTokenType.Byte, false), + Match((short)0x0008, RntbdTokenType.String, false), + PreTriggerInclude((short)0x0009, RntbdTokenType.String, false), + PostTriggerInclude((short)0x000A, RntbdTokenType.String, false), + IsFanout((short)0x000B, RntbdTokenType.Byte, false), + CollectionPartitionIndex((short)0x000C, RntbdTokenType.ULong, false), + CollectionServiceIndex((short)0x000D, RntbdTokenType.ULong, false), + PreTriggerExclude((short)0x000E, RntbdTokenType.String, false), + PostTriggerExclude((short)0x000F, RntbdTokenType.String, false), + ConsistencyLevel((short)0x0010, RntbdTokenType.Byte, false), + EntityId((short)0x0011, RntbdTokenType.String, false), + ResourceSchemaName((short)0x0012, RntbdTokenType.SmallString, false), + ReplicaPath((short)0x0013, RntbdTokenType.String, true), + ResourceTokenExpiry((short)0x0014, RntbdTokenType.ULong, false), + DatabaseName((short)0x0015, RntbdTokenType.String, false), + CollectionName((short)0x0016, RntbdTokenType.String, false), + DocumentName((short)0x0017, 
RntbdTokenType.String, false), + AttachmentName((short)0x0018, RntbdTokenType.String, false), + UserName((short)0x0019, RntbdTokenType.String, false), + PermissionName((short)0x001A, RntbdTokenType.String, false), + StoredProcedureName((short)0x001B, RntbdTokenType.String, false), + UserDefinedFunctionName((short)0x001C, RntbdTokenType.String, false), + TriggerName((short)0x001D, RntbdTokenType.String, false), + EnableScanInQuery((short)0x001E, RntbdTokenType.Byte, false), + EmitVerboseTracesInQuery((short)0x001F, RntbdTokenType.Byte, false), + ConflictName((short)0x0020, RntbdTokenType.String, false), + BindReplicaDirective((short)0x0021, RntbdTokenType.String, false), + PrimaryMasterKey((short)0x0022, RntbdTokenType.String, false), + SecondaryMasterKey((short)0x0023, RntbdTokenType.String, false), + PrimaryReadonlyKey((short)0x0024, RntbdTokenType.String, false), + SecondaryReadonlyKey((short)0x0025, RntbdTokenType.String, false), + ProfileRequest((short)0x0026, RntbdTokenType.Byte, false), + EnableLowPrecisionOrderBy((short)0x0027, RntbdTokenType.Byte, false), + ClientVersion((short)0x0028, RntbdTokenType.SmallString, false), + CanCharge((short)0x0029, RntbdTokenType.Byte, false), + CanThrottle((short)0x002A, RntbdTokenType.Byte, false), + PartitionKey((short)0x002B, RntbdTokenType.String, false), + PartitionKeyRangeId((short)0x002C, RntbdTokenType.String, false), + NotUsed2D((short)0x002D, RntbdTokenType.Invalid, false), + NotUsed2E((short)0x002E, RntbdTokenType.Invalid, false), + NotUsed2F((short)0x002F, RntbdTokenType.Invalid, false), + // not used 0x0030, + MigrateCollectionDirective((short)0x0031, RntbdTokenType.Byte, false), + NotUsed32((short)0x0032, RntbdTokenType.Invalid, false), + SupportSpatialLegacyCoordinates((short)0x0033, RntbdTokenType.Byte, false), + PartitionCount((short)0x0034, RntbdTokenType.ULong, false), + CollectionRid((short)0x0035, RntbdTokenType.String, false), + PartitionKeyRangeName((short)0x0036, RntbdTokenType.String, false), + // not used((short)0x0037), RoundTripTimeInMsec + // not used((short)0x0038), RequestMessageSentTime + // not used((short)0x0039), RequestMessageTimeOffset + SchemaName((short)0x003A, RntbdTokenType.String, false), + FilterBySchemaRid((short)0x003B, RntbdTokenType.String, false), + UsePolygonsSmallerThanAHemisphere((short)0x003C, RntbdTokenType.Byte, false), + GatewaySignature((short)0x003D, RntbdTokenType.String, false), + EnableLogging((short)0x003E, RntbdTokenType.Byte, false), + A_IM((short)0x003F, RntbdTokenType.String, false), + PopulateQuotaInfo((short)0x0040, RntbdTokenType.Byte, false), + DisableRUPerMinuteUsage((short)0x0041, RntbdTokenType.Byte, false), + PopulateQueryMetrics((short)0x0042, RntbdTokenType.Byte, false), + ResponseContinuationTokenLimitInKb((short)0x0043, RntbdTokenType.ULong, false), + PopulatePartitionStatistics((short)0x0044, RntbdTokenType.Byte, false), + RemoteStorageType((short)0x0045, RntbdTokenType.Byte, false), + CollectionRemoteStorageSecurityIdentifier((short)0x0046, RntbdTokenType.String, false), + IfModifiedSince((short)0x0047, RntbdTokenType.String, false), + PopulateCollectionThroughputInfo((short)0x0048, RntbdTokenType.Byte, false), + RemainingTimeInMsOnClientRequest((short)0x0049, RntbdTokenType.ULong, false), + ClientRetryAttemptCount((short)0x004A, RntbdTokenType.ULong, false), + TargetLsn((short)0x004B, RntbdTokenType.LongLong, false), + TargetGlobalCommittedLsn((short)0x004C, RntbdTokenType.LongLong, false), + TransportRequestID((short)0x004D, RntbdTokenType.ULong, false), + 
RestoreMetadaFilter((short)0x004E, RntbdTokenType.String, false), + RestoreParams((short)0x004F, RntbdTokenType.String, false), + ShareThroughput((short)0x0050, RntbdTokenType.Byte, false), + PartitionResourceFilter((short)0x0051, RntbdTokenType.String, false), + IsReadOnlyScript((short)0x0052, RntbdTokenType.Byte, false), + IsAutoScaleRequest((short)0x0053, RntbdTokenType.Byte, false), + ForceQueryScan((short)0x0054, RntbdTokenType.Byte, false), + // not used((short)0x0055), LeaseSeqNumber + CanOfferReplaceComplete((short)0x0056, RntbdTokenType.Byte, false), + ExcludeSystemProperties((short)0x0057, RntbdTokenType.Byte, false), + BinaryId((short)0x0058, RntbdTokenType.Bytes, false), + TimeToLiveInSeconds((short)0x0059, RntbdTokenType.Long, false), + EffectivePartitionKey((short)0x005A, RntbdTokenType.Bytes, false), + BinaryPassthroughRequest((short)0x005B, RntbdTokenType.Byte, false), + UserDefinedTypeName((short)0x005C, RntbdTokenType.String, false), + EnableDynamicRidRangeAllocation((short)0x005D, RntbdTokenType.Byte, false), + EnumerationDirection((short)0x005E, RntbdTokenType.Byte, false), + StartId((short)0x005F, RntbdTokenType.Bytes, false), + EndId((short)0x0060, RntbdTokenType.Bytes, false), + FanoutOperationState((short)0x0061, RntbdTokenType.Byte, false), + StartEpk((short)0x0062, RntbdTokenType.Bytes, false), + EndEpk((short)0x0063, RntbdTokenType.Bytes, false), + ReadFeedKeyType((short)0x0064, RntbdTokenType.Byte, false), + ContentSerializationFormat((short)0x0065, RntbdTokenType.Byte, false), + AllowTentativeWrites((short)0x0066, RntbdTokenType.Byte, false), + IsUserRequest((short)0x0067, RntbdTokenType.Byte, false), + SharedOfferThroughput((short)0x0068, RntbdTokenType.ULong, false); + + public static final ImmutableMap map; + public static final ImmutableSet set = Sets.immutableEnumSet(EnumSet.allOf(RntbdRequestHeader.class)); + + static { + final Collector> collector = ImmutableMap.toImmutableMap(RntbdRequestHeader::id, h -> h); + map = set.stream().collect(collector); + } + + private final short id; + private final boolean isRequired; + private final RntbdTokenType type; + + RntbdRequestHeader(final short id, final RntbdTokenType type, final boolean isRequired) { + this.id = id; + this.type = type; + this.isRequired = isRequired; + } + + public boolean isRequired() { + return this.isRequired; + } + + public short id() { + return this.id; + } + + public RntbdTokenType type() { + return this.type; + } + } + + enum RntbdResourceType { + + Connection((short)0x0000), + Database((short)0x0001), + Collection((short)0x0002), + Document((short)0x0003), + Attachment((short)0x0004), + User((short)0x0005), + Permission((short)0x0006), + StoredProcedure((short)0x0007), + Conflict((short)0x0008), + Trigger((short)0x0009), + UserDefinedFunction((short)0x000A), + Module((short)0x000B), + Replica((short)0x000C), + ModuleCommand((short)0x000D), + Record((short)0x000E), + Offer((short)0x000F), + PartitionSetInformation((short)0x0010), + XPReplicatorAddress((short)0x0011), + MasterPartition((short)0x0012), + ServerPartition((short)0x0013), + DatabaseAccount((short)0x0014), + Topology((short)0x0015), + PartitionKeyRange((short)0x0016), + // Obsolete and now undefined: Timestamp((short)0x0017), + Schema((short)0x0018), + BatchApply((short)0x0019), + RestoreMetadata((short)0x001A), + ComputeGatewayCharges((short)0x001B), + RidRange((short)0x001C), + UserDefinedType((short)0x001D); + + private final short id; + + RntbdResourceType(final short id) { + this.id = id; + } + + public static 
RntbdResourceType fromId(final short id) throws IllegalArgumentException { + switch (id) { + case 0x0000: + return RntbdResourceType.Connection; + case 0x0001: + return RntbdResourceType.Database; + case 0x0002: + return RntbdResourceType.Collection; + case 0x0003: + return RntbdResourceType.Document; + case 0x0004: + return RntbdResourceType.Attachment; + case 0x0005: + return RntbdResourceType.User; + case 0x0006: + return RntbdResourceType.Permission; + case 0x0007: + return RntbdResourceType.StoredProcedure; + case 0x0008: + return RntbdResourceType.Conflict; + case 0x0009: + return RntbdResourceType.Trigger; + case 0x000A: + return RntbdResourceType.UserDefinedFunction; + case 0x000B: + return RntbdResourceType.Module; + case 0x000C: + return RntbdResourceType.Replica; + case 0x000D: + return RntbdResourceType.ModuleCommand; + case 0x000E: + return RntbdResourceType.Record; + case 0x000F: + return RntbdResourceType.Offer; + case 0x0010: + return RntbdResourceType.PartitionSetInformation; + case 0x0011: + return RntbdResourceType.XPReplicatorAddress; + case 0x0012: + return RntbdResourceType.MasterPartition; + case 0x0013: + return RntbdResourceType.ServerPartition; + case 0x0014: + return RntbdResourceType.DatabaseAccount; + case 0x0015: + return RntbdResourceType.Topology; + case 0x0016: + return RntbdResourceType.PartitionKeyRange; + // Obsolete and now undefined: case 0x0017: return RntbdResourceType.Timestamp; + case 0x0018: + return RntbdResourceType.Schema; + case 0x0019: + return RntbdResourceType.BatchApply; + case 0x001A: + return RntbdResourceType.RestoreMetadata; + case 0x001B: + return RntbdResourceType.ComputeGatewayCharges; + case 0x001C: + return RntbdResourceType.RidRange; + case 0x001D: + return RntbdResourceType.UserDefinedType; + } + throw new IllegalArgumentException(String.format("id: %d", id)); + } + + public short id() { + return this.id; + } + } + + public enum RntbdResponseHeader implements RntbdHeader { + + PayloadPresent((short)0x0000, RntbdTokenType.Byte, true), + // not used((short)0x0001), + LastStateChangeDateTime((short)0x0002, RntbdTokenType.SmallString, false), + ContinuationToken((short)0x0003, RntbdTokenType.String, false), + ETag((short)0x0004, RntbdTokenType.String, false), + // not used((short)0x005,) + // not used((short)0x006,) + ReadsPerformed((short)0x0007, RntbdTokenType.ULong, false), + WritesPerformed((short)0x0008, RntbdTokenType.ULong, false), + QueriesPerformed((short)0x0009, RntbdTokenType.ULong, false), + IndexTermsGenerated((short)0x000A, RntbdTokenType.ULong, false), + ScriptsExecuted((short)0x000B, RntbdTokenType.ULong, false), + RetryAfterMilliseconds((short)0x000C, RntbdTokenType.ULong, false), + IndexingDirective((short)0x000D, RntbdTokenType.Byte, false), + StorageMaxResoureQuota((short)0x000E, RntbdTokenType.String, false), + StorageResourceQuotaUsage((short)0x000F, RntbdTokenType.String, false), + SchemaVersion((short)0x0010, RntbdTokenType.SmallString, false), + CollectionPartitionIndex((short)0x0011, RntbdTokenType.ULong, false), + CollectionServiceIndex((short)0x0012, RntbdTokenType.ULong, false), + LSN((short)0x0013, RntbdTokenType.LongLong, false), + ItemCount((short)0x0014, RntbdTokenType.ULong, false), + RequestCharge((short)0x0015, RntbdTokenType.Double, false), + // not used((short)0x0016), + OwnerFullName((short)0x0017, RntbdTokenType.String, false), + OwnerId((short)0x0018, RntbdTokenType.String, false), + DatabaseAccountId((short)0x0019, RntbdTokenType.String, false), + QuorumAckedLSN((short)0x001A, 
RntbdTokenType.LongLong, false), + RequestValidationFailure((short)0x001B, RntbdTokenType.Byte, false), + SubStatus((short)0x001C, RntbdTokenType.ULong, false), + CollectionUpdateProgress((short)0x001D, RntbdTokenType.ULong, false), + CurrentWriteQuorum((short)0x001E, RntbdTokenType.ULong, false), + CurrentReplicaSetSize((short)0x001F, RntbdTokenType.ULong, false), + CollectionLazyIndexProgress((short)0x0020, RntbdTokenType.ULong, false), + PartitionKeyRangeId((short)0x0021, RntbdTokenType.String, false), + // not used((short)0x0022), RequestMessageReceivedTime + // not used((short)0x0023), ResponseMessageSentTime + // not used((short)0x0024), ResponseMessageTimeOffset + LogResults((short)0x0025, RntbdTokenType.String, false), + XPRole((short)0x0026, RntbdTokenType.ULong, false), + IsRUPerMinuteUsed((short)0x0027, RntbdTokenType.Byte, false), + QueryMetrics((short)0x0028, RntbdTokenType.String, false), + GlobalCommittedLSN((short)0x0029, RntbdTokenType.LongLong, false), + NumberOfReadRegions((short)0x0030, RntbdTokenType.ULong, false), + OfferReplacePending((short)0x0031, RntbdTokenType.Byte, false), + ItemLSN((short)0x0032, RntbdTokenType.LongLong, false), + RestoreState((short)0x0033, RntbdTokenType.String, false), + CollectionSecurityIdentifier((short)0x0034, RntbdTokenType.String, false), + TransportRequestID((short)0x0035, RntbdTokenType.ULong, false), + ShareThroughput((short)0x0036, RntbdTokenType.Byte, false), + // not used((short)0x0037), LeaseSeqNumber + DisableRntbdChannel((short)0x0038, RntbdTokenType.Byte, false), + ServerDateTimeUtc((short)0x0039, RntbdTokenType.SmallString, false), + LocalLSN((short)0x003A, RntbdTokenType.LongLong, false), + QuorumAckedLocalLSN((short)0x003B, RntbdTokenType.LongLong, false), + ItemLocalLSN((short)0x003C, RntbdTokenType.LongLong, false), + HasTentativeWrites((short)0x003D, RntbdTokenType.Byte, false), + SessionToken((short)0x003E, RntbdTokenType.String, false); + + public static final ImmutableMap map; + public static final ImmutableSet set = Sets.immutableEnumSet(EnumSet.allOf(RntbdResponseHeader.class)); + + static { + final Collector> collector = ImmutableMap.toImmutableMap(RntbdResponseHeader::id, header -> header); + map = set.stream().collect(collector); + } + + private final short id; + private final boolean isRequired; + private final RntbdTokenType type; + + RntbdResponseHeader(final short id, final RntbdTokenType type, final boolean isRequired) { + this.id = id; + this.type = type; + this.isRequired = isRequired; + } + + public boolean isRequired() { + return this.isRequired; + } + + public short id() { + return this.id; + } + + public RntbdTokenType type() { + return this.type; + } + } + + interface RntbdHeader { + + boolean isRequired(); + + short id(); + + String name(); + + RntbdTokenType type(); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdContext.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdContext.java new file mode 100644 index 0000000000000..10f2fd894fb5f --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdContext.java @@ -0,0 +1,207 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the 
rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +package com.azure.data.cosmos.internal.directconnectivity.rntbd; + +import com.azure.data.cosmos.internal.directconnectivity.ServerProperties; +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.databind.node.ObjectNode; +import io.netty.buffer.ByteBuf; +import io.netty.handler.codec.http.HttpResponseStatus; + +import java.util.Collections; +import java.util.HashMap; +import java.util.UUID; + +import static com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdConstants.CurrentProtocolVersion; +import static com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdConstants.RntbdContextHeader; +import static com.google.common.base.Preconditions.checkState; + +public final class RntbdContext { + + private final RntbdResponseStatus frame; + private final Headers headers; + private ServerProperties serverProperties; + + private RntbdContext(final RntbdResponseStatus frame, final Headers headers) { + this.frame = frame; + this.headers = headers; + } + + @JsonProperty + public UUID getActivityId() { + return this.frame.getActivityId(); + } + + @JsonProperty + public String getClientVersion() { + return this.headers.clientVersion.getValue(String.class); + } + + @JsonProperty + public long getIdleTimeoutInSeconds() { + return this.headers.idleTimeoutInSeconds.getValue(Long.class); + } + + @JsonProperty + public int getProtocolVersion() { + return this.headers.protocolVersion.getValue(Long.class).intValue(); + } + + @JsonProperty + public ServerProperties getServerProperties() { + return this.serverProperties == null ? 
(this.serverProperties = new ServerProperties( + this.headers.serverAgent.getValue(String.class), + this.headers.serverVersion.getValue(String.class)) + ) : this.serverProperties; + } + + @JsonIgnore + public String getServerVersion() { + return this.headers.serverVersion.getValue(String.class); + } + + @JsonProperty + public int getStatusCode() { + return this.frame.getStatusCode(); + } + + @JsonProperty + public long getUnauthenticatedTimeoutInSeconds() { + return this.headers.unauthenticatedTimeoutInSeconds.getValue(Long.class); + } + + public static RntbdContext decode(final ByteBuf in) { + + in.markReaderIndex(); + + final RntbdResponseStatus frame = RntbdResponseStatus.decode(in); + final int statusCode = frame.getStatusCode(); + final int headersLength = frame.getHeadersLength(); + + if (statusCode < 200 || statusCode >= 400) { + if (!RntbdFramer.canDecodePayload(in, in.readerIndex() + headersLength)) { + in.resetReaderIndex(); + return null; + } + } + + final Headers headers = Headers.decode(in.readSlice(headersLength)); + + if (statusCode < 200 || statusCode >= 400) { + + final ObjectNode details = RntbdObjectMapper.readTree(in.readSlice(in.readIntLE())); + final HashMap map = new HashMap<>(4); + + if (headers.clientVersion.isPresent()) { + map.put("requiredClientVersion", headers.clientVersion.getValue()); + } + + if (headers.protocolVersion.isPresent()) { + map.put("requiredProtocolVersion", headers.protocolVersion.getValue()); + } + + if (headers.serverAgent.isPresent()) { + map.put("serverAgent", headers.serverAgent.getValue()); + } + + if (headers.serverVersion.isPresent()) { + map.put("serverVersion", headers.serverVersion.getValue()); + } + + throw new RntbdContextException(frame.getStatus(), details, Collections.unmodifiableMap(map)); + } + + return new RntbdContext(frame, headers); + } + + public void encode(final ByteBuf out) { + + final int start = out.writerIndex(); + + this.frame.encode(out); + this.headers.encode(out); + + final int length = out.writerIndex() - start; + checkState(length == this.frame.getLength()); + } + + public static RntbdContext from(final RntbdContextRequest request, final ServerProperties properties, final HttpResponseStatus status) { + + // NOTE TO CODE REVIEWERS + // ---------------------- + // In its current form this method is meant to enable a limited set of test scenarios. It will be revised as + // required to support test scenarios as they are developed. 
+ + final Headers headers = new Headers(); + + headers.clientVersion.setValue(request.getClientVersion()); + headers.idleTimeoutInSeconds.setValue(0); + headers.protocolVersion.setValue(CurrentProtocolVersion); + headers.serverAgent.setValue(properties.getAgent()); + headers.serverVersion.setValue(properties.getVersion()); + headers.unauthenticatedTimeoutInSeconds.setValue(0); + + final int length = RntbdResponseStatus.LENGTH + headers.computeLength(); + final UUID activityId = request.getActivityId(); + + final RntbdResponseStatus frame = new RntbdResponseStatus(length, status, activityId); + + return new RntbdContext(frame, headers); + } + + @Override + public String toString() { + return RntbdObjectMapper.toJson(this); + } + + private static final class Headers extends RntbdTokenStream { + + RntbdToken clientVersion; + RntbdToken idleTimeoutInSeconds; + RntbdToken protocolVersion; + RntbdToken serverAgent; + RntbdToken serverVersion; + RntbdToken unauthenticatedTimeoutInSeconds; + + Headers() { + + super(RntbdContextHeader.set, RntbdContextHeader.map); + + this.clientVersion = this.get(RntbdContextHeader.ClientVersion); + this.idleTimeoutInSeconds = this.get(RntbdContextHeader.IdleTimeoutInSeconds); + this.protocolVersion = this.get(RntbdContextHeader.ProtocolVersion); + this.serverAgent = this.get(RntbdContextHeader.ServerAgent); + this.serverVersion = this.get(RntbdContextHeader.ServerVersion); + this.unauthenticatedTimeoutInSeconds = this.get(RntbdContextHeader.UnauthenticatedTimeoutInSeconds); + } + + static Headers decode(final ByteBuf in) { + final Headers headers = new Headers(); + Headers.decode(in, headers); + return headers; + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdContextDecoder.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdContextDecoder.java new file mode 100644 index 0000000000000..d874549ccd907 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdContextDecoder.java @@ -0,0 +1,69 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
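Per the note to reviewers above, RntbdContext.from is a test hook for fabricating the context a server would return. A hedged sketch of how a test might use it; it assumes UserAgentContainer's no-argument constructor, uses made-up agent and version strings, and sits in the same package because RntbdContextRequest's constructor is package-private:

package com.azure.data.cosmos.internal.directconnectivity.rntbd;

import com.azure.data.cosmos.internal.UserAgentContainer;
import com.azure.data.cosmos.internal.directconnectivity.ServerProperties;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.handler.codec.http.HttpResponseStatus;

import java.util.UUID;

final class RntbdContextTestSketch {
    public static void main(String[] args) {
        // Fabricate the negotiation exchange a test might need: a client context request and
        // the context a healthy server would answer it with.
        RntbdContextRequest request = new RntbdContextRequest(UUID.randomUUID(), new UserAgentContainer());
        RntbdContext context = RntbdContext.from(request, new ServerProperties("agent", "1.0"), HttpResponseStatus.OK);

        // Serialize it the way a server would put it on the wire.
        ByteBuf buffer = Unpooled.buffer();
        context.encode(buffer);
        System.out.println("encoded RNTBD context: " + buffer.readableBytes() + " bytes");
        buffer.release();
    }
}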
+ * + */ + +package com.azure.data.cosmos.internal.directconnectivity.rntbd; + +import io.netty.buffer.ByteBuf; +import io.netty.channel.ChannelHandlerContext; +import io.netty.handler.codec.ByteToMessageDecoder; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.List; + +class RntbdContextDecoder extends ByteToMessageDecoder { + + private static final Logger logger = LoggerFactory.getLogger(RntbdContextDecoder.class); + + /** + * Deserialize from an input {@link ByteBuf} to an {@link RntbdContext} instance + *
<p>
+ * This method decodes an {@link RntbdContext} or {@link RntbdContextException} instance and fires a user event. + * + * @param context the {@link ChannelHandlerContext} to which this {@link RntbdContextDecoder} belongs + * @param in the {@link ByteBuf} from which to readTree data + * @param out the {@link List} to which decoded messages should be added + */ + @Override + protected void decode(final ChannelHandlerContext context, final ByteBuf in, final List out) { + + if (RntbdFramer.canDecodeHead(in)) { + + Object result; + + try { + final RntbdContext rntbdContext = RntbdContext.decode(in); + context.fireUserEventTriggered(rntbdContext); + result = rntbdContext; + } catch (RntbdContextException error) { + context.fireUserEventTriggered(error); + result = error; + } finally { + in.discardReadBytes(); + } + + logger.debug("{} DECODE COMPLETE: {}", context.channel(), result); + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdContextException.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdContextException.java new file mode 100644 index 0000000000000..992e7664040f1 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdContextException.java @@ -0,0 +1,61 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
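RntbdContextDecoder above reports the outcome of context negotiation as a user event rather than an inbound message, firing either an RntbdContext or an RntbdContextException up the pipeline; in this transport RntbdRequestManager is the handler that consumes those events. An illustrative listener showing the shape of that contract (the logging and close behavior are assumptions for the sketch):

import com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdContext;
import com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdContextException;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;

final class RntbdContextEventListenerSketch extends ChannelInboundHandlerAdapter {
    @Override
    public void userEventTriggered(ChannelHandlerContext context, Object event) throws Exception {
        if (event instanceof RntbdContext) {
            // Negotiation succeeded: the connection-level context is known and pending writes can be flushed.
            System.out.println("RNTBD context established: " + event);
        } else if (event instanceof RntbdContextException) {
            // Negotiation failed: surface the error and tear the channel down.
            context.fireExceptionCaught((RntbdContextException) event);
            context.close();
        }
        super.userEventTriggered(context, event);
    }
}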
+ * + */ + +package com.azure.data.cosmos.internal.directconnectivity.rntbd; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.CosmosError; +import com.azure.data.cosmos.internal.directconnectivity.TransportException; +import com.fasterxml.jackson.databind.node.ObjectNode; +import io.netty.handler.codec.http.HttpResponseStatus; + +import java.util.Map; + +public final class RntbdContextException extends TransportException { + + final private CosmosError cosmosError; + final private Map responseHeaders; + final private HttpResponseStatus status; + + RntbdContextException(HttpResponseStatus status, ObjectNode details, Map responseHeaders) { + + super(status + ": " + details, null); + + this.cosmosError = BridgeInternal.createCosmosError(details); + this.responseHeaders = responseHeaders; + this.status = status; + } + + public CosmosError getCosmosError() { + return cosmosError; + } + + public Map getResponseHeaders() { + return responseHeaders; + } + + public HttpResponseStatus getStatus() { + return status; + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdContextNegotiator.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdContextNegotiator.java new file mode 100644 index 0000000000000..58aa970ee3e73 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdContextNegotiator.java @@ -0,0 +1,120 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ + +package com.azure.data.cosmos.internal.directconnectivity.rntbd; + +import com.azure.data.cosmos.internal.UserAgentContainer; +import com.azure.data.cosmos.internal.Utils; +import io.netty.buffer.ByteBuf; +import io.netty.channel.Channel; +import io.netty.channel.ChannelFutureListener; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelPipeline; +import io.netty.channel.ChannelPromise; +import io.netty.channel.CombinedChannelDuplexHandler; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.concurrent.CompletableFuture; + +import static com.google.common.base.Preconditions.checkArgument; +import static com.google.common.base.Preconditions.checkNotNull; + +public final class RntbdContextNegotiator extends CombinedChannelDuplexHandler { + + private static final Logger logger = LoggerFactory.getLogger(RntbdContextNegotiator.class); + private final RntbdRequestManager manager; + private final UserAgentContainer userAgent; + + private volatile boolean pendingRntbdContextRequest = true; + + public RntbdContextNegotiator(final RntbdRequestManager manager, final UserAgentContainer userAgent) { + + super(new RntbdContextDecoder(), new RntbdContextRequestEncoder()); + + checkNotNull(manager, "manager"); + checkNotNull(userAgent, "userAgent"); + + this.manager = manager; + this.userAgent = userAgent; + } + + /** + * Called once a write operation is made. The write operation will write the messages through the + * {@link ChannelPipeline}. Those are then ready to be flushed to the actual {@link Channel} once + * {@link Channel#flush()} is called + * + * @param context the {@link ChannelHandlerContext} for which the write operation is made + * @param message the message to write + * @param promise the {@link ChannelPromise} to notify once the operation completes + * @throws Exception thrown if an error occurs + */ + @Override + public void write( + final ChannelHandlerContext context, final Object message, final ChannelPromise promise + ) throws Exception { + + checkArgument(message instanceof ByteBuf, "message: %s", message.getClass()); + final ByteBuf out = (ByteBuf)message; + + if (this.manager.hasRntbdContext()) { + context.writeAndFlush(out, promise); + } else { + if (this.pendingRntbdContextRequest) { + // Thread safe: netty guarantees that no channel handler methods are called concurrently + this.startRntbdContextRequest(context); + this.pendingRntbdContextRequest = false; + } + this.manager.pendWrite(out, promise); + } + } + + // region Privates + + private void startRntbdContextRequest(final ChannelHandlerContext context) throws Exception { + + logger.debug("{} START CONTEXT REQUEST", context.channel()); + + final Channel channel = context.channel(); + final RntbdContextRequest request = new RntbdContextRequest(Utils.randomUUID(), this.userAgent); + final CompletableFuture contextRequestFuture = this.manager.getRntbdContextRequestFuture(); + + super.write(context, request, channel.newPromise().addListener((ChannelFutureListener)future -> { + + if (future.isSuccess()) { + contextRequestFuture.complete(request); + return; + } + + if (future.isCancelled()) { + contextRequestFuture.cancel(true); + return; + } + + contextRequestFuture.completeExceptionally(future.cause()); + })); + } + + // endregion +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdContextRequest.java 
b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdContextRequest.java new file mode 100644 index 0000000000000..eb439ab06c67f --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdContextRequest.java @@ -0,0 +1,158 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +package com.azure.data.cosmos.internal.directconnectivity.rntbd; + +import com.azure.data.cosmos.internal.UserAgentContainer; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectWriter; +import io.netty.buffer.ByteBuf; +import io.netty.handler.codec.CorruptedFrameException; + +import java.nio.charset.StandardCharsets; +import java.util.UUID; + +import static com.azure.data.cosmos.internal.HttpConstants.Versions; +import static com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdConstants.CurrentProtocolVersion; +import static com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdConstants.RntbdContextRequestHeader; +import static com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdConstants.RntbdOperationType; +import static com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdConstants.RntbdResourceType; + +public final class RntbdContextRequest { + + @JsonProperty + private final UUID activityId; + + @JsonProperty + private final Headers headers; + + RntbdContextRequest(final UUID activityId, final UserAgentContainer userAgent) { + this(activityId, new Headers(userAgent)); + } + + private RntbdContextRequest(final UUID activityId, final Headers headers) { + this.activityId = activityId; + this.headers = headers; + } + + public UUID getActivityId() { + return this.activityId; + } + + public String getClientVersion() { + return this.headers.clientVersion.getValue(String.class); + } + + public static RntbdContextRequest decode(final ByteBuf in) { + + final int resourceOperationTypeCode = in.getInt(in.readerIndex() + Integer.BYTES); + + if (resourceOperationTypeCode != 0) { + final String reason = String.format("resourceOperationCode=0x%08X", resourceOperationTypeCode); + throw new IllegalStateException(reason); + } + + final int start = in.readerIndex(); + final int expectedLength = in.readIntLE(); + + final RntbdRequestFrame header = RntbdRequestFrame.decode(in); + final Headers 
headers = Headers.decode(in.readSlice(expectedLength - (in.readerIndex() - start))); + + final int observedLength = in.readerIndex() - start; + + if (observedLength != expectedLength) { + final String reason = String.format("expectedLength=%d, observeredLength=%d", expectedLength, observedLength); + throw new IllegalStateException(reason); + } + + in.discardReadBytes(); + return new RntbdContextRequest(header.getActivityId(), headers); + } + + public void encode(final ByteBuf out) { + + final int expectedLength = RntbdRequestFrame.LENGTH + this.headers.computeLength(); + final int start = out.writerIndex(); + + out.writeIntLE(expectedLength); + + final RntbdRequestFrame header = new RntbdRequestFrame(this.getActivityId(), RntbdOperationType.Connection, RntbdResourceType.Connection); + header.encode(out); + this.headers.encode(out); + + final int observedLength = out.writerIndex() - start; + + if (observedLength != expectedLength) { + final String reason = String.format("expectedLength=%d, observeredLength=%d", expectedLength, observedLength); + throw new IllegalStateException(reason); + } + } + + @Override + public String toString() { + final ObjectWriter writer = RntbdObjectMapper.writer(); + try { + return writer.writeValueAsString(this); + } catch (final JsonProcessingException error) { + throw new CorruptedFrameException(error); + } + } + + private static final class Headers extends RntbdTokenStream { + + private static final byte[] ClientVersion = Versions.CURRENT_VERSION.getBytes(StandardCharsets.UTF_8); + + @JsonProperty + RntbdToken clientVersion; + + @JsonProperty + RntbdToken protocolVersion; + + @JsonProperty + RntbdToken userAgent; + + Headers(final UserAgentContainer container) { + + this(); + + this.clientVersion.setValue(ClientVersion); + this.protocolVersion.setValue(CurrentProtocolVersion); + this.userAgent.setValue(container.getUserAgent()); + } + + private Headers() { + + super(RntbdContextRequestHeader.set, RntbdContextRequestHeader.map); + + this.clientVersion = this.get(RntbdContextRequestHeader.ClientVersion); + this.protocolVersion = this.get(RntbdContextRequestHeader.ProtocolVersion); + this.userAgent = this.get(RntbdContextRequestHeader.UserAgent); + } + + static Headers decode(final ByteBuf in) { + return Headers.decode(in, new Headers()); + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdContextRequestDecoder.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdContextRequestDecoder.java new file mode 100644 index 0000000000000..473b98fc8ace8 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdContextRequestDecoder.java @@ -0,0 +1,90 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
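The RntbdContextRequest.decode/encode pair above brackets every frame with a little-endian length prefix and then cross-checks the bytes actually consumed or produced against that prefix. A minimal sketch of that bookkeeping, assuming Netty 4.1 ByteBuf semantics; the class name and payload are invented for illustration:

    import io.netty.buffer.ByteBuf;
    import io.netty.buffer.Unpooled;

    public final class LengthPrefixSketch {
        public static void main(final String[] args) {
            final ByteBuf out = Unpooled.buffer();
            final byte[] body = { 1, 2, 3 };                     // stand-in for an encoded token stream
            final int expectedLength = Integer.BYTES + body.length;
            final int start = out.writerIndex();

            out.writeIntLE(expectedLength);                      // the prefix counts itself, as in RntbdContextRequest
            out.writeBytes(body);

            final int observedLength = out.writerIndex() - start;
            if (observedLength != expectedLength) {              // same consistency check as encode/decode above
                throw new IllegalStateException("expectedLength=" + expectedLength + ", observedLength=" + observedLength);
            }

            final int readStart = out.readerIndex();
            final int length = out.readIntLE();                  // decode side: read the prefix, then the remainder
            out.skipBytes(length - Integer.BYTES);
            assert out.readerIndex() - readStart == length;
        }
    }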
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +package com.azure.data.cosmos.internal.directconnectivity.rntbd; + +import io.netty.buffer.ByteBuf; +import io.netty.channel.ChannelHandlerContext; +import io.netty.handler.codec.ByteToMessageDecoder; + +import java.util.List; + +public class RntbdContextRequestDecoder extends ByteToMessageDecoder { + + public RntbdContextRequestDecoder() { + this.setSingleDecode(true); + } + + /** + * Prepare for decoding an {@link RntbdContextRequest} or fire a channel read event to pass the input message along + * + * @param context the {@link ChannelHandlerContext} which this {@link ByteToMessageDecoder} belongs to + * @param message the message to be decoded + * @throws Exception thrown if an error occurs + */ + @Override + public void channelRead(final ChannelHandlerContext context, final Object message) throws Exception { + + if (message instanceof ByteBuf) { + + final ByteBuf in = (ByteBuf)message; + final int resourceOperationType = in.getInt(in.readerIndex() + Integer.BYTES); + + if (resourceOperationType == 0) { + assert this.isSingleDecode(); + super.channelRead(context, message); + return; + } + } + context.fireChannelRead(message); + } + + /** + * Decode an RntbdContextRequest from an {@link ByteBuf} stream + *
+ * <p>
+ * This method will be called till either an input {@link ByteBuf} has nothing to readTree on return from this method or + * till nothing is readTree from the input {@link ByteBuf}. + * + * @param context the {@link ChannelHandlerContext} which this {@link ByteToMessageDecoder} belongs to + * @param in the {@link ByteBuf} from which to readTree data + * @param out the {@link List} to which decoded messages should be added + * @throws IllegalStateException thrown if an error occurs + */ + @Override + protected void decode(final ChannelHandlerContext context, final ByteBuf in, final List out) throws IllegalStateException { + + final RntbdContextRequest request; + in.markReaderIndex(); + + try { + request = RntbdContextRequest.decode(in); + } catch (final IllegalStateException error) { + in.resetReaderIndex(); + throw error; + } + + in.discardReadBytes(); + out.add(request); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdContextRequestEncoder.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdContextRequestEncoder.java new file mode 100644 index 0000000000000..02d1f1b13a803 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdContextRequestEncoder.java @@ -0,0 +1,75 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +package com.azure.data.cosmos.internal.directconnectivity.rntbd; + +import io.netty.buffer.ByteBuf; +import io.netty.channel.ChannelHandlerContext; +import io.netty.handler.codec.MessageToByteEncoder; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +final class RntbdContextRequestEncoder extends MessageToByteEncoder { + + private static final Logger Logger = LoggerFactory.getLogger(RntbdContextRequestEncoder.class); + + /** + * Returns {@code true} if the given message is an @{link RntbdContextRequest} instance + *
+ * <p>
+ * If {@code false} this message should be passed to the next {@link ChannelOutboundHandler} in the pipeline. + * + * @param message the message to encode + * @return {@code true}, if the given message is an {@link RntbdContextRequest} instance; otherwise {@code false} + */ + @Override + public boolean acceptOutboundMessage(final Object message) { + return message instanceof RntbdContextRequest; + } + + /** + * Encode an {@link RntbdContextRequest} message into a {@link ByteBuf} + *
+ * <p>
+ * This method will be called for each written message that can be handled by this encoder. + * + * @param context the {@link ChannelHandlerContext} which this {@link MessageToByteEncoder} belongs to + * @param message the message to encode + * @param out the {@link ByteBuf} into which the encoded message will be written + * @throws IllegalStateException is thrown if an error occurs + */ + @Override + protected void encode(final ChannelHandlerContext context, final Object message, final ByteBuf out) throws IllegalStateException { + + final RntbdContextRequest request = (RntbdContextRequest)message; + out.markWriterIndex(); + + try { + request.encode(out); + } catch (final IllegalStateException error) { + out.resetWriterIndex(); + throw error; + } + + Logger.debug("{}: ENCODE COMPLETE: request={}", context.channel(), request); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdEndpoint.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdEndpoint.java new file mode 100644 index 0000000000000..d7c10dd127988 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdEndpoint.java @@ -0,0 +1,119 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
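RntbdContextRequestEncoder above marks the writer index before encoding and resets it if encoding fails, so a partially written frame never reaches the wire. A sketch of that pattern with a hypothetical String encoder (Netty 4.1 assumed; the length prefix mirrors the RNTBD convention):

    import io.netty.buffer.ByteBuf;
    import io.netty.channel.ChannelHandlerContext;
    import io.netty.handler.codec.MessageToByteEncoder;

    import java.nio.charset.StandardCharsets;

    final class MarkResetEncoderSketch extends MessageToByteEncoder<String> {
        @Override
        protected void encode(final ChannelHandlerContext context, final String message, final ByteBuf out) {
            out.markWriterIndex();                       // remember where this frame starts
            try {
                final byte[] bytes = message.getBytes(StandardCharsets.UTF_8);
                out.writeIntLE(bytes.length);            // little-endian length prefix
                out.writeBytes(bytes);
            } catch (final RuntimeException error) {
                out.resetWriterIndex();                  // discard the partial frame before rethrowing
                throw error;
            }
        }
    }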
+ * + */ + +package com.azure.data.cosmos.internal.directconnectivity.rntbd; + +import com.azure.data.cosmos.internal.UserAgentContainer; +import io.netty.handler.logging.LogLevel; +import io.netty.handler.ssl.SslContext; + +import java.net.URI; +import java.util.stream.Stream; + +import static com.azure.data.cosmos.internal.directconnectivity.RntbdTransportClient.Options; +import static com.google.common.base.Preconditions.checkNotNull; + +public interface RntbdEndpoint extends AutoCloseable { + + String getName(); + + @Override + void close() throws RuntimeException; + + RntbdRequestRecord request(RntbdRequestArgs requestArgs); + + interface Provider extends AutoCloseable { + + @Override + void close() throws RuntimeException; + + Config config(); + + int count(); + + RntbdEndpoint get(URI physicalAddress); + + Stream list(); + } + + final class Config { + + private final Options options; + private final SslContext sslContext; + private final LogLevel wireLogLevel; + + public Config(final Options options, final SslContext sslContext, final LogLevel wireLogLevel) { + + checkNotNull(options, "options"); + checkNotNull(sslContext, "sslContext"); + + this.options = options; + this.sslContext = sslContext; + this.wireLogLevel = wireLogLevel; + } + + public int getConnectionTimeout() { + final long value = this.options.getConnectionTimeout().toMillis(); + assert value <= Integer.MAX_VALUE; + return (int)value; + } + + public int getMaxChannelsPerEndpoint() { + return this.options.getMaxChannelsPerEndpoint(); + } + + public int getMaxRequestsPerChannel() { + return this.options.getMaxRequestsPerChannel(); + } + + public long getReceiveHangDetectionTime() { + return this.options.getReceiveHangDetectionTime().toNanos(); + } + + public long getRequestTimeout() { + return this.options.getRequestTimeout().toNanos(); + } + + public long getSendHangDetectionTime() { + return this.options.getSendHangDetectionTime().toNanos(); + } + + public SslContext getSslContext() { + return this.sslContext; + } + + public UserAgentContainer getUserAgent() { + return this.options.getUserAgent(); + } + + public LogLevel getWireLogLevel() { + return this.wireLogLevel; + } + + @Override + public String toString() { + return RntbdObjectMapper.toJson(this); + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdFramer.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdFramer.java new file mode 100644 index 0000000000000..c856587132d0b --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdFramer.java @@ -0,0 +1,96 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
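RntbdEndpoint.Config above narrows the configured connection timeout from a Duration to an int of milliseconds, guarded only by an assert. Isolated, the conversion looks like this (a sketch; the helper name is invented):

    import java.time.Duration;

    final class TimeoutConversionSketch {
        static int toMillisAsInt(final Duration timeout) {
            final long value = timeout.toMillis();
            assert value <= Integer.MAX_VALUE;           // Config relies on the same assert rather than a hard check
            return (int) value;
        }

        public static void main(final String[] args) {
            System.out.println(toMillisAsInt(Duration.ofSeconds(5)));   // prints 5000
        }
    }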
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +package com.azure.data.cosmos.internal.directconnectivity.rntbd; + +import io.netty.buffer.ByteBuf; +import io.netty.handler.codec.CorruptedFrameException; + +import static com.google.common.base.Preconditions.checkNotNull; + +final class RntbdFramer { + + private RntbdFramer() { + } + + static boolean canDecodeHead(final ByteBuf in) throws CorruptedFrameException { + + checkNotNull(in, "in"); + + if (in.readableBytes() < RntbdResponseStatus.LENGTH) { + return false; + } + + final int start = in.readerIndex(); + final long length = in.getUnsignedIntLE(start); + + if (length > Integer.MAX_VALUE) { + final String reason = String.format("Head frame length exceeds Integer.MAX_VALUE, %d: %d", + Integer.MAX_VALUE, length + ); + throw new CorruptedFrameException(reason); + } + + if (length < Integer.BYTES) { + final String reason = String.format("Head frame length is less than size of length field, %d: %d", + Integer.BYTES, length + ); + throw new CorruptedFrameException(reason); + } + + return length <= in.readableBytes(); + } + + static boolean canDecodePayload(final ByteBuf in, final int start) { + + checkNotNull(in, "in"); + + final int readerIndex = in.readerIndex(); + + if (start < readerIndex) { + throw new IllegalArgumentException("start < in.readerIndex()"); + } + + final int offset = start - readerIndex; + + if (in.readableBytes() - offset < Integer.BYTES) { + return false; + } + + final long length = in.getUnsignedIntLE(start); + + if (length > Integer.MAX_VALUE) { + final String reason = String.format("Payload frame length exceeds Integer.MAX_VALUE, %d: %d", + Integer.MAX_VALUE, length + ); + throw new CorruptedFrameException(reason); + } + + return offset + Integer.BYTES + length <= in.readableBytes(); + } + + static boolean canDecodePayload(final ByteBuf in) { + return canDecodePayload(in, in.readerIndex()); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdMetrics.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdMetrics.java new file mode 100644 index 0000000000000..e7aee53bc5d63 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdMetrics.java @@ -0,0 +1,159 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
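RntbdFramer above answers one question: is a complete frame already buffered? It peeks at the unsigned little-endian length prefix without consuming it and compares it against the readable bytes. The essential check, as a sketch (Netty 4.1 assumed; the original's error handling is omitted):

    import io.netty.buffer.ByteBuf;

    final class FrameCheckSketch {
        static boolean wholeFrameAvailable(final ByteBuf in) {
            if (in.readableBytes() < Integer.BYTES) {
                return false;                                          // not even the length prefix has arrived
            }
            final long length = in.getUnsignedIntLE(in.readerIndex()); // peek; the reader index is not moved
            return length >= Integer.BYTES && length <= in.readableBytes();
        }
    }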
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +package com.azure.data.cosmos.internal.directconnectivity.rntbd; + +import com.codahale.metrics.Gauge; +import com.codahale.metrics.Meter; +import com.codahale.metrics.MetricFilter; +import com.codahale.metrics.MetricRegistry; +import com.codahale.metrics.RatioGauge; +import com.fasterxml.jackson.annotation.JsonPropertyOrder; +import com.google.common.base.Stopwatch; + +import java.time.Duration; + +@JsonPropertyOrder({ + "lifetime", "requests", "responses", "errorResponses", "responseRate", "completionRate", "throughput" +}) +public final class RntbdMetrics implements AutoCloseable { + + // region Fields + + private static final MetricRegistry registry = new MetricRegistry(); + + private final Gauge completionRate; + private final Meter errorResponses; + private final Stopwatch lifetime; + private final String prefix; + private final Meter requests; + private final Gauge responseRate; + private final Meter responses; + + // endregion + + // region Constructors + + public RntbdMetrics(final String name) { + + this.lifetime = Stopwatch.createStarted(); + this.prefix = name + '.'; + + this.requests = registry.register(this.prefix + "requests", new Meter()); + this.responses = registry.register(this.prefix + "responses", new Meter()); + this.errorResponses = registry.register(this.prefix + "errorResponses", new Meter()); + this.responseRate = registry.register(this.prefix + "responseRate", new ResponseRate(this)); + this.completionRate = registry.register(this.prefix + "completionRate", new CompletionRate(this)); + } + + // endregion + + // region Accessors + + public double getCompletionRate() { + return this.completionRate.getValue(); + } + + public long getErrorResponses() { + return this.errorResponses.getCount(); + } + + public double getLifetime() { + final Duration elapsed = this.lifetime.elapsed(); + return elapsed.getSeconds() + (1E-9D * elapsed.getNano()); + } + + public long getRequests() { + return this.requests.getCount(); + } + + public double getResponseRate() { + return this.responseRate.getValue(); + } + + public long getResponses() { + return this.responses.getCount(); + } + + public double getThroughput() { + return this.responses.getMeanRate(); + } + + // endregion + + // region Methods + + @Override + public void close() { + registry.removeMatching(MetricFilter.startsWith(this.prefix)); + } + + public final void incrementErrorResponseCount() { + this.errorResponses.mark(); + } + + public final void incrementRequestCount() { + this.requests.mark(); + } + + public final void incrementResponseCount() { + this.responses.mark(); + } + + @Override + public String toString() { + return RntbdObjectMapper.toJson(this); + } + + // endregion + + private static final class CompletionRate extends RatioGauge { + + private final RntbdMetrics metrics; + + private CompletionRate(RntbdMetrics metrics) { + this.metrics = metrics; + } + + @Override + protected Ratio getRatio() { + return Ratio.of(this.metrics.responses.getCount() - this.metrics.errorResponses.getCount(), + 
this.metrics.requests.getCount()); + } + } + + private static final class ResponseRate extends RatioGauge { + + private final RntbdMetrics metrics; + + private ResponseRate(RntbdMetrics metrics) { + this.metrics = metrics; + } + + @Override + protected Ratio getRatio() { + return Ratio.of(this.metrics.responses.getCount(), this.metrics.requests.getCount()); + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdObjectMapper.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdObjectMapper.java new file mode 100644 index 0000000000000..5b67ecf321514 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdObjectMapper.java @@ -0,0 +1,106 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
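A short usage sketch for the RntbdMetrics class above, assuming it runs alongside that class; the endpoint name is invented. It shows how the Dropwizard meters and the two RatioGauge subclasses fit together:

    final RntbdMetrics metrics = new RntbdMetrics("rntbd.endpoint.example");

    metrics.incrementRequestCount();                          // a request was sent
    metrics.incrementResponseCount();                         // its response arrived
    final double responseRate = metrics.getResponseRate();    // ResponseRate gauge: responses / requests = 1.0 here

    System.out.println(metrics);                              // serialized via RntbdObjectMapper.toJson
    metrics.close();                                          // drops every metric registered under the "rntbd.endpoint.example." prefix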
+ * + */ + +package com.azure.data.cosmos.internal.directconnectivity.rntbd; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.ObjectWriter; +import com.fasterxml.jackson.databind.node.JsonNodeType; +import com.fasterxml.jackson.databind.node.ObjectNode; +import com.fasterxml.jackson.databind.ser.PropertyFilter; +import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufInputStream; +import io.netty.handler.codec.CorruptedFrameException; +import io.netty.handler.codec.EncoderException; + +import java.io.IOException; +import java.io.InputStream; + +import static com.google.common.base.Preconditions.checkNotNull; + +public final class RntbdObjectMapper { + + private static final SimpleFilterProvider filterProvider; + private static final ObjectMapper objectMapper; + private static final ObjectWriter objectWriter; + + static { + objectMapper = new ObjectMapper().setFilterProvider(filterProvider = new SimpleFilterProvider()); + objectWriter = objectMapper.writer(); + } + + private RntbdObjectMapper() { + } + + static ObjectNode readTree(final RntbdResponse response) { + checkNotNull(response, "response"); + return readTree(response.getContent()); + } + + static ObjectNode readTree(final ByteBuf in) { + + checkNotNull(in, "in"); + final JsonNode node; + + try (final InputStream istream = new ByteBufInputStream(in)) { + node = objectMapper.readTree(istream); + } catch (final IOException error) { + throw new CorruptedFrameException(error); + } + + if (node.isObject()) { + return (ObjectNode)node; + } + + final String cause = String.format("Expected %s, not %s", JsonNodeType.OBJECT, node.getNodeType()); + throw new CorruptedFrameException(cause); + } + + static void registerPropertyFilter(final Class type, final Class filter) { + + checkNotNull(type, "type"); + checkNotNull(filter, "filter"); + + try { + filterProvider.addFilter(type.getSimpleName(), filter.newInstance()); + } catch (final ReflectiveOperationException error) { + throw new IllegalStateException(error); + } + } + + public static String toJson(Object value) { + try { + return objectWriter.writeValueAsString(value); + } catch (final JsonProcessingException error) { + throw new EncoderException(error); + } + } + + public static ObjectWriter writer() { + return objectWriter; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdReporter.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdReporter.java new file mode 100644 index 0000000000000..c47aa2f9e4933 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdReporter.java @@ -0,0 +1,85 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * 
copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +package com.azure.data.cosmos.internal.directconnectivity.rntbd; + +import org.apache.commons.lang3.exception.ExceptionUtils; +import org.slf4j.Logger; +import org.slf4j.helpers.FormattingTuple; +import org.slf4j.helpers.MessageFormatter; + +import java.io.File; +import java.net.URL; + +public final class RntbdReporter { + + private static final String codeSource; + + static { + String value; + try { + URL url = RntbdReporter.class.getProtectionDomain().getCodeSource().getLocation(); + File file = new File(url.toURI()); + value = file.getName(); + } catch (Throwable error) { + value = "azure-cosmosdb-direct"; + } + codeSource = value; + } + + private RntbdReporter() { + } + + public static void reportIssue(Logger logger, Object subject, String format, Object... arguments) { + if (logger.isErrorEnabled()) { + doReportIssue(logger, subject, format, arguments); + } + } + + public static void reportIssueUnless( + boolean predicate, Logger logger, Object subject, String format, Object... arguments + ) { + if (!predicate && logger.isErrorEnabled()) { + doReportIssue(logger, subject, format, arguments); + } + } + + private static void doReportIssue(Logger logger, Object subject, String format, Object[] arguments) { + + FormattingTuple formattingTuple = MessageFormatter.arrayFormat(format, arguments); + StackTraceElement[] stackTraceElements = new Exception().getStackTrace(); + Throwable throwable = formattingTuple.getThrowable(); + + if (throwable == null) { + logger.error("Report this {} issue to ensure it is addressed:\n[{}]\n[{}]\n[{}]", + codeSource, subject, stackTraceElements[2], formattingTuple.getMessage() + ); + } else { + logger.error("Report this {} issue to ensure it is addressed:\n[{}]\n[{}]\n[{}{}{}]", + codeSource, subject, stackTraceElements[2], formattingTuple.getMessage(), + throwable, ExceptionUtils.getStackTrace(throwable) + ); + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdRequest.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdRequest.java new file mode 100644 index 0000000000000..777e439396358 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdRequest.java @@ -0,0 +1,128 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +package com.azure.data.cosmos.internal.directconnectivity.rntbd; + +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import com.fasterxml.jackson.annotation.JsonIgnore; +import io.netty.buffer.ByteBuf; + +import java.util.UUID; + +import static com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdConstants.RntbdRequestHeader; +import static com.google.common.base.Preconditions.checkNotNull; + +public final class RntbdRequest { + + private static final byte[] EmptyByteArray = {}; + + private final RntbdRequestFrame frame; + private final RntbdRequestHeaders headers; + private final byte[] payload; + + private RntbdRequest(final RntbdRequestFrame frame, final RntbdRequestHeaders headers, final byte[] payload) { + + checkNotNull(frame, "frame"); + checkNotNull(headers, "headers"); + + this.frame = frame; + this.headers = headers; + this.payload = payload == null ? EmptyByteArray : payload; + } + + public UUID getActivityId() { + return this.frame.getActivityId(); + } + + @JsonIgnore + @SuppressWarnings("unchecked") + public T getHeader(final RntbdRequestHeader header) { + return (T)this.headers.get(header).getValue(); + } + + public Long getTransportRequestId() { + return this.getHeader(RntbdRequestHeader.TransportRequestID); + } + + public static RntbdRequest decode(final ByteBuf in) { + + final int resourceOperationCode = in.getInt(in.readerIndex() + Integer.BYTES); + + if (resourceOperationCode == 0) { + final String reason = String.format("resourceOperationCode=0x%08X", resourceOperationCode); + throw new IllegalStateException(reason); + } + + final int start = in.readerIndex(); + final int expectedLength = in.readIntLE(); + + final RntbdRequestFrame header = RntbdRequestFrame.decode(in); + final RntbdRequestHeaders metadata = RntbdRequestHeaders.decode(in); + final ByteBuf payloadBuf = in.readSlice(expectedLength - (in.readerIndex() - start)); + + final int observedLength = in.readerIndex() - start; + + if (observedLength != expectedLength) { + final String reason = String.format("expectedLength=%d, observedLength=%d", expectedLength, observedLength); + throw new IllegalStateException(reason); + } + + final byte[] payload = new byte[payloadBuf.readableBytes()]; + payloadBuf.readBytes(payload); + in.discardReadBytes(); + + return new RntbdRequest(header, metadata, payload); + } + + void encode(final ByteBuf out) { + + final int expectedLength = RntbdRequestFrame.LENGTH + this.headers.computeLength(); + final int start = out.readerIndex(); + + out.writeIntLE(expectedLength); + this.frame.encode(out); + this.headers.encode(out); + + assert out.writerIndex() - start == expectedLength; + + if (this.payload.length > 0) { + out.writeIntLE(this.payload.length); + out.writeBytes(this.payload); + } + } + + public static RntbdRequest from(final RntbdRequestArgs args) { + + final RxDocumentServiceRequest serviceRequest = args.getServiceRequest(); + + final RntbdRequestFrame frame = new RntbdRequestFrame( + args.getActivityId(), + serviceRequest.getOperationType(), + 
serviceRequest.getResourceType()); + + final RntbdRequestHeaders headers = new RntbdRequestHeaders(args, frame); + + return new RntbdRequest(frame, headers, serviceRequest.getContent()); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdRequestArgs.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdRequestArgs.java new file mode 100644 index 0000000000000..bf350c0e9b048 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdRequestArgs.java @@ -0,0 +1,130 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
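RntbdRequest.encode above fixes the wire layout of a request: a little-endian length that covers the frame header and the token-stream headers, followed by an optional length-prefixed payload. A hand-written sketch of just the header portion; the UUID byte order is simplified here, since RntbdUUID defines its own GUID-style layout:

    import io.netty.buffer.ByteBuf;
    import io.netty.buffer.Unpooled;

    import java.util.UUID;

    final class RequestLayoutSketch {
        // RntbdRequestFrame.LENGTH = 4 (messageLength) + 2 (resourceType) + 2 (operationType) + 16 (activityId)
        static ByteBuf writeFrame(final UUID activityId, final short resourceType, final short operationType, final byte[] headerTokens) {
            final ByteBuf out = Unpooled.buffer();
            out.writeIntLE(24 + headerTokens.length);                 // messageLength covers the frame and the headers
            out.writeShortLE(resourceType);
            out.writeShortLE(operationType);
            out.writeLongLE(activityId.getMostSignificantBits());     // simplification: not RntbdUUID's exact byte order
            out.writeLongLE(activityId.getLeastSignificantBits());
            out.writeBytes(headerTokens);                             // a payload, when present, follows with its own length
            return out;
        }
    }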
+ * + */ + +package com.azure.data.cosmos.internal.directconnectivity.rntbd; + +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonPropertyOrder; +import com.fasterxml.jackson.databind.annotation.JsonSerialize; +import com.fasterxml.jackson.databind.ser.std.ToStringSerializer; +import com.google.common.base.Stopwatch; +import io.netty.channel.ChannelHandlerContext; +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; + +import java.math.BigDecimal; +import java.net.URI; +import java.time.Duration; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicLong; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static com.google.common.base.Preconditions.checkNotNull; + +@JsonPropertyOrder({ + "transportRequestId", "origin", "replicaPath", "activityId", "operationType", "resourceType", "birthTime", + "lifetime" +}) +public final class RntbdRequestArgs { + + private static final AtomicLong instanceCount = new AtomicLong(); + private static final String simpleClassName = RntbdRequestArgs.class.getSimpleName(); + + private final UUID activityId; + private final long birthTime; + private final Stopwatch lifetime; + private final String origin; + private final URI physicalAddress; + private final String replicaPath; + private final RxDocumentServiceRequest serviceRequest; + private final long transportRequestId; + + public RntbdRequestArgs(final RxDocumentServiceRequest serviceRequest, final URI physicalAddress) { + this.activityId = UUID.fromString(serviceRequest.getActivityId()); + this.birthTime = System.nanoTime(); + this.lifetime = Stopwatch.createStarted(); + this.origin = physicalAddress.getScheme() + "://" + physicalAddress.getAuthority(); + this.physicalAddress = physicalAddress; + this.replicaPath = StringUtils.stripEnd(physicalAddress.getPath(), "/"); + this.serviceRequest = serviceRequest; + this.transportRequestId = instanceCount.incrementAndGet(); + } + + public UUID getActivityId() { + return this.activityId; + } + + public long getBirthTime() { + return this.birthTime; + } + + @JsonSerialize(using = ToStringSerializer.class) + public Duration getLifetime() { + return this.lifetime.elapsed(); + } + + public String getOrigin() { + return this.origin; + } + + @JsonIgnore + public URI getPhysicalAddress() { + return this.physicalAddress; + } + + public String getReplicaPath() { + return this.replicaPath; + } + + @JsonIgnore + public RxDocumentServiceRequest getServiceRequest() { + return this.serviceRequest; + } + + public long getTransportRequestId() { + return this.transportRequestId; + } + + @Override + public String toString() { + return simpleClassName + '(' + RntbdObjectMapper.toJson(this) + ')'; + } + + public void traceOperation(final Logger logger, final ChannelHandlerContext context, final String operationName, final Object... args) { + + checkNotNull(logger, "logger"); + + if (logger.isTraceEnabled()) { + final BigDecimal lifetime = BigDecimal.valueOf(this.lifetime.elapsed().toNanos(), 6); + logger.trace("{},{},\"{}({})\",\"{}\",\"{}\"", this.birthTime, lifetime, operationName, + Stream.of(args).map(arg -> + arg == null ? 
"null" : arg.toString()).collect(Collectors.joining(",") + ), + this, context + ); + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdRequestDecoder.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdRequestDecoder.java new file mode 100644 index 0000000000000..53837fcfb2ac2 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdRequestDecoder.java @@ -0,0 +1,85 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +package com.azure.data.cosmos.internal.directconnectivity.rntbd; + +import io.netty.buffer.ByteBuf; +import io.netty.channel.ChannelHandlerContext; +import io.netty.handler.codec.ByteToMessageDecoder; + +import java.util.List; + +public final class RntbdRequestDecoder extends ByteToMessageDecoder { + /** + * Prepare for decoding an @{link RntbdRequest} or fire a channel readTree event to pass the input message along + * + * @param context the {@link ChannelHandlerContext} which this {@link ByteToMessageDecoder} belongs to + * @param message the message to be decoded + * @throws Exception thrown if an error occurs + */ + @Override + public void channelRead(final ChannelHandlerContext context, final Object message) throws Exception { + + if (message instanceof ByteBuf) { + + final ByteBuf in = (ByteBuf)message; + final int resourceOperationType = in.getInt(in.readerIndex() + Integer.BYTES); + + if (resourceOperationType != 0) { + super.channelRead(context, message); + return; + } + } + + context.fireChannelRead(message); + } + + /** + * Decode the input {@link ByteBuf} to an RntbdRequest instance + *
+ * <p>
+ * This method will be called till either the input {@link ByteBuf} has nothing to readTree after return from this + * method or till nothing was readTree from the input {@link ByteBuf}. + * + * @param context the {@link ChannelHandlerContext} which this {@link ByteToMessageDecoder} belongs to + * @param in the {@link ByteBuf} from which to readTree data + * @param out the {@link List} to which decoded messages should be added + * @throws IllegalStateException thrown if an error occurs + */ + @Override + protected void decode(final ChannelHandlerContext context, final ByteBuf in, final List out) throws IllegalStateException { + + final RntbdRequest request; + in.markReaderIndex(); + + try { + request = RntbdRequest.decode(in); + } catch (final IllegalStateException error) { + in.resetReaderIndex(); + throw error; + } + + in.discardReadBytes(); + out.add(request); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdRequestEncoder.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdRequestEncoder.java new file mode 100644 index 0000000000000..d96fd9c9df3ba --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdRequestEncoder.java @@ -0,0 +1,78 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +package com.azure.data.cosmos.internal.directconnectivity.rntbd; + +import io.netty.buffer.ByteBuf; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelOutboundHandler; +import io.netty.handler.codec.MessageToByteEncoder; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public final class RntbdRequestEncoder extends MessageToByteEncoder { + + private static final Logger logger = LoggerFactory.getLogger(RntbdRequestEncoder.class); + + /** + * Returns {@code true} if the given message is an @{link RntbdRequest} instance + *
+ * <p>
+ * If {@code false} this message should be passed to the next {@link ChannelOutboundHandler} in the pipeline. + * + * @param message the message to encode + * @return {@code true}, if the given message is an {@link RntbdRequest} instance; otherwise {@code false} + */ + @Override + public boolean acceptOutboundMessage(final Object message) { + return message instanceof RntbdRequestArgs; + } + + /** + * Encode a message into a {@link ByteBuf} + *
+ * <p>
+ * This method will be called for each message that can be written by this encoder. + * + * @param context the {@link ChannelHandlerContext} which this {@link MessageToByteEncoder} belongs encode + * @param message the message to encode + * @param out the {@link ByteBuf} into which the encoded message will be written + */ + @Override + protected void encode(final ChannelHandlerContext context, final Object message, final ByteBuf out) throws Exception { + + final RntbdRequest request = RntbdRequest.from((RntbdRequestArgs)message); + final int start = out.writerIndex(); + + try { + request.encode(out); + } catch (final Throwable error) { + out.writerIndex(start); + throw error; + } + + if (logger.isDebugEnabled()) { + final int length = out.writerIndex() - start; + logger.debug("{}: ENCODE COMPLETE: length={}, request={}", context.channel(), length, request); + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdRequestFrame.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdRequestFrame.java new file mode 100644 index 0000000000000..d650ce5be4a59 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdRequestFrame.java @@ -0,0 +1,234 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ + +package com.azure.data.cosmos.internal.directconnectivity.rntbd; + +import com.azure.data.cosmos.internal.OperationType; +import com.azure.data.cosmos.internal.ResourceType; +import io.netty.buffer.ByteBuf; + +import java.util.Locale; +import java.util.UUID; + +import static com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdConstants.RntbdOperationType; +import static com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdConstants.RntbdResourceType; + +final class RntbdRequestFrame { + + // region Fields + + static final int LENGTH = Integer.BYTES // messageLength + + Short.BYTES // resourceType + + Short.BYTES // operationType + + 2 * Long.BYTES; // activityId + + private final UUID activityId; + private final RntbdOperationType operationType; + private final RntbdResourceType resourceType; + + // region Constructors + + RntbdRequestFrame(final UUID activityId, final OperationType operationType, final ResourceType resourceType) { + this(activityId, map(operationType), map(resourceType)); + } + + RntbdRequestFrame(final UUID activityId, final RntbdOperationType operationType, final RntbdResourceType resourceType) { + this.activityId = activityId; + this.operationType = operationType; + this.resourceType = resourceType; + } + + // endregion + + // region Methods + + UUID getActivityId() { + return this.activityId; + } + + RntbdOperationType getOperationType() { + return this.operationType; + } + + RntbdResourceType getResourceType() { + return this.resourceType; + } + + static RntbdRequestFrame decode(final ByteBuf in) { + + final RntbdResourceType resourceType = RntbdResourceType.fromId(in.readShortLE()); + final RntbdOperationType operationType = RntbdOperationType.fromId(in.readShortLE()); + final UUID activityId = RntbdUUID.decode(in); + + return new RntbdRequestFrame(activityId, operationType, resourceType); + } + + void encode(final ByteBuf out) { + out.writeShortLE(this.resourceType.id()); + out.writeShortLE(this.operationType.id()); + RntbdUUID.encode(this.activityId, out); + } + + private static RntbdResourceType map(final ResourceType resourceType) { + + switch (resourceType) { + case Attachment: + return RntbdResourceType.Attachment; + case DocumentCollection: + return RntbdResourceType.Collection; + case Conflict: + return RntbdResourceType.Conflict; + case Database: + return RntbdResourceType.Database; + case Document: + return RntbdResourceType.Document; + case Module: + return RntbdResourceType.Module; + case ModuleCommand: + return RntbdResourceType.ModuleCommand; + case Record: + return RntbdResourceType.Record; + case Permission: + return RntbdResourceType.Permission; + case Replica: + return RntbdResourceType.Replica; + case StoredProcedure: + return RntbdResourceType.StoredProcedure; + case Trigger: + return RntbdResourceType.Trigger; + case User: + return RntbdResourceType.User; + case UserDefinedType: + return RntbdResourceType.UserDefinedType; + case UserDefinedFunction: + return RntbdResourceType.UserDefinedFunction; + case Offer: + return RntbdResourceType.Offer; + case PartitionSetInformation: + return RntbdResourceType.PartitionSetInformation; + case XPReplicatorAddress: + return RntbdResourceType.XPReplicatorAddress; + case MasterPartition: + return RntbdResourceType.MasterPartition; + case ServerPartition: + return RntbdResourceType.ServerPartition; + case DatabaseAccount: + return RntbdResourceType.DatabaseAccount; + case Topology: + return RntbdResourceType.Topology; + case PartitionKeyRange: + return 
RntbdResourceType.PartitionKeyRange; + case Schema: + return RntbdResourceType.Schema; + case BatchApply: + return RntbdResourceType.BatchApply; + case RestoreMetadata: + return RntbdResourceType.RestoreMetadata; + case ComputeGatewayCharges: + return RntbdResourceType.ComputeGatewayCharges; + case RidRange: + return RntbdResourceType.RidRange; + default: + final String reason = String.format(Locale.ROOT, "Unrecognized resource type: %s", resourceType); + throw new UnsupportedOperationException(reason); + } + } + + private static RntbdOperationType map(final OperationType operationType) { + + switch (operationType) { + case Crash: + return RntbdOperationType.Crash; + case Create: + return RntbdOperationType.Create; + case Delete: + return RntbdOperationType.Delete; + case ExecuteJavaScript: + return RntbdOperationType.ExecuteJavaScript; + case Query: + return RntbdOperationType.Query; + case Pause: + return RntbdOperationType.Pause; + case Read: + return RntbdOperationType.Read; + case ReadFeed: + return RntbdOperationType.ReadFeed; + case Recreate: + return RntbdOperationType.Recreate; + case Recycle: + return RntbdOperationType.Recycle; + case Replace: + return RntbdOperationType.Replace; + case Resume: + return RntbdOperationType.Resume; + case Stop: + return RntbdOperationType.Stop; + case SqlQuery: + return RntbdOperationType.SQLQuery; + case Update: + return RntbdOperationType.Update; + case ForceConfigRefresh: + return RntbdOperationType.ForceConfigRefresh; + case Head: + return RntbdOperationType.Head; + case HeadFeed: + return RntbdOperationType.HeadFeed; + case Upsert: + return RntbdOperationType.Upsert; + case Throttle: + return RntbdOperationType.Throttle; + case PreCreateValidation: + return RntbdOperationType.PreCreateValidation; + case GetSplitPoint: + return RntbdOperationType.GetSplitPoint; + case AbortSplit: + return RntbdOperationType.AbortSplit; + case CompleteSplit: + return RntbdOperationType.CompleteSplit; + case BatchApply: + return RntbdOperationType.BatchApply; + case OfferUpdateOperation: + return RntbdOperationType.OfferUpdateOperation; + case OfferPreGrowValidation: + return RntbdOperationType.OfferPreGrowValidation; + case BatchReportThroughputUtilization: + return RntbdOperationType.BatchReportThroughputUtilization; + case AbortPartitionMigration: + return RntbdOperationType.AbortPartitionMigration; + case CompletePartitionMigration: + return RntbdOperationType.CompletePartitionMigration; + case PreReplaceValidation: + return RntbdOperationType.PreReplaceValidation; + case MigratePartition: + return RntbdOperationType.MigratePartition; + case AddComputeGatewayRequestCharges: + return RntbdOperationType.AddComputeGatewayRequestCharges; + default: + final String reason = String.format(Locale.ROOT, "Unrecognized operation type: %s", operationType); + throw new UnsupportedOperationException(reason); + } + } + + // endregion +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdRequestFramer.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdRequestFramer.java new file mode 100644 index 0000000000000..1dda4f7d544ad --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdRequestFramer.java @@ -0,0 +1,36 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation 
files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +package com.azure.data.cosmos.internal.directconnectivity.rntbd; + +import io.netty.handler.codec.LengthFieldBasedFrameDecoder; + +import java.nio.ByteOrder; + +public final class RntbdRequestFramer extends LengthFieldBasedFrameDecoder { + + public RntbdRequestFramer() { + super(ByteOrder.LITTLE_ENDIAN, Integer.MAX_VALUE, 0, Integer.BYTES, -Integer.BYTES, 0, true); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdRequestHeaders.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdRequestHeaders.java new file mode 100644 index 0000000000000..4a07dbb95744a --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdRequestHeaders.java @@ -0,0 +1,1273 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ + +package com.azure.data.cosmos.internal.directconnectivity.rntbd; + + +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.IndexingDirective; +import com.azure.data.cosmos.internal.ContentSerializationFormat; +import com.azure.data.cosmos.internal.EnumerationDirection; +import com.azure.data.cosmos.internal.FanoutOperationState; +import com.azure.data.cosmos.internal.MigrateCollectionDirective; +import com.azure.data.cosmos.internal.Paths; +import com.azure.data.cosmos.internal.RMResources; +import com.azure.data.cosmos.internal.ReadFeedKeyType; +import com.azure.data.cosmos.internal.RemoteStorageType; +import com.azure.data.cosmos.internal.ResourceId; +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import com.fasterxml.jackson.annotation.JsonFilter; +import io.netty.buffer.ByteBuf; +import org.apache.commons.lang3.EnumUtils; +import org.apache.commons.lang3.StringUtils; + +import java.util.Base64; +import java.util.Locale; +import java.util.Map; +import java.util.function.Supplier; + +import static com.azure.data.cosmos.internal.HttpConstants.HttpHeaders; +import static com.azure.data.cosmos.internal.directconnectivity.WFConstants.BackendHeaders; +import static com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdConstants.RntbdConsistencyLevel; +import static com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdConstants.RntbdContentSerializationFormat; +import static com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdConstants.RntbdEnumerationDirection; +import static com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdConstants.RntbdFanoutOperationState; +import static com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdConstants.RntbdIndexingDirective; +import static com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdConstants.RntbdMigrateCollectionDirective; +import static com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdConstants.RntbdOperationType; +import static com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdConstants.RntbdReadFeedKeyType; +import static com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdConstants.RntbdRemoteStorageType; +import static com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdConstants.RntbdRequestHeader; +import static com.google.common.base.Preconditions.checkNotNull; + +@JsonFilter("RntbdToken") +final class RntbdRequestHeaders extends RntbdTokenStream { + + // region Fields + + private static final String UrlTrim = "/+"; + + // endregion + + // region Constructors + + RntbdRequestHeaders(final RntbdRequestArgs args, final RntbdRequestFrame frame) { + + this(); + + checkNotNull(args, "args"); + checkNotNull(frame, "frame"); + + final RxDocumentServiceRequest request = args.getServiceRequest(); + final byte[] content = request.getContent(); + + this.getPayloadPresent().setValue(content != null && content.length > 0); + this.getReplicaPath().setValue(args.getReplicaPath()); + this.getTransportRequestID().setValue(args.getTransportRequestId()); + + final Map headers = request.getHeaders(); + + // Special-case headers + + this.addAimHeader(headers); + this.addAllowScanOnQuery(headers); + this.addBinaryIdIfPresent(headers); + this.addCanCharge(headers); + this.addCanOfferReplaceComplete(headers); + this.addCanThrottle(headers); + this.addCollectionRemoteStorageSecurityIdentifier(headers); + this.addConsistencyLevelHeader(headers); + this.addContentSerializationFormat(headers); + 
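// The remaining special-case helpers below follow the same pattern: read a request header, convert its value +
// to the token's type (boolean, enum id, or Base64-decoded bytes), and store it in the matching RNTBD token; +
// addAllowScanOnQuery, for example, parses HttpHeaders.ENABLE_SCAN_IN_QUERY as a boolean. +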
this.addContinuationToken(request); + this.addDateHeader(headers); + this.addDisableRUPerMinuteUsage(headers); + this.addEmitVerboseTracesInQuery(headers); + this.addEnableLogging(headers); + this.addEnableLowPrecisionOrderBy(headers); + this.addEntityId(headers); + this.addEnumerationDirection(headers); + this.addExcludeSystemProperties(headers); + this.addFanoutOperationStateHeader(headers); + this.addIfModifiedSinceHeader(headers); + this.addIndexingDirectiveHeader(headers); + this.addIsAutoScaleRequest(headers); + this.addIsFanout(headers); + this.addIsReadOnlyScript(headers); + this.addIsUserRequest(headers); + this.addMatchHeader(headers, frame.getOperationType()); + this.addMigrateCollectionDirectiveHeader(headers); + this.addPageSize(headers); + this.addPopulateCollectionThroughputInfo(headers); + this.addPopulatePartitionStatistics(headers); + this.addPopulateQueryMetrics(headers); + this.addPopulateQuotaInfo(headers); + this.addProfileRequest(headers); + this.addQueryForceScan(headers); + this.addRemoteStorageType(headers); + this.addResourceIdOrPathHeaders(request); + this.addResponseContinuationTokenLimitInKb(headers); + this.addShareThroughput(headers); + this.addStartAndEndKeys(headers); + this.addSupportSpatialLegacyCoordinates(headers); + this.addUsePolygonsSmallerThanAHemisphere(headers); + + // Normal headers (Strings, Ints, Longs, etc.) + + this.fillTokenFromHeader(headers, this::getAllowTentativeWrites, BackendHeaders.ALLOW_TENTATIVE_WRITES); + this.fillTokenFromHeader(headers, this::getAuthorizationToken, HttpHeaders.AUTHORIZATION); + this.fillTokenFromHeader(headers, this::getBinaryPassThroughRequest, BackendHeaders.BINARY_PASSTHROUGH_REQUEST); + this.fillTokenFromHeader(headers, this::getBindReplicaDirective, BackendHeaders.BIND_REPLICA_DIRECTIVE); + this.fillTokenFromHeader(headers, this::getClientRetryAttemptCount, HttpHeaders.CLIENT_RETRY_ATTEMPT_COUNT); + this.fillTokenFromHeader(headers, this::getCollectionPartitionIndex, BackendHeaders.COLLECTION_PARTITION_INDEX); + this.fillTokenFromHeader(headers, this::getCollectionRid, BackendHeaders.COLLECTION_RID); + this.fillTokenFromHeader(headers, this::getCollectionServiceIndex, BackendHeaders.COLLECTION_SERVICE_INDEX); + this.fillTokenFromHeader(headers, this::getEffectivePartitionKey, BackendHeaders.EFFECTIVE_PARTITION_KEY); + this.fillTokenFromHeader(headers, this::getEnableDynamicRidRangeAllocation, BackendHeaders.ENABLE_DYNAMIC_RID_RANGE_ALLOCATION); + this.fillTokenFromHeader(headers, this::getFilterBySchemaRid, HttpHeaders.FILTER_BY_SCHEMA_RESOURCE_ID); + this.fillTokenFromHeader(headers, this::getGatewaySignature, HttpHeaders.GATEWAY_SIGNATURE); + this.fillTokenFromHeader(headers, this::getPartitionCount, BackendHeaders.PARTITION_COUNT); + this.fillTokenFromHeader(headers, this::getPartitionKey, HttpHeaders.PARTITION_KEY); + this.fillTokenFromHeader(headers, this::getPartitionKeyRangeId, HttpHeaders.PARTITION_KEY_RANGE_ID); + this.fillTokenFromHeader(headers, this::getPartitionResourceFilter, BackendHeaders.PARTITION_RESOURCE_FILTER); + this.fillTokenFromHeader(headers, this::getPostTriggerExclude, HttpHeaders.POST_TRIGGER_EXCLUDE); + this.fillTokenFromHeader(headers, this::getPostTriggerInclude, HttpHeaders.POST_TRIGGER_INCLUDE); + this.fillTokenFromHeader(headers, this::getPreTriggerExclude, HttpHeaders.PRE_TRIGGER_EXCLUDE); + this.fillTokenFromHeader(headers, this::getPreTriggerInclude, HttpHeaders.PRE_TRIGGER_INCLUDE); + this.fillTokenFromHeader(headers, this::getPrimaryMasterKey, 
BackendHeaders.PRIMARY_MASTER_KEY); + this.fillTokenFromHeader(headers, this::getPrimaryReadonlyKey, BackendHeaders.PRIMARY_READONLY_KEY); + this.fillTokenFromHeader(headers, this::getRemainingTimeInMsOnClientRequest, HttpHeaders.REMAINING_TIME_IN_MS_ON_CLIENT_REQUEST); + this.fillTokenFromHeader(headers, this::getResourceSchemaName, BackendHeaders.RESOURCE_SCHEMA_NAME); + this.fillTokenFromHeader(headers, this::getResourceTokenExpiry, HttpHeaders.RESOURCE_TOKEN_EXPIRY); + this.fillTokenFromHeader(headers, this::getRestoreMetadataFilter, HttpHeaders.RESTORE_METADATA_FILTER); + this.fillTokenFromHeader(headers, this::getRestoreParams, BackendHeaders.RESTORE_PARAMS); + this.fillTokenFromHeader(headers, this::getSecondaryMasterKey, BackendHeaders.SECONDARY_MASTER_KEY); + this.fillTokenFromHeader(headers, this::getSecondaryReadonlyKey, BackendHeaders.SECONDARY_READONLY_KEY); + this.fillTokenFromHeader(headers, this::getSessionToken, HttpHeaders.SESSION_TOKEN); + this.fillTokenFromHeader(headers, this::getSharedOfferThroughput, HttpHeaders.SHARED_OFFER_THROUGHPUT); + this.fillTokenFromHeader(headers, this::getTargetGlobalCommittedLsn, HttpHeaders.TARGET_GLOBAL_COMMITTED_LSN); + this.fillTokenFromHeader(headers, this::getTargetLsn, HttpHeaders.TARGET_LSN); + this.fillTokenFromHeader(headers, this::getTimeToLiveInSeconds, BackendHeaders.TIME_TO_LIVE_IN_SECONDS); + this.fillTokenFromHeader(headers, this::getTransportRequestID, HttpHeaders.TRANSPORT_REQUEST_ID); + + // Will be null in case of direct, which is fine - BE will use the value slice the connection context this. + // When this is used in Gateway, the header value will be populated with the proxied HTTP request's header, + // and BE will respect the per-request value. + + this.fillTokenFromHeader(headers, this::getClientVersion, HttpHeaders.VERSION); + } + + private RntbdRequestHeaders() { + super(RntbdRequestHeader.set, RntbdRequestHeader.map); + } + + // endregion + + // region Methods + + static RntbdRequestHeaders decode(final ByteBuf in) { + final RntbdRequestHeaders metadata = new RntbdRequestHeaders(); + return RntbdRequestHeaders.decode(in, metadata); + } + + // endregion + + // region Privates + + private RntbdToken getAIM() { + return this.get(RntbdRequestHeader.A_IM); + } + + private RntbdToken getAllowTentativeWrites() { + return this.get(RntbdRequestHeader.AllowTentativeWrites); + } + + private RntbdToken getAttachmentName() { + return this.get(RntbdRequestHeader.AttachmentName); + } + + private RntbdToken getAuthorizationToken() { + return this.get(RntbdRequestHeader.AuthorizationToken); + } + + private RntbdToken getBinaryId() { + return this.get(RntbdRequestHeader.BinaryId); + } + + private RntbdToken getBinaryPassThroughRequest() { + return this.get(RntbdRequestHeader.BinaryPassthroughRequest); + } + + private RntbdToken getBindReplicaDirective() { + return this.get(RntbdRequestHeader.BindReplicaDirective); + } + + private RntbdToken getCanCharge() { + return this.get(RntbdRequestHeader.CanCharge); + } + + private RntbdToken getCanOfferReplaceComplete() { + return this.get(RntbdRequestHeader.CanOfferReplaceComplete); + } + + private RntbdToken getCanThrottle() { + return this.get(RntbdRequestHeader.CanThrottle); + } + + private RntbdToken getClientRetryAttemptCount() { + return this.get(RntbdRequestHeader.ClientRetryAttemptCount); + } + + private RntbdToken getClientVersion() { + return this.get(RntbdRequestHeader.ClientVersion); + } + + private RntbdToken getCollectionName() { + return 
this.get(RntbdRequestHeader.CollectionName); + } + + private RntbdToken getCollectionPartitionIndex() { + return this.get(RntbdRequestHeader.CollectionPartitionIndex); + } + + private RntbdToken getCollectionRemoteStorageSecurityIdentifier() { + return this.get(RntbdRequestHeader.CollectionRemoteStorageSecurityIdentifier); + } + + private RntbdToken getCollectionRid() { + return this.get(RntbdRequestHeader.CollectionRid); + } + + private RntbdToken getCollectionServiceIndex() { + return this.get(RntbdRequestHeader.CollectionServiceIndex); + } + + private RntbdToken getConflictName() { + return this.get(RntbdRequestHeader.ConflictName); + } + + private RntbdToken getConsistencyLevel() { + return this.get(RntbdRequestHeader.ConsistencyLevel); + } + + private RntbdToken getContentSerializationFormat() { + return this.get(RntbdRequestHeader.ContentSerializationFormat); + } + + private RntbdToken getContinuationToken() { + return this.get(RntbdRequestHeader.ContinuationToken); + } + + private RntbdToken getDatabaseName() { + return this.get(RntbdRequestHeader.DatabaseName); + } + + private RntbdToken getDate() { + return this.get(RntbdRequestHeader.Date); + } + + private RntbdToken getDisableRUPerMinuteUsage() { + return this.get(RntbdRequestHeader.DisableRUPerMinuteUsage); + } + + private RntbdToken getDocumentName() { + return this.get(RntbdRequestHeader.DocumentName); + } + + private RntbdToken getEffectivePartitionKey() { + return this.get(RntbdRequestHeader.EffectivePartitionKey); + } + + private RntbdToken getEmitVerboseTracesInQuery() { + return this.get(RntbdRequestHeader.EmitVerboseTracesInQuery); + } + + private RntbdToken getEnableDynamicRidRangeAllocation() { + return this.get(RntbdRequestHeader.EnableDynamicRidRangeAllocation); + } + + private RntbdToken getEnableLogging() { + return this.get(RntbdRequestHeader.EnableLogging); + } + + private RntbdToken getEnableLowPrecisionOrderBy() { + return this.get(RntbdRequestHeader.EnableLowPrecisionOrderBy); + } + + private RntbdToken getEnableScanInQuery() { + return this.get(RntbdRequestHeader.EnableScanInQuery); + } + + private RntbdToken getEndEpk() { + return this.get(RntbdRequestHeader.EndEpk); + } + + private RntbdToken getEndId() { + return this.get(RntbdRequestHeader.EndId); + } + + private RntbdToken getEntityId() { + return this.get(RntbdRequestHeader.EntityId); + } + + private RntbdToken getEnumerationDirection() { + return this.get(RntbdRequestHeader.EnumerationDirection); + } + + private RntbdToken getExcludeSystemProperties() { + return this.get(RntbdRequestHeader.ExcludeSystemProperties); + } + + private RntbdToken getFanoutOperationState() { + return this.get(RntbdRequestHeader.FanoutOperationState); + } + + private RntbdToken getFilterBySchemaRid() { + return this.get(RntbdRequestHeader.FilterBySchemaRid); + } + + private RntbdToken getForceQueryScan() { + return this.get(RntbdRequestHeader.ForceQueryScan); + } + + private RntbdToken getGatewaySignature() { + return this.get(RntbdRequestHeader.GatewaySignature); + } + + private RntbdToken getIfModifiedSince() { + return this.get(RntbdRequestHeader.IfModifiedSince); + } + + private RntbdToken getIndexingDirective() { + return this.get(RntbdRequestHeader.IndexingDirective); + } + + private RntbdToken getIsAutoScaleRequest() { + return this.get(RntbdRequestHeader.IsAutoScaleRequest); + } + + private RntbdToken getIsFanout() { + return this.get(RntbdRequestHeader.IsFanout); + } + + private RntbdToken getIsReadOnlyScript() { + return 
this.get(RntbdRequestHeader.IsReadOnlyScript); + } + + private RntbdToken getIsUserRequest() { + return this.get(RntbdRequestHeader.IsUserRequest); + } + + private RntbdToken getMatch() { + return this.get(RntbdRequestHeader.Match); + } + + private RntbdToken getMigrateCollectionDirective() { + return this.get(RntbdRequestHeader.MigrateCollectionDirective); + } + + private RntbdToken getPageSize() { + return this.get(RntbdRequestHeader.PageSize); + } + + private RntbdToken getPartitionCount() { + return this.get(RntbdRequestHeader.PartitionCount); + } + + private RntbdToken getPartitionKey() { + return this.get(RntbdRequestHeader.PartitionKey); + } + + private RntbdToken getPartitionKeyRangeId() { + return this.get(RntbdRequestHeader.PartitionKeyRangeId); + } + + private RntbdToken getPartitionKeyRangeName() { + return this.get(RntbdRequestHeader.PartitionKeyRangeName); + } + + private RntbdToken getPartitionResourceFilter() { + return this.get(RntbdRequestHeader.PartitionResourceFilter); + } + + private RntbdToken getPayloadPresent() { + return this.get(RntbdRequestHeader.PayloadPresent); + } + + private RntbdToken getPermissionName() { + return this.get(RntbdRequestHeader.PermissionName); + } + + private RntbdToken getPopulateCollectionThroughputInfo() { + return this.get(RntbdRequestHeader.PopulateCollectionThroughputInfo); + } + + private RntbdToken getPopulatePartitionStatistics() { + return this.get(RntbdRequestHeader.PopulatePartitionStatistics); + } + + private RntbdToken getPopulateQueryMetrics() { + return this.get(RntbdRequestHeader.PopulateQueryMetrics); + } + + private RntbdToken getPopulateQuotaInfo() { + return this.get(RntbdRequestHeader.PopulateQuotaInfo); + } + + private RntbdToken getPostTriggerExclude() { + return this.get(RntbdRequestHeader.PostTriggerExclude); + } + + private RntbdToken getPostTriggerInclude() { + return this.get(RntbdRequestHeader.PostTriggerInclude); + } + + private RntbdToken getPreTriggerExclude() { + return this.get(RntbdRequestHeader.PreTriggerExclude); + } + + private RntbdToken getPreTriggerInclude() { + return this.get(RntbdRequestHeader.PreTriggerInclude); + } + + private RntbdToken getPrimaryMasterKey() { + return this.get(RntbdRequestHeader.PrimaryMasterKey); + } + + private RntbdToken getPrimaryReadonlyKey() { + return this.get(RntbdRequestHeader.PrimaryReadonlyKey); + } + + private RntbdToken getProfileRequest() { + return this.get(RntbdRequestHeader.ProfileRequest); + } + + private RntbdToken getReadFeedKeyType() { + return this.get(RntbdRequestHeader.ReadFeedKeyType); + } + + private RntbdToken getRemainingTimeInMsOnClientRequest() { + return this.get(RntbdRequestHeader.RemainingTimeInMsOnClientRequest); + } + + private RntbdToken getRemoteStorageType() { + return this.get(RntbdRequestHeader.RemoteStorageType); + } + + private RntbdToken getReplicaPath() { + return this.get(RntbdRequestHeader.ReplicaPath); + } + + private RntbdToken getResourceId() { + return this.get(RntbdRequestHeader.ResourceId); + } + + private RntbdToken getResourceSchemaName() { + return this.get(RntbdRequestHeader.ResourceSchemaName); + } + + private RntbdToken getResourceTokenExpiry() { + return this.get(RntbdRequestHeader.ResourceTokenExpiry); + } + + private RntbdToken getResponseContinuationTokenLimitInKb() { + return this.get(RntbdRequestHeader.ResponseContinuationTokenLimitInKb); + } + + private RntbdToken getRestoreMetadataFilter() { + return this.get(RntbdRequestHeader.RestoreMetadaFilter); + } + + private RntbdToken getRestoreParams() { + return 
this.get(RntbdRequestHeader.RestoreParams); + } + + private RntbdToken getSchemaName() { + return this.get(RntbdRequestHeader.SchemaName); + } + + private RntbdToken getSecondaryMasterKey() { + return this.get(RntbdRequestHeader.SecondaryMasterKey); + } + + private RntbdToken getSecondaryReadonlyKey() { + return this.get(RntbdRequestHeader.SecondaryReadonlyKey); + } + + private RntbdToken getSessionToken() { + return this.get(RntbdRequestHeader.SessionToken); + } + + private RntbdToken getShareThroughput() { + return this.get(RntbdRequestHeader.ShareThroughput); + } + + private RntbdToken getSharedOfferThroughput() { + return this.get(RntbdRequestHeader.SharedOfferThroughput); + } + + private RntbdToken getStartEpk() { + return this.get(RntbdRequestHeader.StartEpk); + } + + private RntbdToken getStartId() { + return this.get(RntbdRequestHeader.StartId); + } + + private RntbdToken getStoredProcedureName() { + return this.get(RntbdRequestHeader.StoredProcedureName); + } + + private RntbdToken getSupportSpatialLegacyCoordinates() { + return this.get(RntbdRequestHeader.SupportSpatialLegacyCoordinates); + } + + private RntbdToken getTargetGlobalCommittedLsn() { + return this.get(RntbdRequestHeader.TargetGlobalCommittedLsn); + } + + private RntbdToken getTargetLsn() { + return this.get(RntbdRequestHeader.TargetLsn); + } + + private RntbdToken getTimeToLiveInSeconds() { + return this.get(RntbdRequestHeader.TimeToLiveInSeconds); + } + + private RntbdToken getTransportRequestID() { + return this.get(RntbdRequestHeader.TransportRequestID); + } + + private RntbdToken getTriggerName() { + return this.get(RntbdRequestHeader.TriggerName); + } + + private RntbdToken getUsePolygonsSmallerThanAHemisphere() { + return this.get(RntbdRequestHeader.UsePolygonsSmallerThanAHemisphere); + } + + private RntbdToken getUserDefinedFunctionName() { + return this.get(RntbdRequestHeader.UserDefinedFunctionName); + } + + private RntbdToken getUserDefinedTypeName() { + return this.get(RntbdRequestHeader.UserDefinedTypeName); + } + + private RntbdToken getUserName() { + return this.get(RntbdRequestHeader.UserName); + } + + private void addAimHeader(final Map headers) { + + final String value = headers.get(HttpHeaders.A_IM); + + if (StringUtils.isNotEmpty(value)) { + this.getAIM().setValue(value); + } + } + + private void addAllowScanOnQuery(final Map headers) { + final String value = headers.get(HttpHeaders.ENABLE_SCAN_IN_QUERY); + if (StringUtils.isNotEmpty(value)) { + this.getEnableScanInQuery().setValue(Boolean.parseBoolean(value)); + } + } + + private void addBinaryIdIfPresent(final Map headers) { + final String value = headers.get(BackendHeaders.BINARY_ID); + if (StringUtils.isNotEmpty(value)) { + this.getBinaryId().setValue(Base64.getDecoder().decode(value)); + } + } + + private void addCanCharge(final Map headers) { + final String value = headers.get(HttpHeaders.CAN_CHARGE); + if (StringUtils.isNotEmpty(value)) { + this.getCanCharge().setValue(Boolean.parseBoolean(value)); + } + } + + private void addCanOfferReplaceComplete(final Map headers) { + final String value = headers.get(HttpHeaders.CAN_OFFER_REPLACE_COMPLETE); + if (StringUtils.isNotEmpty(value)) { + this.getCanOfferReplaceComplete().setValue(Boolean.parseBoolean(value)); + } + } + + private void addCanThrottle(final Map headers) { + final String value = headers.get(HttpHeaders.CAN_THROTTLE); + if (StringUtils.isNotEmpty(value)) { + this.getCanThrottle().setValue(Boolean.parseBoolean(value)); + } + } + + private void 
addCollectionRemoteStorageSecurityIdentifier(final Map headers) { + final String value = headers.get(HttpHeaders.COLLECTION_REMOTE_STORAGE_SECURITY_IDENTIFIER); + if (StringUtils.isNotEmpty(value)) { + this.getCollectionRemoteStorageSecurityIdentifier().setValue(value); + } + } + + private void addConsistencyLevelHeader(final Map headers) { + + final String value = headers.get(HttpHeaders.CONSISTENCY_LEVEL); + + if (StringUtils.isNotEmpty(value)) { + + final ConsistencyLevel level = EnumUtils.getEnumIgnoreCase(ConsistencyLevel.class, value); + + if (level == null) { + final String reason = String.format(Locale.ROOT, RMResources.InvalidRequestHeaderValue, + HttpHeaders.CONSISTENCY_LEVEL, + value); + throw new IllegalStateException(reason); + } + + switch (level) { + case STRONG: + this.getConsistencyLevel().setValue(RntbdConsistencyLevel.Strong.id()); + break; + case BOUNDED_STALENESS: + this.getConsistencyLevel().setValue(RntbdConsistencyLevel.BoundedStaleness.id()); + break; + case SESSION: + this.getConsistencyLevel().setValue(RntbdConsistencyLevel.Session.id()); + break; + case EVENTUAL: + this.getConsistencyLevel().setValue(RntbdConsistencyLevel.Eventual.id()); + break; + case CONSISTENT_PREFIX: + this.getConsistencyLevel().setValue(RntbdConsistencyLevel.ConsistentPrefix.id()); + break; + default: + assert false; + break; + } + } + } + + private void addContentSerializationFormat(final Map headers) { + + final String value = headers.get(HttpHeaders.CONTENT_SERIALIZATION_FORMAT); + + if (StringUtils.isNotEmpty(value)) { + + final ContentSerializationFormat format = EnumUtils.getEnumIgnoreCase(ContentSerializationFormat.class, value); + + if (format == null) { + final String reason = String.format(Locale.ROOT, RMResources.InvalidRequestHeaderValue, + HttpHeaders.CONTENT_SERIALIZATION_FORMAT, + value); + throw new IllegalStateException(reason); + } + + switch (format) { + case JsonText: + this.getContentSerializationFormat().setValue(RntbdContentSerializationFormat.JsonText.id()); + break; + case CosmosBinary: + this.getContentSerializationFormat().setValue(RntbdContentSerializationFormat.CosmosBinary.id()); + break; + default: + assert false; + } + } + } + + private void addContinuationToken(final RxDocumentServiceRequest request) { + final String value = request.getContinuation(); + if (StringUtils.isNotEmpty(value)) { + this.getContinuationToken().setValue(value); + } + } + + private void addDateHeader(final Map headers) { + + // Since the HTTP date header is overridden by some proxies/http client libraries, we support an additional date + // header and prefer that to the (regular) date header + + String value = headers.get(HttpHeaders.X_DATE); + + if (StringUtils.isEmpty(value)) { + value = headers.get(HttpHeaders.HTTP_DATE); + } + + if (StringUtils.isNotEmpty(value)) { + this.getDate().setValue(value); + } + } + + private void addDisableRUPerMinuteUsage(final Map headers) { + final String value = headers.get(HttpHeaders.DISABLE_RU_PER_MINUTE_USAGE); + if (StringUtils.isNotEmpty(value)) { + this.getDisableRUPerMinuteUsage().setValue(Boolean.parseBoolean(value)); + } + } + + private void addEmitVerboseTracesInQuery(final Map headers) { + final String value = headers.get(HttpHeaders.EMIT_VERBOSE_TRACES_IN_QUERY); + if (StringUtils.isNotEmpty(value)) { + this.getEmitVerboseTracesInQuery().setValue(Boolean.parseBoolean(value)); + } + } + + private void addEnableLogging(final Map headers) { + final String value = headers.get(HttpHeaders.ENABLE_LOGGING); + if (StringUtils.isNotEmpty(value)) 
{ + this.getEnableLogging().setValue(Boolean.parseBoolean(value)); + } + } + + private void addEnableLowPrecisionOrderBy(final Map headers) { + final String value = headers.get(HttpHeaders.ENABLE_LOW_PRECISION_ORDER_BY); + if (StringUtils.isNotEmpty(value)) { + this.getEnableLowPrecisionOrderBy().setValue(Boolean.parseBoolean(value)); + } + } + + private void addEntityId(final Map headers) { + final String value = headers.get(BackendHeaders.ENTITY_ID); + if (StringUtils.isNotEmpty(value)) { + this.getEntityId().setValue(value); + } + } + + private void addEnumerationDirection(final Map headers) { + + final String value = headers.get(HttpHeaders.ENUMERATION_DIRECTION); + + if (StringUtils.isNotEmpty(value)) { + + final EnumerationDirection direction = EnumUtils.getEnumIgnoreCase(EnumerationDirection.class, value); + + if (direction == null) { + final String reason = String.format(Locale.ROOT, RMResources.InvalidRequestHeaderValue, + HttpHeaders.ENUMERATION_DIRECTION, + value); + throw new IllegalStateException(reason); + } + + switch (direction) { + case Forward: + this.getEnumerationDirection().setValue(RntbdEnumerationDirection.Forward.id()); + break; + case Reverse: + this.getEnumerationDirection().setValue(RntbdEnumerationDirection.Reverse.id()); + break; + default: + assert false; + } + } + } + + private void addExcludeSystemProperties(final Map headers) { + final String value = headers.get(BackendHeaders.EXCLUDE_SYSTEM_PROPERTIES); + if (StringUtils.isNotEmpty(value)) { + this.getExcludeSystemProperties().setValue(Boolean.parseBoolean(value)); + } + } + + private void addFanoutOperationStateHeader(final Map headers) { + + final String value = headers.get(BackendHeaders.FANOUT_OPERATION_STATE); + + if (StringUtils.isNotEmpty(value)) { + + final FanoutOperationState format = EnumUtils.getEnumIgnoreCase(FanoutOperationState.class, value); + + if (format == null) { + final String reason = String.format(Locale.ROOT, RMResources.InvalidRequestHeaderValue, + BackendHeaders.FANOUT_OPERATION_STATE, + value); + throw new IllegalStateException(reason); + } + + switch (format) { + case Started: + this.getFanoutOperationState().setValue(RntbdFanoutOperationState.Started.id()); + break; + case Completed: + this.getFanoutOperationState().setValue(RntbdFanoutOperationState.Completed.id()); + break; + default: + assert false; + } + } + } + + private void addIfModifiedSinceHeader(final Map headers) { + final String value = headers.get(HttpHeaders.IF_MODIFIED_SINCE); + if (StringUtils.isNotEmpty(value)) { + this.getIfModifiedSince().setValue(value); + } + } + + private void addIndexingDirectiveHeader(final Map headers) { + + final String value = headers.get(HttpHeaders.INDEXING_DIRECTIVE); + + if (StringUtils.isNotEmpty(value)) { + + final IndexingDirective directive = EnumUtils.getEnumIgnoreCase(IndexingDirective.class, value); + + if (directive == null) { + final String reason = String.format(Locale.ROOT, RMResources.InvalidRequestHeaderValue, + HttpHeaders.INDEXING_DIRECTIVE, + value); + throw new IllegalStateException(reason); + } + + switch (directive) { + case DEFAULT: + this.getIndexingDirective().setValue(RntbdIndexingDirective.Default.id()); + break; + case EXCLUDE: + this.getIndexingDirective().setValue(RntbdIndexingDirective.Exclude.id()); + break; + case INCLUDE: + this.getIndexingDirective().setValue(RntbdIndexingDirective.Include.id()); + break; + default: + assert false; + } + } + } + + private void addIsAutoScaleRequest(final Map headers) { + final String value = 
headers.get(HttpHeaders.IS_AUTO_SCALE_REQUEST); + if (StringUtils.isNotEmpty(value)) { + this.getIsAutoScaleRequest().setValue(Boolean.parseBoolean(value)); + } + } + + private void addIsFanout(final Map headers) { + final String value = headers.get(BackendHeaders.IS_FANOUT_REQUEST); + if (StringUtils.isNotEmpty(value)) { + this.getIsFanout().setValue(Boolean.parseBoolean(value)); + } + } + + private void addIsReadOnlyScript(final Map headers) { + final String value = headers.get(HttpHeaders.IS_READ_ONLY_SCRIPT); + if (StringUtils.isNotEmpty(value)) { + this.getIsReadOnlyScript().setValue(Boolean.parseBoolean(value)); + } + } + + private void addIsUserRequest(final Map headers) { + final String value = headers.get(BackendHeaders.IS_USER_REQUEST); + if (StringUtils.isNotEmpty(value)) { + this.getIsUserRequest().setValue(Boolean.parseBoolean(value)); + } + } + + private void addMatchHeader(final Map headers, final RntbdOperationType operationType) { + + String match = null; + + switch (operationType) { + case Read: + case ReadFeed: + match = headers.get(HttpHeaders.IF_NONE_MATCH); + break; + default: + match = headers.get(HttpHeaders.IF_MATCH); + break; + } + + if (StringUtils.isNotEmpty(match)) { + this.getMatch().setValue(match); + } + } + + private void addMigrateCollectionDirectiveHeader(final Map headers) { + + final String value = headers.get(HttpHeaders.MIGRATE_COLLECTION_DIRECTIVE); + + if (StringUtils.isNotEmpty(value)) { + + final MigrateCollectionDirective directive = EnumUtils.getEnumIgnoreCase(MigrateCollectionDirective.class, value); + + if (directive == null) { + final String reason = String.format(Locale.ROOT, RMResources.InvalidRequestHeaderValue, + HttpHeaders.MIGRATE_COLLECTION_DIRECTIVE, + value); + throw new IllegalStateException(reason); + } + + switch (directive) { + case Freeze: + this.getMigrateCollectionDirective().setValue(RntbdMigrateCollectionDirective.Freeze.id()); + break; + case Thaw: + this.getMigrateCollectionDirective().setValue(RntbdMigrateCollectionDirective.Thaw.id()); + break; + default: + assert false; + break; + } + } + } + + private void addPageSize(final Map headers) { + + final String value = headers.get(HttpHeaders.PAGE_SIZE); + + if (StringUtils.isNotEmpty(value)) { + final long aLong = parseLong(HttpHeaders.PAGE_SIZE, value, -1, 0xFFFFFFFFL); + this.getPageSize().setValue((int)(aLong < 0 ? 
0xFFFFFFFFL : aLong)); + } + } + + private void addPopulateCollectionThroughputInfo(final Map headers) { + final String value = headers.get(HttpHeaders.POPULATE_COLLECTION_THROUGHPUT_INFO); + if (StringUtils.isNotEmpty(value)) { + this.getPopulateCollectionThroughputInfo().setValue(Boolean.parseBoolean(value)); + } + } + + private void addPopulatePartitionStatistics(final Map headers) { + final String value = headers.get(HttpHeaders.POPULATE_PARTITION_STATISTICS); + if (StringUtils.isNotEmpty(value)) { + this.getPopulatePartitionStatistics().setValue(Boolean.parseBoolean(value)); + } + } + + private void addPopulateQueryMetrics(final Map headers) { + final String value = headers.get(HttpHeaders.POPULATE_QUERY_METRICS); + if (StringUtils.isNotEmpty(value)) { + this.getPopulateQueryMetrics().setValue(Boolean.parseBoolean(value)); + } + } + + private void addPopulateQuotaInfo(final Map headers) { + final String value = headers.get(HttpHeaders.POPULATE_QUOTA_INFO); + if (StringUtils.isNotEmpty(value)) { + this.getPopulateQuotaInfo().setValue(Boolean.parseBoolean(value)); + } + } + + private void addProfileRequest(final Map headers) { + final String value = headers.get(HttpHeaders.PROFILE_REQUEST); + if (StringUtils.isNotEmpty(value)) { + this.getProfileRequest().setValue(Boolean.parseBoolean(value)); + } + } + + private void addQueryForceScan(final Map headers) { + final String value = headers.get(HttpHeaders.FORCE_QUERY_SCAN); + if (StringUtils.isNotEmpty(value)) { + this.getForceQueryScan().setValue(Boolean.parseBoolean(value)); + } + } + + private void addRemoteStorageType(final Map headers) { + + final String value = headers.get(BackendHeaders.REMOTE_STORAGE_TYPE); + + if (StringUtils.isNotEmpty(value)) { + + final RemoteStorageType type = EnumUtils.getEnumIgnoreCase(RemoteStorageType.class, value); + + if (type == null) { + final String reason = String.format(Locale.ROOT, RMResources.InvalidRequestHeaderValue, + BackendHeaders.REMOTE_STORAGE_TYPE, + value); + throw new IllegalStateException(reason); + } + + switch (type) { + case Standard: + this.getRemoteStorageType().setValue(RntbdRemoteStorageType.Standard.id()); + break; + case Premium: + this.getRemoteStorageType().setValue(RntbdRemoteStorageType.Premium.id()); + break; + default: + assert false; + } + } + } + + private void addResourceIdOrPathHeaders(final RxDocumentServiceRequest request) { + + final String value = request.getResourceId(); + + if (StringUtils.isNotEmpty(value)) { + // Name-based can also have ResourceId because gateway might have generated it + this.getResourceId().setValue(ResourceId.parse(request.getResourceType(), value)); + } + + if (request.getIsNameBased()) { + + // Assumption: format is like "dbs/dbName/colls/collName/docs/docName" or "/dbs/dbName/colls/collName", + // not "apps/appName/partitions/partitionKey/replicas/replicaId/dbs/dbName" + + final String address = request.getResourceAddress(); + final String[] fragments = address.split(UrlTrim); + int count = fragments.length; + int index = 0; + + if (count > 0 && fragments[0].isEmpty()) { + ++index; + --count; + } + + if (count >= 2) { + switch (fragments[index]) { + case Paths.DATABASES_PATH_SEGMENT: + this.getDatabaseName().setValue(fragments[index + 1]); + break; + default: + final String reason = String.format(Locale.ROOT, RMResources.InvalidResourceAddress, + value, address); + throw new IllegalStateException(reason); + } + } + + if (count >= 4) { + switch (fragments[index + 2]) { + case Paths.COLLECTIONS_PATH_SEGMENT: + 
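// the resource name always follows its path segment: for "dbs/{db}/colls/{coll}", fragments[index + 3] is {coll} +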
this.getCollectionName().setValue(fragments[index + 3]); + break; + case Paths.USERS_PATH_SEGMENT: + this.getUserName().setValue(fragments[index + 3]); + break; + case Paths.USER_DEFINED_TYPES_PATH_SEGMENT: + this.getUserDefinedTypeName().setValue(fragments[index + 3]); + break; + } + } + + if (count >= 6) { + switch (fragments[index + 4]) { + case Paths.DOCUMENTS_PATH_SEGMENT: + this.getDocumentName().setValue(fragments[index + 5]); + break; + case Paths.STORED_PROCEDURES_PATH_SEGMENT: + this.getStoredProcedureName().setValue(fragments[index + 5]); + break; + case Paths.PERMISSIONS_PATH_SEGMENT: + this.getPermissionName().setValue(fragments[index + 5]); + break; + case Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT: + this.getUserDefinedFunctionName().setValue(fragments[index + 5]); + break; + case Paths.TRIGGERS_PATH_SEGMENT: + this.getTriggerName().setValue(fragments[index + 5]); + break; + case Paths.CONFLICTS_PATH_SEGMENT: + this.getConflictName().setValue(fragments[index + 5]); + break; + case Paths.PARTITION_KEY_RANGES_PATH_SEGMENT: + this.getPartitionKeyRangeName().setValue(fragments[index + 5]); + break; + case Paths.SCHEMAS_PATH_SEGMENT: + this.getSchemaName().setValue(fragments[index + 5]); + break; + } + } + + if (count >= 8) { + switch (fragments[index + 6]) { + case Paths.ATTACHMENTS_PATH_SEGMENT: + this.getAttachmentName().setValue(fragments[index + 7]); + break; + } + } + } + } + + private void addResponseContinuationTokenLimitInKb(final Map headers) { + + final String value = headers.get(HttpHeaders.RESPONSE_CONTINUATION_TOKEN_LIMIT_IN_KB); + + if (StringUtils.isNotEmpty(value)) { + final long aLong = parseLong(HttpHeaders.RESPONSE_CONTINUATION_TOKEN_LIMIT_IN_KB, value, 0, 0xFFFFFFFFL); + this.getResponseContinuationTokenLimitInKb().setValue((int)(aLong < 0 ? 
0xFFFFFFFFL : aLong)); + } + } + + private void addShareThroughput(final Map headers) { + final String value = headers.get(BackendHeaders.SHARE_THROUGHPUT); + if (StringUtils.isNotEmpty(value)) { + this.getShareThroughput().setValue(Boolean.parseBoolean(value)); + } + } + + private void addStartAndEndKeys(final Map headers) { + + String value = headers.get(HttpHeaders.READ_FEED_KEY_TYPE); + + if (StringUtils.isNotEmpty(value)) { + + final ReadFeedKeyType type = EnumUtils.getEnumIgnoreCase(ReadFeedKeyType.class, value); + + if (type == null) { + final String reason = String.format(Locale.ROOT, RMResources.InvalidRequestHeaderValue, + HttpHeaders.READ_FEED_KEY_TYPE, + value); + throw new IllegalStateException(reason); + } + + switch (type) { + case ResourceId: + this.getReadFeedKeyType().setValue(RntbdReadFeedKeyType.ResourceId.id()); + break; + case EffectivePartitionKey: + this.getReadFeedKeyType().setValue(RntbdReadFeedKeyType.EffectivePartitionKey.id()); + break; + default: + assert false; + } + } + + final Base64.Decoder decoder = Base64.getDecoder(); + + value = headers.get(HttpHeaders.START_ID); + + if (StringUtils.isNotEmpty(value)) { + this.getStartId().setValue(decoder.decode(value)); + } + + value = headers.get(HttpHeaders.END_ID); + + if (StringUtils.isNotEmpty(value)) { + this.getEndId().setValue(decoder.decode(value)); + } + + value = headers.get(HttpHeaders.START_EPK); + + if (StringUtils.isNotEmpty(value)) { + this.getStartEpk().setValue(decoder.decode(value)); + } + + value = headers.get(HttpHeaders.END_EPK); + + if (StringUtils.isNotEmpty(value)) { + this.getEndEpk().setValue(decoder.decode(value)); + } + } + + private void addSupportSpatialLegacyCoordinates(final Map headers) { + final String value = headers.get(HttpHeaders.SUPPORT_SPATIAL_LEGACY_COORDINATES); + if (StringUtils.isNotEmpty(value)) { + this.getSupportSpatialLegacyCoordinates().setValue(Boolean.parseBoolean(value)); + } + } + + private void addUsePolygonsSmallerThanAHemisphere(final Map headers) { + final String value = headers.get(HttpHeaders.USE_POLYGONS_SMALLER_THAN_AHEMISPHERE); + if (StringUtils.isNotEmpty(value)) { + this.getUsePolygonsSmallerThanAHemisphere().setValue(Boolean.parseBoolean(value)); + } + } + + private void fillTokenFromHeader(final Map headers, final Supplier supplier, final String name) { + + final String value = headers.get(name); + + if (StringUtils.isNotEmpty(value)) { + + final RntbdToken token = supplier.get(); + + switch (token.getTokenType()) { + + case SmallString: + case String: + case ULongString: { + + token.setValue(value); + break; + } + case Byte: { + + token.setValue(Boolean.parseBoolean(value)); + break; + } + case Double: { + + token.setValue(parseDouble(name, value)); + break; + } + case Long: { + + final long aLong = parseLong(name, value, Integer.MIN_VALUE, Integer.MAX_VALUE); + token.setValue(aLong); + break; + } + case ULong: { + + final long aLong = parseLong(name, value, 0, 0xFFFFFFFFL); + token.setValue(aLong); + break; + } + case LongLong: { + + final long aLong = parseLong(name, value); + token.setValue(aLong); + break; + } + default: { + assert false : "Recognized header has neither special-case nor default handling to convert " + + "from header String to RNTBD token"; + break; + } + } + } + } + + private static double parseDouble(final String name, final String value) { + + final double aDouble; + + try { + aDouble = Double.parseDouble(value); + } catch (final NumberFormatException error) { + final String reason = String.format(Locale.ROOT, 
RMResources.InvalidRequestHeaderValue, name, value); + throw new IllegalStateException(reason); + } + return aDouble; + } + + private static long parseLong(final String name, final String value) { + final long aLong; + try { + aLong = Long.parseLong(value); + } catch (final NumberFormatException error) { + final String reason = String.format(Locale.ROOT, RMResources.InvalidRequestHeaderValue, name, value); + throw new IllegalStateException(reason); + } + return aLong; + } + + private static long parseLong(final String name, final String value, final long min, final long max) { + final long aLong = parseLong(name, value); + if (!(min <= aLong && aLong <= max)) { + final String reason = String.format(Locale.ROOT, RMResources.InvalidRequestHeaderValue, name, aLong); + throw new IllegalStateException(reason); + } + return aLong; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdRequestManager.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdRequestManager.java new file mode 100644 index 0000000000000..5f270c5136925 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdRequestManager.java @@ -0,0 +1,796 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.directconnectivity.rntbd; + +import com.azure.data.cosmos.BadRequestException; +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.ConflictException; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.CosmosError; +import com.azure.data.cosmos.ForbiddenException; +import com.azure.data.cosmos.GoneException; +import com.azure.data.cosmos.InternalServerErrorException; +import com.azure.data.cosmos.InvalidPartitionException; +import com.azure.data.cosmos.LockedException; +import com.azure.data.cosmos.MethodNotAllowedException; +import com.azure.data.cosmos.NotFoundException; +import com.azure.data.cosmos.PartitionIsMigratingException; +import com.azure.data.cosmos.PartitionKeyRangeGoneException; +import com.azure.data.cosmos.PartitionKeyRangeIsSplittingException; +import com.azure.data.cosmos.PreconditionFailedException; +import com.azure.data.cosmos.RequestEntityTooLargeException; +import com.azure.data.cosmos.RequestRateTooLargeException; +import com.azure.data.cosmos.RequestTimeoutException; +import com.azure.data.cosmos.RetryWithException; +import com.azure.data.cosmos.ServiceUnavailableException; +import com.azure.data.cosmos.UnauthorizedException; +import com.azure.data.cosmos.internal.directconnectivity.StoreResponse; +import com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdConstants.RntbdResponseHeader; +import com.google.common.base.Strings; +import io.netty.buffer.ByteBuf; +import io.netty.channel.Channel; +import io.netty.channel.ChannelException; +import io.netty.channel.ChannelHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInboundHandler; +import io.netty.channel.ChannelOption; +import io.netty.channel.ChannelOutboundHandler; +import io.netty.channel.ChannelPipeline; +import io.netty.channel.ChannelPromise; +import io.netty.channel.CoalescingBufferQueue; +import io.netty.channel.EventLoop; +import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.handler.ssl.SslHandler; +import io.netty.util.ReferenceCountUtil; +import io.netty.util.Timeout; +import io.netty.util.concurrent.EventExecutor; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.net.SocketAddress; +import java.util.Map; +import java.util.Optional; +import java.util.UUID; +import java.util.concurrent.CancellationException; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; + +import static com.azure.data.cosmos.internal.HttpConstants.StatusCodes; +import static com.azure.data.cosmos.internal.HttpConstants.SubStatusCodes; +import static com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdReporter.reportIssue; +import static com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdReporter.reportIssueUnless; +import static com.google.common.base.Preconditions.checkArgument; +import static com.google.common.base.Preconditions.checkState; + +public final class RntbdRequestManager implements ChannelHandler, ChannelInboundHandler, ChannelOutboundHandler { + + // region Fields + + private static final Logger logger = LoggerFactory.getLogger(RntbdRequestManager.class); + + private final CompletableFuture contextFuture = new CompletableFuture<>(); + private final CompletableFuture contextRequestFuture = new CompletableFuture<>(); + private final ConcurrentHashMap pendingRequests; + private final int pendingRequestLimit; + + private boolean closingExceptionally = false; + private 
CoalescingBufferQueue pendingWrites; + + // endregion + + public RntbdRequestManager(int capacity) { + checkArgument(capacity > 0, "capacity: %s", capacity); + this.pendingRequests = new ConcurrentHashMap<>(capacity); + this.pendingRequestLimit = capacity; + } + + // region ChannelHandler methods + + /** + * Gets called after the {@link ChannelHandler} was added to the actual context and it's ready to handle events. + * + * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs + */ + @Override + public void handlerAdded(final ChannelHandlerContext context) { + this.traceOperation(context, "handlerAdded"); + } + + /** + * Gets called after the {@link ChannelHandler} was removed from the actual context and it doesn't handle events + * anymore. + * + * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs + */ + @Override + public void handlerRemoved(final ChannelHandlerContext context) { + this.traceOperation(context, "handlerRemoved"); + } + + // endregion + + // region ChannelInboundHandler methods + + /** + * The {@link Channel} of the {@link ChannelHandlerContext} is now active + * + * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs + */ + @Override + public void channelActive(final ChannelHandlerContext context) { + this.traceOperation(context, "channelActive"); + context.fireChannelActive(); + } + + /** + * Completes all pending requests exceptionally when a channel reaches the end of its lifetime + *
<p>
+ * This method will only be called after the channel is closed. + * + * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs + */ + @Override + public void channelInactive(final ChannelHandlerContext context) { + this.traceOperation(context, "channelInactive"); + context.fireChannelInactive(); + } + + @Override + public void channelRead(final ChannelHandlerContext context, final Object message) { + + this.traceOperation(context, "channelRead"); + + if (message instanceof RntbdResponse) { + + try { + this.messageReceived(context, (RntbdResponse)message); + } catch (Throwable throwable) { + reportIssue(logger, context, "{} ", message, throwable); + this.exceptionCaught(context, throwable); + } finally { + ReferenceCountUtil.release(message); + } + + } else { + + final IllegalStateException error = new IllegalStateException( + Strings.lenientFormat("expected message of %s, not %s: %s", + RntbdResponse.class, message.getClass(), message + ) + ); + + reportIssue(logger, context, "", error); + this.exceptionCaught(context, error); + } + } + + /** + * Invoked when the last message read by the current read operation has been consumed + *
<p>
+ * If {@link ChannelOption#AUTO_READ} is off, no further attempt to read an inbound data from the current + * {@link Channel} will be made until {@link ChannelHandlerContext#read} is called. This leaves time + * for outbound messages to be written. + * + * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs + */ + @Override + public void channelReadComplete(final ChannelHandlerContext context) { + this.traceOperation(context, "channelReadComplete"); + context.fireChannelReadComplete(); + } + + /** + * Constructs a {@link CoalescingBufferQueue} for buffering encoded requests until we have an {@link RntbdRequest} + *
<p>
+ * This method then calls {@link ChannelHandlerContext#fireChannelRegistered()} to forward to the next + * {@link ChannelInboundHandler} in the {@link ChannelPipeline}. + *
<p>
+ * Sub-classes may override this method to change behavior. + * + * @param context the {@link ChannelHandlerContext} for which the bind operation is made + */ + @Override + public void channelRegistered(final ChannelHandlerContext context) { + + this.traceOperation(context, "channelRegistered"); + + checkState(this.pendingWrites == null, "pendingWrites: %s", this.pendingWrites); + this.pendingWrites = new CoalescingBufferQueue(context.channel()); + + context.fireChannelRegistered(); + } + + /** + * The {@link Channel} of the {@link ChannelHandlerContext} was unregistered from its {@link EventLoop} + * + * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs + */ + @Override + public void channelUnregistered(final ChannelHandlerContext context) { + + this.traceOperation(context, "channelUnregistered"); + + checkState(this.pendingWrites != null, "pendingWrites: null"); + this.completeAllPendingRequestsExceptionally(context, ClosedWithPendingRequestsException.INSTANCE); + this.pendingWrites = null; + + context.fireChannelUnregistered(); + } + + /** + * Gets called once the writable state of a {@link Channel} changed. You can check the state with + * {@link Channel#isWritable()}. + * + * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs + */ + @Override + public void channelWritabilityChanged(final ChannelHandlerContext context) { + this.traceOperation(context, "channelWritabilityChanged"); + context.fireChannelWritabilityChanged(); + } + + /** + * Processes {@link ChannelHandlerContext#fireExceptionCaught(Throwable)} in the {@link ChannelPipeline} + * + * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs + * @param cause Exception caught + */ + @Override + @SuppressWarnings("deprecation") + public void exceptionCaught(final ChannelHandlerContext context, final Throwable cause) { + + // TODO: DANOBLE: replace RntbdRequestManager.exceptionCaught with read/write listeners + // Notes: + // ChannelInboundHandler.exceptionCaught is deprecated and--today, prior to deprecation--only catches read-- + // i.e., inbound--exceptions. + // Replacements: + // * read listener: unclear as there is no obvious replacement + // * write listener: implemented by RntbdTransportClient.DefaultEndpoint.doWrite + // Links: + // https://msdata.visualstudio.com/CosmosDB/_workitems/edit/373213 + + this.traceOperation(context, "exceptionCaught", cause); + + if (!this.closingExceptionally) { + + reportIssueUnless(cause != ClosedWithPendingRequestsException.INSTANCE, logger, context, + "expected an exception other than ", ClosedWithPendingRequestsException.INSTANCE); + + this.completeAllPendingRequestsExceptionally(context, cause); + context.pipeline().flush().close(); + } + } + + /** + * Processes inbound events triggered by channel handlers in the {@link RntbdClientChannelHandler} pipeline + *
<p>
+ * All but inbound request management events are ignored. + * + * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs + * @param event An object representing a user event + */ + @Override + public void userEventTriggered(final ChannelHandlerContext context, final Object event) { + + this.traceOperation(context, "userEventTriggered", event); + + try { + if (event instanceof RntbdContext) { + this.contextFuture.complete((RntbdContext)event); + this.removeContextNegotiatorAndFlushPendingWrites(context); + return; + } + if (event instanceof RntbdContextException) { + this.contextFuture.completeExceptionally((RntbdContextException)event); + context.pipeline().flush().close(); + return; + } + context.fireUserEventTriggered(event); + + } catch (Throwable error) { + reportIssue(logger, context, "{}: ", event, error); + this.exceptionCaught(context, error); + } + } + + // endregion + + // region ChannelOutboundHandler methods + + /** + * Called once a bind operation is made. + * + * @param context the {@link ChannelHandlerContext} for which the bind operation is made + * @param localAddress the {@link SocketAddress} to which it should bound + * @param promise the {@link ChannelPromise} to notify once the operation completes + */ + @Override + public void bind(final ChannelHandlerContext context, final SocketAddress localAddress, final ChannelPromise promise) { + this.traceOperation(context, "bind", localAddress); + context.bind(localAddress, promise); + } + + /** + * Called once a close operation is made. + * + * @param context the {@link ChannelHandlerContext} for which the close operation is made + * @param promise the {@link ChannelPromise} to notify once the operation completes + */ + @Override + public void close(final ChannelHandlerContext context, final ChannelPromise promise) { + + this.traceOperation(context, "close"); + + this.completeAllPendingRequestsExceptionally(context, ClosedWithPendingRequestsException.INSTANCE); + final SslHandler sslHandler = context.pipeline().get(SslHandler.class); + + if (sslHandler != null) { + // Netty 4.1.36.Final: SslHandler.closeOutbound must be called before closing the pipeline + // This ensures that all SSL engine and ByteBuf resources are released + // This is something that does not occur in the call to ChannelPipeline.close that follows + sslHandler.closeOutbound(); + } + + context.close(promise); + } + + /** + * Called once a connect operation is made. + * + * @param context the {@link ChannelHandlerContext} for which the connect operation is made + * @param remoteAddress the {@link SocketAddress} to which it should connect + * @param localAddress the {@link SocketAddress} which is used as source on connect + * @param promise the {@link ChannelPromise} to notify once the operation completes + */ + @Override + public void connect( + final ChannelHandlerContext context, final SocketAddress remoteAddress, final SocketAddress localAddress, + final ChannelPromise promise + ) { + this.traceOperation(context, "connect", remoteAddress, localAddress); + context.connect(remoteAddress, localAddress, promise); + } + + /** + * Called once a deregister operation is made from the current registered {@link EventLoop}. 
+ * + * @param context the {@link ChannelHandlerContext} for which the deregister operation is made + * @param promise the {@link ChannelPromise} to notify once the operation completes + */ + @Override + public void deregister(final ChannelHandlerContext context, final ChannelPromise promise) { + this.traceOperation(context, "deregister"); + context.deregister(promise); + } + + /** + * Called once a disconnect operation is made. + * + * @param context the {@link ChannelHandlerContext} for which the disconnect operation is made + * @param promise the {@link ChannelPromise} to notify once the operation completes + */ + @Override + public void disconnect(final ChannelHandlerContext context, final ChannelPromise promise) { + this.traceOperation(context, "disconnect"); + context.disconnect(promise); + } + + /** + * Called once a flush operation is made. + *
+ * The flush operation will try to flush out all previously written messages that are pending. + * + * @param context the {@link ChannelHandlerContext} for which the flush operation is made + */ + @Override + public void flush(final ChannelHandlerContext context) { + this.traceOperation(context, "flush"); + context.flush(); + } + + /** + * Intercepts {@link ChannelHandlerContext#read}. + * + * @param context the {@link ChannelHandlerContext} for which the read operation is made + */ + @Override + public void read(final ChannelHandlerContext context) { + this.traceOperation(context, "read"); + context.read(); + } + + /** + * Called once a write operation is made. + *
+ * The write operation will send messages through the {@link ChannelPipeline} which are then ready to be flushed + * to the actual {@link Channel}. This will occur when {@link Channel#flush} is called. + * + * @param context the {@link ChannelHandlerContext} for which the write operation is made + * @param message the message to write + * @param promise the {@link ChannelPromise} to notify once the operation completes + */ + @Override + public void write(final ChannelHandlerContext context, final Object message, final ChannelPromise promise) { + + // TODO: DANOBLE: Ensure that all write errors are reported with a root cause of type EncoderException + + this.traceOperation(context, "write", message); + + if (message instanceof RntbdRequestRecord) { + + context.write(this.addPendingRequestRecord(context, (RntbdRequestRecord)message), promise); + + } else { + + final IllegalStateException error = new IllegalStateException( + Strings.lenientFormat("expected message of %s, not %s: %s", + RntbdRequestRecord.class, message.getClass(), message + ) + ); + + reportIssue(logger, context, "", error); + this.exceptionCaught(context, error); + } + } + + // endregion + + // region Private and package private methods + + CompletableFuture getRntbdContextRequestFuture() { + return this.contextRequestFuture; + } + + boolean hasRntbdContext() { + return this.contextFuture.getNow(null) != null; + } + + boolean isServiceable(final int demand) { + final int limit = this.hasRntbdContext() ? this.pendingRequestLimit : Math.min(this.pendingRequestLimit, demand); + return this.pendingRequests.size() < limit; + } + + void pendWrite(final ByteBuf out, final ChannelPromise promise) { + this.pendingWrites.add(out, promise); + } + + private RntbdRequestArgs addPendingRequestRecord(final ChannelHandlerContext context, final RntbdRequestRecord record) { + + return this.pendingRequests.compute(record.getTransportRequestId(), (id, current) -> { + + reportIssueUnless(current == null, logger, context, "id: {}, current: {}, request: {}", id, current, record); + + final Timeout pendingRequestTimeout = record.newTimeout(timeout -> { + + // We don't wish to complete on the timeout thread, but rather on a thread doled out by our executor + + EventExecutor executor = context.executor(); + + if (executor.inEventLoop()) { + record.expire(); + } else { + executor.next().execute(record::expire); + } + }); + + record.whenComplete((response, error) -> { + this.pendingRequests.remove(id); + pendingRequestTimeout.cancel(); + }); + + return record; + + }).getArgs(); + } + + private Optional getRntbdContext() { + return Optional.of(this.contextFuture.getNow(null)); + } + + private void completeAllPendingRequestsExceptionally(final ChannelHandlerContext context, final Throwable throwable) { + + if (this.closingExceptionally) { + + reportIssueUnless(throwable == ClosedWithPendingRequestsException.INSTANCE, logger, context, + "throwable: ", throwable); + + reportIssueUnless(this.pendingRequests.isEmpty() && this.pendingWrites.isEmpty(), logger, context, + "pendingRequests: {}, pendingWrites: {}", this.pendingRequests.isEmpty(), + this.pendingWrites.isEmpty()); + + return; + } + + this.closingExceptionally = true; + + if (!this.pendingWrites.isEmpty()) { + this.pendingWrites.releaseAndFailAll(context, ClosedWithPendingRequestsException.INSTANCE); + } + + if (!this.pendingRequests.isEmpty()) { + + if (!this.contextRequestFuture.isDone()) { + this.contextRequestFuture.completeExceptionally(throwable); + } + + if (!this.contextFuture.isDone()) { + 
this.contextFuture.completeExceptionally(throwable); + } + + final int count = this.pendingRequests.size(); + Exception contextRequestException = null; + String phrase = null; + + if (this.contextRequestFuture.isCompletedExceptionally()) { + + try { + this.contextRequestFuture.get(); + } catch (final CancellationException error) { + phrase = "RNTBD context request write cancelled"; + contextRequestException = error; + } catch (final Exception error) { + phrase = "RNTBD context request write failed"; + contextRequestException = error; + } catch (final Throwable error) { + phrase = "RNTBD context request write failed"; + contextRequestException = new ChannelException(error); + } + + } else if (this.contextFuture.isCompletedExceptionally()) { + + try { + this.contextFuture.get(); + } catch (final CancellationException error) { + phrase = "RNTBD context request read cancelled"; + contextRequestException = error; + } catch (final Exception error) { + phrase = "RNTBD context request read failed"; + contextRequestException = error; + } catch (final Throwable error) { + phrase = "RNTBD context request read failed"; + contextRequestException = new ChannelException(error); + } + + } else { + + phrase = "closed exceptionally"; + } + + final String message = Strings.lenientFormat("%s %s with %s pending requests", context, phrase, count); + final Exception cause; + + if (throwable == ClosedWithPendingRequestsException.INSTANCE) { + + cause = contextRequestException == null + ? ClosedWithPendingRequestsException.INSTANCE + : contextRequestException; + + } else { + + cause = throwable instanceof Exception + ? (Exception)throwable + : new ChannelException(throwable); + } + + for (RntbdRequestRecord record : this.pendingRequests.values()) { + + final Map requestHeaders = record.getArgs().getServiceRequest().getHeaders(); + final String requestUri = record.getArgs().getPhysicalAddress().toString(); + + final GoneException error = new GoneException(message, cause, (Map)null, requestUri); + BridgeInternal.setRequestHeaders(error, requestHeaders); + + record.completeExceptionally(error); + } + } + } + + /** + * This method is called for each incoming message of type {@link StoreResponse} to complete a request + * + * @param context {@link ChannelHandlerContext} encode to which this {@link RntbdRequestManager} belongs + * @param response the message encode handle + */ + private void messageReceived(final ChannelHandlerContext context, final RntbdResponse response) { + + final Long transportRequestId = response.getTransportRequestId(); + + if (transportRequestId == null) { + reportIssue(logger, context, "{} ignored because there is no transport request identifier, response"); + return; + } + + final RntbdRequestRecord pendingRequest = this.pendingRequests.get(transportRequestId); + + if (pendingRequest == null) { + reportIssue(logger, context, "{} ignored because there is no matching pending request", response); + return; + } + + final HttpResponseStatus status = response.getStatus(); + final UUID activityId = response.getActivityId(); + + if (HttpResponseStatus.OK.code() <= status.code() && status.code() < HttpResponseStatus.MULTIPLE_CHOICES.code()) { + + final StoreResponse storeResponse = response.toStoreResponse(this.contextFuture.getNow(null)); + pendingRequest.complete(storeResponse); + + } else { + + // Map response to a CosmosClientException + + final CosmosClientException cause; + + // ..Fetch required header values + + final long lsn = response.getHeader(RntbdResponseHeader.LSN); + final String 
partitionKeyRangeId = response.getHeader(RntbdResponseHeader.PartitionKeyRangeId); + + // ..Create Error instance + + final CosmosError cosmosError = response.hasPayload() ? + BridgeInternal.createCosmosError(RntbdObjectMapper.readTree(response)) : + new CosmosError(Integer.toString(status.code()), status.reasonPhrase(), status.codeClass().name()); + + // ..Map RNTBD response headers to HTTP response headers + + final Map responseHeaders = response.getHeaders().asMap( + this.getRntbdContext().orElseThrow(IllegalStateException::new), activityId + ); + + // ..Create CosmosClientException based on status and sub-status codes + + switch (status.code()) { + + case StatusCodes.BADREQUEST: + cause = new BadRequestException(cosmosError, lsn, partitionKeyRangeId, responseHeaders); + break; + + case StatusCodes.CONFLICT: + cause = new ConflictException(cosmosError, lsn, partitionKeyRangeId, responseHeaders); + break; + + case StatusCodes.FORBIDDEN: + cause = new ForbiddenException(cosmosError, lsn, partitionKeyRangeId, responseHeaders); + break; + + case StatusCodes.GONE: + + final int subStatusCode = Math.toIntExact(response.getHeader(RntbdResponseHeader.SubStatus)); + + switch (subStatusCode) { + case SubStatusCodes.COMPLETING_SPLIT: + cause = new PartitionKeyRangeIsSplittingException(cosmosError, lsn, partitionKeyRangeId, responseHeaders); + break; + case SubStatusCodes.COMPLETING_PARTITION_MIGRATION: + cause = new PartitionIsMigratingException(cosmosError, lsn, partitionKeyRangeId, responseHeaders); + break; + case SubStatusCodes.NAME_CACHE_IS_STALE: + cause = new InvalidPartitionException(cosmosError, lsn, partitionKeyRangeId, responseHeaders); + break; + case SubStatusCodes.PARTITION_KEY_RANGE_GONE: + cause = new PartitionKeyRangeGoneException(cosmosError, lsn, partitionKeyRangeId, responseHeaders); + break; + default: + cause = new GoneException(cosmosError, lsn, partitionKeyRangeId, responseHeaders); + break; + } + break; + + case StatusCodes.INTERNAL_SERVER_ERROR: + cause = new InternalServerErrorException(cosmosError, lsn, partitionKeyRangeId, responseHeaders); + break; + + case StatusCodes.LOCKED: + cause = new LockedException(cosmosError, lsn, partitionKeyRangeId, responseHeaders); + break; + + case StatusCodes.METHOD_NOT_ALLOWED: + cause = new MethodNotAllowedException(cosmosError, lsn, partitionKeyRangeId, responseHeaders); + break; + + case StatusCodes.NOTFOUND: + cause = new NotFoundException(cosmosError, lsn, partitionKeyRangeId, responseHeaders); + break; + + case StatusCodes.PRECONDITION_FAILED: + cause = new PreconditionFailedException(cosmosError, lsn, partitionKeyRangeId, responseHeaders); + break; + + case StatusCodes.REQUEST_ENTITY_TOO_LARGE: + cause = new RequestEntityTooLargeException(cosmosError, lsn, partitionKeyRangeId, responseHeaders); + break; + + case StatusCodes.REQUEST_TIMEOUT: + cause = new RequestTimeoutException(cosmosError, lsn, partitionKeyRangeId, responseHeaders); + break; + + case StatusCodes.RETRY_WITH: + cause = new RetryWithException(cosmosError, lsn, partitionKeyRangeId, responseHeaders); + break; + + case StatusCodes.SERVICE_UNAVAILABLE: + cause = new ServiceUnavailableException(cosmosError, lsn, partitionKeyRangeId, responseHeaders); + break; + + case StatusCodes.TOO_MANY_REQUESTS: + cause = new RequestRateTooLargeException(cosmosError, lsn, partitionKeyRangeId, responseHeaders); + break; + + case StatusCodes.UNAUTHORIZED: + cause = new UnauthorizedException(cosmosError, lsn, partitionKeyRangeId, responseHeaders); + break; + + default: + cause = 
BridgeInternal.createCosmosClientException(status.code(), cosmosError, responseHeaders); + break; + } + + pendingRequest.completeExceptionally(cause); + } + } + + private void removeContextNegotiatorAndFlushPendingWrites(final ChannelHandlerContext context) { + + final RntbdContextNegotiator negotiator = context.pipeline().get(RntbdContextNegotiator.class); + negotiator.removeInboundHandler(); + negotiator.removeOutboundHandler(); + + if (!this.pendingWrites.isEmpty()) { + this.pendingWrites.writeAndRemoveAll(context); + } + } + + private void traceOperation(final ChannelHandlerContext context, final String operationName, final Object... args) { + logger.trace("{}\n{}\n{}", operationName, context, args); + } + + // endregion + + // region Types + + private static class ClosedWithPendingRequestsException extends RuntimeException { + + static ClosedWithPendingRequestsException INSTANCE = new ClosedWithPendingRequestsException(); + + // TODO: DANOBLE: Consider revising strategy for closing an RntbdTransportClient with pending requests + // One possibility: + // A channel associated with an RntbdTransportClient will not be closed immediately, if there are any pending + // requests on it. Instead it will be scheduled to close after the request timeout interval (default: 60s) has + // elapsed. + // Algorithm: + // When the RntbdTransportClient is closed, it closes each of its RntbdServiceEndpoint instances. In turn each + // RntbdServiceEndpoint closes its RntbdClientChannelPool. The RntbdClientChannelPool.close method should + // schedule closure of any channel with pending requests for later; when the request timeout interval has + // elapsed or--ideally--when all pending requests have completed. + // Links: + // https://msdata.visualstudio.com/CosmosDB/_workitems/edit/388987 + + private ClosedWithPendingRequestsException() { + super(null, null, /* enableSuppression */ false, /* writableStackTrace */ false); + } + } + + // endregion +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdRequestRecord.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdRequestRecord.java new file mode 100644 index 0000000000000..405a9673019ab --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdRequestRecord.java @@ -0,0 +1,94 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.directconnectivity.rntbd; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.RequestTimeoutException; +import com.azure.data.cosmos.internal.directconnectivity.StoreResponse; +import io.netty.util.Timeout; +import io.netty.util.TimerTask; + +import java.time.Duration; +import java.util.UUID; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; + +import static com.google.common.base.Preconditions.checkNotNull; + +public final class RntbdRequestRecord extends CompletableFuture { + + private static final String simpleClassName = RntbdRequestRecord.class.getSimpleName(); + + private final RntbdRequestArgs args; + private final RntbdRequestTimer timer; + + public RntbdRequestRecord(final RntbdRequestArgs args, final RntbdRequestTimer timer) { + + checkNotNull(args, "args"); + checkNotNull(timer, "timer"); + + this.args = args; + this.timer = timer; + } + + public UUID getActivityId() { + return this.args.getActivityId(); + } + + public RntbdRequestArgs getArgs() { + return this.args; + } + + public long getBirthTime() { + return this.args.getBirthTime(); + } + + public Duration getLifetime() { + return this.args.getLifetime(); + } + + public long getTransportRequestId() { + return this.args.getTransportRequestId(); + } + + public boolean expire() { + + final long timeoutInterval = this.timer.getRequestTimeout(TimeUnit.MILLISECONDS); + final String message = String.format("Request timeout interval (%,d ms) elapsed", timeoutInterval); + final RequestTimeoutException error = new RequestTimeoutException(message, this.args.getPhysicalAddress()); + + BridgeInternal.setRequestHeaders(error, this.args.getServiceRequest().getHeaders()); + + return this.completeExceptionally(error); + } + + public Timeout newTimeout(final TimerTask task) { + return this.timer.newTimeout(task); + } + + @Override + public String toString() { + return simpleClassName + '(' + RntbdObjectMapper.toJson(this.args) + ')'; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdRequestTimer.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdRequestTimer.java new file mode 100644 index 0000000000000..54d6ae366dd1e --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdRequestTimer.java @@ -0,0 +1,62 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +package com.azure.data.cosmos.internal.directconnectivity.rntbd; + +import io.netty.util.HashedWheelTimer; +import io.netty.util.Timeout; +import io.netty.util.Timer; +import io.netty.util.TimerTask; + +import java.util.concurrent.TimeUnit; + +public final class RntbdRequestTimer implements AutoCloseable { + + private static final long FIVE_MILLISECONDS = 5000000L; + private final long requestTimeout; + private final Timer timer; + + public RntbdRequestTimer(final long requestTimeout) { + + // Inspection of the HashWheelTimer code indicates that our choice of a 5 millisecond timer resolution ensures + // a request will timeout within 10 milliseconds of the specified requestTimeout interval. This is because + // cancellation of a timeout takes two timer resolution units to complete. + + this.timer = new HashedWheelTimer(FIVE_MILLISECONDS, TimeUnit.NANOSECONDS); + this.requestTimeout = requestTimeout; + } + + public long getRequestTimeout(TimeUnit unit) { + return unit.convert(requestTimeout, TimeUnit.NANOSECONDS); + } + + @Override + public void close() throws RuntimeException { + this.timer.stop(); + } + + public Timeout newTimeout(final TimerTask task) { + return this.timer.newTimeout(task, this.requestTimeout, TimeUnit.NANOSECONDS); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdResponse.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdResponse.java new file mode 100644 index 0000000000000..fcb19bb4647da --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdResponse.java @@ -0,0 +1,287 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ + +package com.azure.data.cosmos.internal.directconnectivity.rntbd; + +import com.azure.data.cosmos.internal.directconnectivity.StoreResponse; +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonPropertyOrder; +import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.databind.SerializerProvider; +import com.fasterxml.jackson.databind.annotation.JsonSerialize; +import com.fasterxml.jackson.databind.ser.std.StdSerializer; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufUtil; +import io.netty.buffer.EmptyByteBuf; +import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.util.ReferenceCounted; +import io.netty.util.ResourceLeakDetector; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicInteger; + +import static com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdConstants.RntbdResponseHeader; +import static com.google.common.base.Preconditions.checkNotNull; +import static com.google.common.base.Preconditions.checkState; +import static java.lang.Math.min; + +@JsonPropertyOrder({ "frame", "headers", "content" }) +public final class RntbdResponse implements ReferenceCounted { + + // region Fields + + private static final String simpleClassName = RntbdResponse.class.getSimpleName(); + + @JsonProperty + @JsonSerialize(using = PayloadSerializer.class) + private final ByteBuf content; + + @JsonProperty + private final RntbdResponseStatus frame; + + @JsonProperty + private final RntbdResponseHeaders headers; + + private AtomicInteger referenceCount = new AtomicInteger(); + + // endregion + + public RntbdResponse(final UUID activityId, final int statusCode, final Map map, final ByteBuf content) { + + this.headers = RntbdResponseHeaders.fromMap(map, content.readableBytes() > 0); + this.content = content.retain(); + + final HttpResponseStatus status = HttpResponseStatus.valueOf(statusCode); + final int length = RntbdResponseStatus.LENGTH + this.headers.computeLength(); + + this.frame = new RntbdResponseStatus(length, status, activityId); + } + + private RntbdResponse(final RntbdResponseStatus frame, final RntbdResponseHeaders headers, final ByteBuf content) { + + this.frame = frame; + this.headers = headers; + this.content = content.retain(); + } + + public UUID getActivityId() { + return this.frame.getActivityId(); + } + + @JsonIgnore + public ByteBuf getContent() { + return this.content; + } + + @JsonIgnore + public RntbdResponseHeaders getHeaders() { + return this.headers; + } + + @JsonIgnore + public HttpResponseStatus getStatus() { + return this.frame.getStatus(); + } + + @JsonIgnore + public Long getTransportRequestId() { + return this.getHeader(RntbdResponseHeader.TransportRequestID); + } + + static RntbdResponse decode(final ByteBuf in) { + + in.markReaderIndex(); + + final RntbdResponseStatus frame = RntbdResponseStatus.decode(in); + final RntbdResponseHeaders headers = RntbdResponseHeaders.decode(in.readSlice(frame.getHeadersLength())); + + final boolean hasPayload = headers.isPayloadPresent(); + final ByteBuf content; + + if (hasPayload) { + + if (!RntbdFramer.canDecodePayload(in)) { + in.resetReaderIndex(); + return null; + } + + content = in.readSlice(in.readIntLE()); + + } else { + + content = new EmptyByteBuf(in.alloc()); + } + + return new RntbdResponse(frame, headers, content); + } + + public void 
encode(final ByteBuf out) { + + final int start = out.writerIndex(); + + this.frame.encode(out); + this.headers.encode(out); + + final int length = out.writerIndex() - start; + checkState(length == this.frame.getLength()); + + if (this.hasPayload()) { + out.writeIntLE(this.content.readableBytes()); + out.writeBytes(this.content); + } else if (this.content.readableBytes() > 0) { + throw new IllegalStateException(); + } + } + + @JsonIgnore + @SuppressWarnings("unchecked") + public T getHeader(final RntbdResponseHeader header) { + return (T)this.headers.get(header).getValue(); + } + + public boolean hasPayload() { + return this.headers.isPayloadPresent(); + } + + /** + * Returns the reference count of this object. If {@code 0}, it means this object has been deallocated. + */ + @Override + public int refCnt() { + return this.referenceCount.get(); + } + + /** + * Decreases the reference count by {@code 1} and deallocate this object if the reference count reaches {@code 0} + * + * @return {@code true} if and only if the reference count became {@code 0} and this object is de-allocated + */ + @Override + public boolean release() { + return this.release(1); + } + + /** + * Decreases the reference count by {@code decrement} and de-allocates this object if the reference count reaches {@code 0} + * + * @param decrement amount of the decrease + * @return {@code true} if and only if the reference count became {@code 0} and this object has been de-allocated + */ + @Override + public boolean release(final int decrement) { + + return this.referenceCount.getAndAccumulate(decrement, (value, n) -> { + value = value - min(value, n); + if (value == 0) { + assert this.headers != null && this.content != null; + this.headers.releaseBuffers(); + this.content.release(); + } + return value; + }) == 0; + } + + /** + * Increases the reference count by {@code 1}. + */ + @Override + public ReferenceCounted retain() { + this.referenceCount.incrementAndGet(); + return this; + } + + /** + * Increases the reference count by the specified {@code increment}. + * + * @param increment amount of the increase + */ + @Override + public ReferenceCounted retain(final int increment) { + this.referenceCount.addAndGet(increment); + return this; + } + + StoreResponse toStoreResponse(final RntbdContext context) { + + checkNotNull(context, "context"); + final int length = this.content.readableBytes(); + + return new StoreResponse( + this.getStatus().code(), + this.headers.asList(context, this.getActivityId()), + length == 0 ? null : this.content.readCharSequence(length, StandardCharsets.UTF_8).toString() + ); + } + + @Override + public String toString() { + return simpleClassName + '(' + RntbdObjectMapper.toJson(this) + ')'; + } + + /** + * Records the current access location of this object for debugging purposes + *
+ * If this object is determined to be leaked, the information recorded by this operation will be provided to you + * via {@link ResourceLeakDetector}. This method is a shortcut to {@link #touch(Object) touch(null)}. + */ + @Override + public ReferenceCounted touch() { + return this; + } + + /** + * Records the current access location of this object with additional arbitrary information for debugging purposes + *
+ * If this object is determined to be leaked, the information recorded by this operation will be + * provided to you via {@link ResourceLeakDetector}. + * + * @param hint information useful for debugging (unused) + */ + @Override + public ReferenceCounted touch(final Object hint) { + return this; + } + + private static class PayloadSerializer extends StdSerializer { + + public PayloadSerializer() { + super(ByteBuf.class, true); + } + + @Override + public void serialize(final ByteBuf value, final JsonGenerator generator, final SerializerProvider provider) throws IOException { + + final int length = value.readableBytes(); + + generator.writeStartObject(); + generator.writeObjectField("length", length); + generator.writeObjectField("content", ByteBufUtil.hexDump(value, 0, length)); + generator.writeEndObject(); + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdResponseDecoder.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdResponseDecoder.java new file mode 100644 index 0000000000000..5c26cdfe0d490 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdResponseDecoder.java @@ -0,0 +1,62 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +package com.azure.data.cosmos.internal.directconnectivity.rntbd; + +import io.netty.buffer.ByteBuf; +import io.netty.channel.ChannelHandlerContext; +import io.netty.handler.codec.ByteToMessageDecoder; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.List; + +public final class RntbdResponseDecoder extends ByteToMessageDecoder { + + private static final Logger Logger = LoggerFactory.getLogger(RntbdResponseDecoder.class); + + /** + * Deserialize from an input {@link ByteBuf} to an {@link RntbdResponse} instance + *
+ * + * This method is called until it reads no bytes from the {@link ByteBuf} or there is no more data to be read. + * + * @param context the {@link ChannelHandlerContext} to which this {@link RntbdResponseDecoder} belongs + * @param in the {@link ByteBuf} from which data to be decoded is read + * @param out the {@link List} to which decoded messages are added + */ + @Override + protected void decode(final ChannelHandlerContext context, final ByteBuf in, final List out) { + + if (RntbdFramer.canDecodeHead(in)) { + + final RntbdResponse response = RntbdResponse.decode(in); + + if (response != null) { + Logger.debug("{} DECODE COMPLETE: {}", context.channel(), response); + in.discardReadBytes(); + out.add(response.retain()); + } + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdResponseHeaders.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdResponseHeaders.java new file mode 100644 index 0000000000000..de97775a2a527 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdResponseHeaders.java @@ -0,0 +1,535 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE.
+ * + */ + +package com.azure.data.cosmos.internal.directconnectivity.rntbd; + +import com.fasterxml.jackson.annotation.JsonFilter; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectWriter; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import io.netty.buffer.ByteBuf; +import io.netty.handler.codec.CorruptedFrameException; + +import java.math.BigDecimal; +import java.util.AbstractMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.function.BiConsumer; +import java.util.function.Function; + +import static com.azure.data.cosmos.internal.HttpConstants.HttpHeaders; +import static com.azure.data.cosmos.internal.directconnectivity.WFConstants.BackendHeaders; +import static com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdConstants.RntbdIndexingDirective; +import static com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdConstants.RntbdResponseHeader; + +@JsonFilter("RntbdToken") +class RntbdResponseHeaders extends RntbdTokenStream { + + // region Fields + + @JsonProperty + private final RntbdToken LSN; + @JsonProperty + private final RntbdToken collectionLazyIndexProgress; + @JsonProperty + private final RntbdToken collectionPartitionIndex; + @JsonProperty + private final RntbdToken collectionSecurityIdentifier; + @JsonProperty + private final RntbdToken collectionServiceIndex; + @JsonProperty + private final RntbdToken collectionUpdateProgress; + @JsonProperty + private final RntbdToken continuationToken; + @JsonProperty + private final RntbdToken currentReplicaSetSize; + @JsonProperty + private final RntbdToken currentWriteQuorum; + @JsonProperty + private final RntbdToken databaseAccountId; + @JsonProperty + private final RntbdToken disableRntbdChannel; + @JsonProperty + private final RntbdToken eTag; + @JsonProperty + private final RntbdToken globalCommittedLSN; + @JsonProperty + private final RntbdToken hasTentativeWrites; + @JsonProperty + private final RntbdToken indexTermsGenerated; + @JsonProperty + private final RntbdToken indexingDirective; + @JsonProperty + private final RntbdToken isRUPerMinuteUsed; + @JsonProperty + private final RntbdToken itemCount; + @JsonProperty + private final RntbdToken itemLSN; + @JsonProperty + private final RntbdToken itemLocalLSN; + @JsonProperty + private final RntbdToken lastStateChangeDateTime; + @JsonProperty + private final RntbdToken localLSN; + @JsonProperty + private final RntbdToken logResults; + @JsonProperty + private final RntbdToken numberOfReadRegions; + @JsonProperty + private final RntbdToken offerReplacePending; + @JsonProperty + private final RntbdToken ownerFullName; + @JsonProperty + private final RntbdToken ownerId; + @JsonProperty + private final RntbdToken partitionKeyRangeId; + @JsonProperty + private final RntbdToken payloadPresent; + @JsonProperty + private final RntbdToken queriesPerformed; + @JsonProperty + private final RntbdToken queryMetrics; + @JsonProperty + private final RntbdToken quorumAckedLSN; + @JsonProperty + private final RntbdToken quorumAckedLocalLSN; + @JsonProperty + private final RntbdToken readsPerformed; + @JsonProperty + private final RntbdToken requestCharge; + @JsonProperty + private final RntbdToken requestValidationFailure; + @JsonProperty + private final RntbdToken restoreState; + @JsonProperty + private final RntbdToken retryAfterMilliseconds; + @JsonProperty + private final 
RntbdToken schemaVersion; + @JsonProperty + private final RntbdToken scriptsExecuted; + @JsonProperty + private final RntbdToken serverDateTimeUtc; + @JsonProperty + private final RntbdToken sessionToken; + @JsonProperty + private final RntbdToken shareThroughput; + @JsonProperty + private final RntbdToken storageMaxResoureQuota; + @JsonProperty + private final RntbdToken storageResourceQuotaUsage; + @JsonProperty + private final RntbdToken subStatus; + @JsonProperty + private final RntbdToken transportRequestID; + @JsonProperty + private final RntbdToken writesPerformed; + @JsonProperty + private final RntbdToken xpRole; + + // endregion + + private RntbdResponseHeaders() { + + super(RntbdResponseHeader.set, RntbdResponseHeader.map); + + this.LSN = this.get(RntbdResponseHeader.LSN); + this.collectionLazyIndexProgress = this.get(RntbdResponseHeader.CollectionLazyIndexProgress); + this.collectionPartitionIndex = this.get(RntbdResponseHeader.CollectionPartitionIndex); + this.collectionSecurityIdentifier = this.get(RntbdResponseHeader.CollectionSecurityIdentifier); + this.collectionServiceIndex = this.get(RntbdResponseHeader.CollectionServiceIndex); + this.collectionUpdateProgress = this.get(RntbdResponseHeader.CollectionUpdateProgress); + this.continuationToken = this.get(RntbdResponseHeader.ContinuationToken); + this.currentReplicaSetSize = this.get(RntbdResponseHeader.CurrentReplicaSetSize); + this.currentWriteQuorum = this.get(RntbdResponseHeader.CurrentWriteQuorum); + this.databaseAccountId = this.get(RntbdResponseHeader.DatabaseAccountId); + this.disableRntbdChannel = this.get(RntbdResponseHeader.DisableRntbdChannel); + this.eTag = this.get(RntbdResponseHeader.ETag); + this.globalCommittedLSN = this.get(RntbdResponseHeader.GlobalCommittedLSN); + this.hasTentativeWrites = this.get(RntbdResponseHeader.HasTentativeWrites); + this.indexTermsGenerated = this.get(RntbdResponseHeader.IndexTermsGenerated); + this.indexingDirective = this.get(RntbdResponseHeader.IndexingDirective); + this.isRUPerMinuteUsed = this.get(RntbdResponseHeader.IsRUPerMinuteUsed); + this.itemCount = this.get(RntbdResponseHeader.ItemCount); + this.itemLSN = this.get(RntbdResponseHeader.ItemLSN); + this.itemLocalLSN = this.get(RntbdResponseHeader.ItemLocalLSN); + this.lastStateChangeDateTime = this.get(RntbdResponseHeader.LastStateChangeDateTime); + this.localLSN = this.get(RntbdResponseHeader.LocalLSN); + this.logResults = this.get(RntbdResponseHeader.LogResults); + this.numberOfReadRegions = this.get(RntbdResponseHeader.NumberOfReadRegions); + this.offerReplacePending = this.get(RntbdResponseHeader.OfferReplacePending); + this.ownerFullName = this.get(RntbdResponseHeader.OwnerFullName); + this.ownerId = this.get(RntbdResponseHeader.OwnerId); + this.partitionKeyRangeId = this.get(RntbdResponseHeader.PartitionKeyRangeId); + this.payloadPresent = this.get(RntbdResponseHeader.PayloadPresent); + this.queriesPerformed = this.get(RntbdResponseHeader.QueriesPerformed); + this.queryMetrics = this.get(RntbdResponseHeader.QueryMetrics); + this.quorumAckedLSN = this.get(RntbdResponseHeader.QuorumAckedLSN); + this.quorumAckedLocalLSN = this.get(RntbdResponseHeader.QuorumAckedLocalLSN); + this.readsPerformed = this.get(RntbdResponseHeader.ReadsPerformed); + this.requestCharge = this.get(RntbdResponseHeader.RequestCharge); + this.requestValidationFailure = this.get(RntbdResponseHeader.RequestValidationFailure); + this.restoreState = this.get(RntbdResponseHeader.RestoreState); + this.retryAfterMilliseconds = 
this.get(RntbdResponseHeader.RetryAfterMilliseconds); + this.schemaVersion = this.get(RntbdResponseHeader.SchemaVersion); + this.scriptsExecuted = this.get(RntbdResponseHeader.ScriptsExecuted); + this.serverDateTimeUtc = this.get(RntbdResponseHeader.ServerDateTimeUtc); + this.sessionToken = this.get(RntbdResponseHeader.SessionToken); + this.shareThroughput = this.get(RntbdResponseHeader.ShareThroughput); + this.storageMaxResoureQuota = this.get(RntbdResponseHeader.StorageMaxResoureQuota); + this.storageResourceQuotaUsage = this.get(RntbdResponseHeader.StorageResourceQuotaUsage); + this.subStatus = this.get(RntbdResponseHeader.SubStatus); + this.transportRequestID = this.get(RntbdResponseHeader.TransportRequestID); + this.writesPerformed = this.get(RntbdResponseHeader.WritesPerformed); + this.xpRole = this.get(RntbdResponseHeader.XPRole); + } + + boolean isPayloadPresent() { + return this.payloadPresent.isPresent() && this.payloadPresent.getValue(Byte.class) != 0x00; + } + + List> asList(final RntbdContext context, final UUID activityId) { + + final ImmutableList.Builder> builder = ImmutableList.builderWithExpectedSize(this.computeCount() + 2); + builder.add(new Entry(HttpHeaders.SERVER_VERSION, context.getServerVersion())); + builder.add(new Entry(HttpHeaders.ACTIVITY_ID, activityId.toString())); + + this.collectEntries((token, toEntry) -> { + if (token.isPresent()) { + builder.add(toEntry.apply(token)); + } + }); + + return builder.build(); + } + + public Map asMap(final RntbdContext context, final UUID activityId) { + + final ImmutableMap.Builder builder = ImmutableMap.builderWithExpectedSize(this.computeCount() + 2); + builder.put(new Entry(HttpHeaders.SERVER_VERSION, context.getServerVersion())); + builder.put(new Entry(HttpHeaders.ACTIVITY_ID, activityId.toString())); + + this.collectEntries((token, toEntry) -> { + if (token.isPresent()) { + builder.put(toEntry.apply(token)); + } + }); + + return builder.build(); + } + + static RntbdResponseHeaders decode(final ByteBuf in) { + final RntbdResponseHeaders headers = new RntbdResponseHeaders(); + RntbdTokenStream.decode(in, headers); + return headers; + } + + public static RntbdResponseHeaders fromMap(final Map map, final boolean payloadPresent) { + + final RntbdResponseHeaders headers = new RntbdResponseHeaders(); + headers.payloadPresent.setValue(payloadPresent); + headers.setValues(map); + + return headers; + } + + public void setValues(final Map headers) { + + this.mapValue(this.LSN, BackendHeaders.LSN, Long::parseLong, headers); + this.mapValue(this.collectionLazyIndexProgress, HttpHeaders.COLLECTION_LAZY_INDEXING_PROGRESS, Integer::parseInt, headers); + this.mapValue(this.collectionLazyIndexProgress, BackendHeaders.COLLECTION_PARTITION_INDEX, Integer::parseInt, headers); + this.mapValue(this.collectionSecurityIdentifier, BackendHeaders.COLLECTION_SECURITY_IDENTIFIER, String::toString, headers); + this.mapValue(this.collectionServiceIndex, BackendHeaders.COLLECTION_SERVICE_INDEX, Integer::parseInt, headers); + this.mapValue(this.collectionUpdateProgress, HttpHeaders.COLLECTION_INDEX_TRANSFORMATION_PROGRESS, Integer::parseInt, headers); + this.mapValue(this.continuationToken, HttpHeaders.CONTINUATION, String::toString, headers); + this.mapValue(this.currentReplicaSetSize, BackendHeaders.CURRENT_REPLICA_SET_SIZE, Integer::parseInt, headers); + this.mapValue(this.currentWriteQuorum, BackendHeaders.CURRENT_WRITE_QUORUM, Integer::parseInt, headers); + this.mapValue(this.databaseAccountId, BackendHeaders.DATABASE_ACCOUNT_ID, 
String::toString, headers); + this.mapValue(this.disableRntbdChannel, HttpHeaders.DISABLE_RNTBD_CHANNEL, Boolean::parseBoolean, headers); + this.mapValue(this.eTag, HttpHeaders.E_TAG, String::toString, headers); + this.mapValue(this.globalCommittedLSN, BackendHeaders.GLOBAL_COMMITTED_LSN, Long::parseLong, headers); + this.mapValue(this.hasTentativeWrites, BackendHeaders.HAS_TENTATIVE_WRITES, Boolean::parseBoolean, headers); + this.mapValue(this.indexingDirective, HttpHeaders.INDEXING_DIRECTIVE, RntbdIndexingDirective::valueOf, headers); + this.mapValue(this.isRUPerMinuteUsed, BackendHeaders.IS_RU_PER_MINUTE_USED, Byte::parseByte, headers); + this.mapValue(this.itemCount, HttpHeaders.ITEM_COUNT, Integer::parseInt, headers); + this.mapValue(this.itemLSN, BackendHeaders.ITEM_LSN, Long::parseLong, headers); + this.mapValue(this.itemLocalLSN, BackendHeaders.ITEM_LOCAL_LSN, Long::parseLong, headers); + this.mapValue(this.lastStateChangeDateTime, HttpHeaders.LAST_STATE_CHANGE_UTC, String::toString, headers); + this.mapValue(this.lastStateChangeDateTime, HttpHeaders.LAST_STATE_CHANGE_UTC, String::toString, headers); + this.mapValue(this.localLSN, BackendHeaders.LOCAL_LSN, Long::parseLong, headers); + this.mapValue(this.logResults, HttpHeaders.LOG_RESULTS, String::toString, headers); + this.mapValue(this.numberOfReadRegions, BackendHeaders.NUMBER_OF_READ_REGIONS, Integer::parseInt, headers); + this.mapValue(this.offerReplacePending, BackendHeaders.OFFER_REPLACE_PENDING, Boolean::parseBoolean, headers); + this.mapValue(this.ownerFullName, HttpHeaders.OWNER_FULL_NAME, String::toString, headers); + this.mapValue(this.ownerId, HttpHeaders.OWNER_ID, String::toString, headers); + this.mapValue(this.partitionKeyRangeId, BackendHeaders.PARTITION_KEY_RANGE_ID, String::toString, headers); + this.mapValue(this.queryMetrics, BackendHeaders.QUERY_METRICS, String::toString, headers); + this.mapValue(this.quorumAckedLSN, BackendHeaders.QUORUM_ACKED_LSN, Long::parseLong, headers); + this.mapValue(this.quorumAckedLocalLSN, BackendHeaders.QUORUM_ACKED_LOCAL_LSN, Long::parseLong, headers); + this.mapValue(this.requestCharge, HttpHeaders.REQUEST_CHARGE, Double::parseDouble, headers); + this.mapValue(this.requestValidationFailure, BackendHeaders.REQUEST_VALIDATION_FAILURE, Byte::parseByte, headers); + this.mapValue(this.restoreState, BackendHeaders.RESTORE_STATE, String::toString, headers); + this.mapValue(this.retryAfterMilliseconds, HttpHeaders.RETRY_AFTER_IN_MILLISECONDS, Integer::parseInt, headers); + this.mapValue(this.schemaVersion, HttpHeaders.SCHEMA_VERSION, String::toString, headers); + this.mapValue(this.serverDateTimeUtc, HttpHeaders.X_DATE, String::toString, headers); + this.mapValue(this.sessionToken, HttpHeaders.SESSION_TOKEN, String::toString, headers); + this.mapValue(this.shareThroughput, BackendHeaders.SHARE_THROUGHPUT, Boolean::parseBoolean, headers); + this.mapValue(this.storageMaxResoureQuota, HttpHeaders.MAX_RESOURCE_QUOTA, String::toString, headers); + this.mapValue(this.storageResourceQuotaUsage, HttpHeaders.CURRENT_RESOURCE_QUOTA_USAGE, String::toString, headers); + this.mapValue(this.subStatus, BackendHeaders.SUB_STATUS, Integer::parseInt, headers); + this.mapValue(this.transportRequestID, HttpHeaders.TRANSPORT_REQUEST_ID, Integer::parseInt, headers); + this.mapValue(this.xpRole, BackendHeaders.XP_ROLE, Integer::parseInt, headers); + } + + @Override + public String toString() { + final ObjectWriter writer = RntbdObjectMapper.writer(); + try { + return writer.writeValueAsString(this); + } catch 
(final JsonProcessingException error) { + throw new CorruptedFrameException(error); + } + } + + private void collectEntries(final BiConsumer>> collector) { + + collector.accept(this.LSN, token -> + toLongEntry(BackendHeaders.LSN, token) + ); + + collector.accept(this.collectionLazyIndexProgress, token -> + toIntegerEntry(HttpHeaders.COLLECTION_LAZY_INDEXING_PROGRESS, token) + ); + + collector.accept(this.collectionPartitionIndex, token -> + toIntegerEntry(BackendHeaders.COLLECTION_PARTITION_INDEX, token) + ); + + collector.accept(this.collectionSecurityIdentifier, token -> + toStringEntry(BackendHeaders.COLLECTION_SECURITY_IDENTIFIER, token) + ); + + collector.accept(this.collectionServiceIndex, token -> + toIntegerEntry(BackendHeaders.COLLECTION_SERVICE_INDEX, token) + ); + + collector.accept(this.collectionUpdateProgress, token -> + toIntegerEntry(HttpHeaders.COLLECTION_INDEX_TRANSFORMATION_PROGRESS, token) + ); + + collector.accept(this.continuationToken, token -> + toStringEntry(HttpHeaders.CONTINUATION, token) + ); + + collector.accept(this.currentReplicaSetSize, token -> + toIntegerEntry(BackendHeaders.CURRENT_REPLICA_SET_SIZE, token) + ); + + collector.accept(this.currentWriteQuorum, token -> + toIntegerEntry(BackendHeaders.CURRENT_WRITE_QUORUM, token) + ); + + collector.accept(this.databaseAccountId, token -> + toStringEntry(BackendHeaders.DATABASE_ACCOUNT_ID, token) + ); + + collector.accept(this.disableRntbdChannel, token -> + toBooleanEntry(HttpHeaders.DISABLE_RNTBD_CHANNEL, token) + ); + + collector.accept(this.eTag, token -> + toStringEntry(HttpHeaders.E_TAG, token) + ); + + collector.accept(this.globalCommittedLSN, token -> + toLongEntry(BackendHeaders.GLOBAL_COMMITTED_LSN, token) + ); + + collector.accept(this.hasTentativeWrites, token -> + toBooleanEntry(BackendHeaders.HAS_TENTATIVE_WRITES, token) + ); + + collector.accept(this.indexingDirective, token -> + new Entry(HttpHeaders.INDEXING_DIRECTIVE, RntbdIndexingDirective.fromId(token.getValue(Byte.class)).name()) + ); + + collector.accept(this.isRUPerMinuteUsed, token -> + toByteEntry(BackendHeaders.IS_RU_PER_MINUTE_USED, token) + ); + + collector.accept(this.itemCount, token -> + toIntegerEntry(HttpHeaders.ITEM_COUNT, token) + ); + + collector.accept(this.itemLSN, token -> + toLongEntry(BackendHeaders.ITEM_LSN, token) + ); + + collector.accept(this.itemLocalLSN, token -> + toLongEntry(BackendHeaders.ITEM_LOCAL_LSN, token) + ); + + collector.accept(this.lastStateChangeDateTime, token -> + toStringEntry(HttpHeaders.LAST_STATE_CHANGE_UTC, token) + ); + + collector.accept(this.localLSN, token -> + toLongEntry(BackendHeaders.LOCAL_LSN, token) + ); + + collector.accept(this.logResults, token -> + toStringEntry(HttpHeaders.LOG_RESULTS, token) + ); + + collector.accept(this.numberOfReadRegions, token -> + toIntegerEntry(BackendHeaders.NUMBER_OF_READ_REGIONS, token) + ); + + collector.accept(this.offerReplacePending, token -> + toBooleanEntry(BackendHeaders.OFFER_REPLACE_PENDING, token) + ); + + collector.accept(this.ownerFullName, token -> + toStringEntry(HttpHeaders.OWNER_FULL_NAME, token) + ); + + collector.accept(this.ownerId, token -> + toStringEntry(HttpHeaders.OWNER_ID, token) + ); + + collector.accept(this.partitionKeyRangeId, token -> + toStringEntry(BackendHeaders.PARTITION_KEY_RANGE_ID, token) + ); + + collector.accept(this.queryMetrics, token -> + toStringEntry(BackendHeaders.QUERY_METRICS, token) + ); + + collector.accept(this.quorumAckedLSN, token -> + toLongEntry(BackendHeaders.QUORUM_ACKED_LSN, token) + ); + + 
collector.accept(this.quorumAckedLocalLSN, token -> + toLongEntry(BackendHeaders.QUORUM_ACKED_LOCAL_LSN, token) + ); + + collector.accept(this.requestCharge, token -> + toCurrencyEntry(HttpHeaders.REQUEST_CHARGE, token) + ); + + collector.accept(this.requestValidationFailure, token -> + toByteEntry(BackendHeaders.REQUEST_VALIDATION_FAILURE, token) + ); + + collector.accept(this.restoreState, token -> + toStringEntry(BackendHeaders.RESTORE_STATE, token) + ); + + collector.accept(this.retryAfterMilliseconds, token -> + toIntegerEntry(HttpHeaders.RETRY_AFTER_IN_MILLISECONDS, token) + ); + + collector.accept(this.schemaVersion, token -> + toStringEntry(HttpHeaders.SCHEMA_VERSION, token) + ); + + collector.accept(this.serverDateTimeUtc, token -> + toStringEntry(HttpHeaders.X_DATE, token) + ); + + collector.accept(this.sessionToken, token -> + this.toSessionTokenEntry(HttpHeaders.SESSION_TOKEN, token) + ); + + collector.accept(this.shareThroughput, token -> + toBooleanEntry(BackendHeaders.SHARE_THROUGHPUT, token) + ); + + collector.accept(this.storageMaxResoureQuota, token -> + toStringEntry(HttpHeaders.MAX_RESOURCE_QUOTA, token) + ); + + collector.accept(this.storageResourceQuotaUsage, token -> + toStringEntry(HttpHeaders.CURRENT_RESOURCE_QUOTA_USAGE, token) + ); + + collector.accept(this.subStatus, token -> + toIntegerEntry(BackendHeaders.SUB_STATUS, token) + ); + + collector.accept(this.transportRequestID, token -> + toIntegerEntry(HttpHeaders.TRANSPORT_REQUEST_ID, token) + ); + + collector.accept(this.xpRole, token -> + toIntegerEntry(BackendHeaders.XP_ROLE, token) + ); + } + + private void mapValue(final RntbdToken token, final String name, final Function parse, final Map headers) { + + final String value = headers.get(name); + + if (value != null) { + token.setValue(parse.apply(value)); + } + } + + private static Map.Entry toBooleanEntry(final String name, final RntbdToken token) { + return new Entry(name, String.valueOf(token.getValue(Byte.class) != 0)); + } + + private static Map.Entry toByteEntry(final String name, final RntbdToken token) { + return new Entry(name, Byte.toString(token.getValue(Byte.class))); + } + + private static Map.Entry toCurrencyEntry(final String name, final RntbdToken token) { + final BigDecimal value = new BigDecimal(Math.round(token.getValue(Double.class) * 100D)).scaleByPowerOfTen(-2); + return new Entry(name, value.toString()); + } + + private static Map.Entry toIntegerEntry(final String name, final RntbdToken token) { + return new Entry(name, Long.toString(token.getValue(Long.class))); + } + + private static Map.Entry toLongEntry(final String name, final RntbdToken token) { + return new Entry(name, Long.toString(token.getValue(Long.class))); + } + + private Map.Entry toSessionTokenEntry(final String name, final RntbdToken token) { + return new Entry(name, this.partitionKeyRangeId.getValue(String.class) + ":" + this.sessionToken.getValue(String.class)); + } + + private static Map.Entry toStringEntry(final String name, final RntbdToken token) { + return new Entry(name, token.getValue(String.class)); + } + + private static final class Entry extends AbstractMap.SimpleImmutableEntry { + Entry(final String name, final String value) { + super(name, value); + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdResponseStatus.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdResponseStatus.java new file mode 100644 index 0000000000000..c5c3ad68f36de --- 
/dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdResponseStatus.java @@ -0,0 +1,119 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +package com.azure.data.cosmos.internal.directconnectivity.rntbd; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonPropertyOrder; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectWriter; +import io.netty.buffer.ByteBuf; +import io.netty.handler.codec.CorruptedFrameException; +import io.netty.handler.codec.http.HttpResponseStatus; + +import java.util.UUID; + +@JsonPropertyOrder({ "length", "status", "activityId" }) +final class RntbdResponseStatus { + + // region Fields + + static final int LENGTH = Integer.BYTES // length + + Integer.BYTES // status + + 2 * Long.BYTES; // activityId + + @JsonProperty("activityId") + private final UUID activityId; + + @JsonProperty("length") + private final int length; + + private final HttpResponseStatus status; + + // endregion + + RntbdResponseStatus(final int length, final HttpResponseStatus status, final UUID activityId) { + this.length = length; + this.status = status; + this.activityId = activityId; + } + + public UUID getActivityId() { + return this.activityId; + } + + int getHeadersLength() { + return this.length - LENGTH; + } + + public int getLength() { + return this.length; + } + + public HttpResponseStatus getStatus() { + return this.status; + } + + @JsonProperty("status") + public int getStatusCode() { + return this.status.code(); + } + + static RntbdResponseStatus decode(final ByteBuf in) { + + final long length = in.readUnsignedIntLE(); + + if (!(LENGTH <= length && length <= Integer.MAX_VALUE)) { + final String reason = String.format("frame length: %d", length); + throw new CorruptedFrameException(reason); + } + + final int code = in.readIntLE(); + final HttpResponseStatus status = HttpResponseStatus.valueOf(code); + + if (status == null) { + final String reason = String.format("status code: %d", code); + throw new CorruptedFrameException(reason); + } + + final UUID activityId = RntbdUUID.decode(in); + return new RntbdResponseStatus((int)length, status, activityId); + } + + void encode(final ByteBuf out) { + out.writeIntLE(this.getLength()); + out.writeIntLE(this.getStatusCode()); + RntbdUUID.encode(this.getActivityId(), out); + } + + 
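+    // Worked example (a minimal sketch, not part of this change): the fixed preamble is
+    // Integer.BYTES (length) + Integer.BYTES (status) + 2 * Long.BYTES (activityId) = 24 bytes, so a
+    // headerless frame encodes to exactly RntbdResponseStatus.LENGTH bytes and round-trips through
+    // encode/decode. The buffer allocation below assumes Netty's default ByteBufAllocator.
+    //
+    //     final ByteBuf buffer = ByteBufAllocator.DEFAULT.buffer();
+    //     final RntbdResponseStatus status = new RntbdResponseStatus(
+    //         RntbdResponseStatus.LENGTH, HttpResponseStatus.OK, UUID.randomUUID());
+    //     status.encode(buffer);                                        // writes 24 bytes
+    //     final RntbdResponseStatus decoded = RntbdResponseStatus.decode(buffer);
+    //     assert decoded.getHeadersLength() == 0 && decoded.getStatusCode() == 200;
+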
@Override + public String toString() { + final ObjectWriter writer = RntbdObjectMapper.writer(); + try { + return writer.writeValueAsString(this); + } catch (final JsonProcessingException error) { + throw new CorruptedFrameException(error); + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdServiceEndpoint.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdServiceEndpoint.java new file mode 100644 index 0000000000000..c9a131243f08c --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdServiceEndpoint.java @@ -0,0 +1,354 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ + +package com.azure.data.cosmos.internal.directconnectivity.rntbd; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.GoneException; +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.directconnectivity.RntbdTransportClient.Options; +import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.databind.SerializerProvider; +import com.fasterxml.jackson.databind.annotation.JsonSerialize; +import com.fasterxml.jackson.databind.ser.std.StdSerializer; +import com.google.common.collect.ImmutableMap; +import io.netty.bootstrap.Bootstrap; +import io.netty.channel.Channel; +import io.netty.channel.ChannelFuture; +import io.netty.channel.ChannelOption; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.nio.NioSocketChannel; +import io.netty.handler.logging.LogLevel; +import io.netty.handler.ssl.SslContext; +import io.netty.util.concurrent.DefaultThreadFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.net.SocketAddress; +import java.net.URI; +import java.util.UUID; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; +import java.util.stream.Stream; + +import static com.google.common.base.Preconditions.checkNotNull; +import static com.google.common.base.Preconditions.checkState; + +@JsonSerialize(using = RntbdServiceEndpoint.JsonSerializer.class) +public final class RntbdServiceEndpoint implements RntbdEndpoint { + + private static final AtomicLong instanceCount = new AtomicLong(); + private static final Logger logger = LoggerFactory.getLogger(RntbdServiceEndpoint.class); + private static final String namePrefix = RntbdServiceEndpoint.class.getSimpleName() + '-'; + + private final RntbdClientChannelPool channelPool; + private final AtomicBoolean closed; + private final RntbdMetrics metrics; + private final String name; + private final SocketAddress remoteAddress; + private final RntbdRequestTimer requestTimer; + + // region Constructors + + private RntbdServiceEndpoint( + final Config config, final NioEventLoopGroup group, final RntbdRequestTimer timer, final URI physicalAddress + ) { + + final Bootstrap bootstrap = new Bootstrap() + .channel(NioSocketChannel.class) + .group(group) + .option(ChannelOption.AUTO_READ, true) + .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, config.getConnectionTimeout()) + .option(ChannelOption.SO_KEEPALIVE, true) + .remoteAddress(physicalAddress.getHost(), physicalAddress.getPort()); + + this.name = RntbdServiceEndpoint.namePrefix + instanceCount.incrementAndGet(); + this.channelPool = new RntbdClientChannelPool(bootstrap, config); + this.remoteAddress = bootstrap.config().remoteAddress(); + this.metrics = new RntbdMetrics(this.name); + this.closed = new AtomicBoolean(); + this.requestTimer = timer; + } + + // endregion + + // region Accessors + + @Override + public String getName() { + return this.name; + } + + // endregion + + // region Methods + + @Override + public void close() { + if (this.closed.compareAndSet(false, true)) { + this.channelPool.close(); + this.metrics.close(); + } + } + + public RntbdRequestRecord request(final RntbdRequestArgs args) { + + this.throwIfClosed(); + + if (logger.isDebugEnabled()) { + args.traceOperation(logger, null, "request"); + logger.debug("\n {}\n {}\n REQUEST", this, args); + } + + final RntbdRequestRecord requestRecord = this.write(args); + 
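        // write() acquires a pooled channel and queues the request; the returned record completes
        // when the response arrives, or completes exceptionally if the connection cannot be established.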
this.metrics.incrementRequestCount(); + + requestRecord.whenComplete((response, error) -> { + + args.traceOperation(logger, null, "requestComplete", response, error); + this.metrics.incrementResponseCount(); + + if (error != null) { + this.metrics.incrementErrorResponseCount(); + } + + if (logger.isDebugEnabled()) { + if (error == null) { + final int status = response.getStatus(); + logger.debug("\n [{}]\n {}\n request succeeded with response status: {}", this, args, status); + } else { + logger.debug("\n [{}]\n {}\n request failed due to ", this, args, error); + } + } + }); + + return requestRecord; + } + + @Override + public String toString() { + return RntbdObjectMapper.toJson(this); + } + + // endregion + + // region Privates + + private void releaseToPool(final Channel channel) { + + logger.debug("\n [{}]\n {}\n RELEASE", this, channel); + + this.channelPool.release(channel).addListener(future -> { + if (logger.isDebugEnabled()) { + if (future.isSuccess()) { + logger.debug("\n [{}]\n {}\n release succeeded", this, channel); + } else { + logger.debug("\n [{}]\n {}\n release failed due to {}", this, channel, future.cause()); + } + } + }); + } + + private void throwIfClosed() { + checkState(!this.closed.get(), "%s is closed", this); + } + + private RntbdRequestRecord write(final RntbdRequestArgs requestArgs) { + + final RntbdRequestRecord requestRecord = new RntbdRequestRecord(requestArgs, this.requestTimer); + logger.debug("\n [{}]\n {}\n WRITE", this, requestArgs); + + this.channelPool.acquire().addListener(connected -> { + + if (connected.isSuccess()) { + + requestArgs.traceOperation(logger, null, "write"); + final Channel channel = (Channel)connected.get(); + this.releaseToPool(channel); + + channel.write(requestRecord).addListener((ChannelFuture future) -> { + requestArgs.traceOperation(logger, null, "writeComplete", channel); + if (!future.isSuccess()) { + this.metrics.incrementErrorResponseCount(); + } + }); + + return; + } + + final UUID activityId = requestArgs.getActivityId(); + final Throwable cause = connected.cause(); + + if (connected.isCancelled()) { + + logger.debug("\n [{}]\n {}\n write cancelled: {}", this, requestArgs, cause); + requestRecord.cancel(true); + + } else { + + logger.debug("\n [{}]\n {}\n write failed due to {} ", this, requestArgs, cause); + final String reason = cause.getMessage(); + + final GoneException goneException = new GoneException( + String.format("failed to establish connection to %s: %s", this.remoteAddress, reason), + cause instanceof Exception ? 
(Exception)cause : new IOException(reason, cause), + ImmutableMap.of(HttpConstants.HttpHeaders.ACTIVITY_ID, activityId.toString()), + requestArgs.getReplicaPath() + ); + + BridgeInternal.setRequestHeaders(goneException, requestArgs.getServiceRequest().getHeaders()); + requestRecord.completeExceptionally(goneException); + } + }); + + return requestRecord; + } + + // endregion + + // region Types + + static final class JsonSerializer extends StdSerializer { + + public JsonSerializer() { + this(null); + } + + public JsonSerializer(Class type) { + super(type); + } + + @Override + public void serialize(RntbdServiceEndpoint value, JsonGenerator generator, SerializerProvider provider) + throws IOException { + + generator.writeStartObject(); + generator.writeStringField(value.name, value.remoteAddress.toString()); + generator.writeObjectField("channelPool", value.channelPool); + generator.writeEndObject(); + } + } + + public static final class Provider implements RntbdEndpoint.Provider { + + private static final Logger logger = LoggerFactory.getLogger(Provider.class); + + private final AtomicBoolean closed = new AtomicBoolean(); + private final Config config; + private final ConcurrentHashMap endpoints = new ConcurrentHashMap<>(); + private final NioEventLoopGroup eventLoopGroup; + private final RntbdRequestTimer requestTimer; + + public Provider(final Options options, final SslContext sslContext) { + + checkNotNull(options, "options"); + checkNotNull(sslContext, "sslContext"); + + final DefaultThreadFactory threadFactory = new DefaultThreadFactory("CosmosEventLoop", true); + final int threadCount = Runtime.getRuntime().availableProcessors(); + final LogLevel wireLogLevel; + + if (logger.isTraceEnabled()) { + wireLogLevel = LogLevel.TRACE; + } else if (logger.isDebugEnabled()) { + wireLogLevel = LogLevel.DEBUG; + } else { + wireLogLevel = null; + } + + this.config = new Config(options, sslContext, wireLogLevel); + this.requestTimer = new RntbdRequestTimer(config.getRequestTimeout()); + this.eventLoopGroup = new NioEventLoopGroup(threadCount, threadFactory); + } + + @Override + public void close() throws RuntimeException { + + if (this.closed.compareAndSet(false, true)) { + + this.requestTimer.close(); + + for (final RntbdEndpoint endpoint : this.endpoints.values()) { + endpoint.close(); + } + + this.eventLoopGroup.shutdownGracefully().addListener(future -> { + if (future.isSuccess()) { + logger.debug("\n [{}]\n closed endpoints", this); + return; + } + logger.error("\n [{}]\n failed to close endpoints due to ", this, future.cause()); + }); + return; + } + + logger.debug("\n [{}]\n already closed", this); + } + + @Override + public Config config() { + return this.config; + } + + @Override + public int count() { + return this.endpoints.size(); + } + + @Override + public RntbdEndpoint get(URI physicalAddress) { + return endpoints.computeIfAbsent(physicalAddress.getAuthority(), authority -> + new RntbdServiceEndpoint(config, eventLoopGroup, requestTimer, physicalAddress) + ); + } + + @Override + public Stream list() { + return this.endpoints.values().stream(); + } + + private void deleteEndpoint(final URI physicalAddress) { + + // TODO: DANOBLE: Utilize this method of tearing down unhealthy endpoints + // Specifically, ensure that this method is called when a Read/WriteTimeoutException occurs or a health + // check request fails. This perhaps likely requires a change to RntbdClientChannelPool. 
+ // Links: + // https://msdata.visualstudio.com/CosmosDB/_workitems/edit/331552 + // https://msdata.visualstudio.com/CosmosDB/_workitems/edit/331593 + + checkNotNull(physicalAddress, "physicalAddress: %s", physicalAddress); + + final String authority = physicalAddress.getAuthority(); + final RntbdEndpoint endpoint = this.endpoints.remove(authority); + + if (endpoint != null) { + endpoint.close(); + } + } + } + + // endregion +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdToken.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdToken.java new file mode 100644 index 0000000000000..513bc27b67924 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdToken.java @@ -0,0 +1,234 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ + +package com.azure.data.cosmos.internal.directconnectivity.rntbd; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonPropertyOrder; +import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.databind.SerializerProvider; +import com.fasterxml.jackson.databind.ser.PropertyWriter; +import com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter; +import io.netty.buffer.ByteBuf; + +import static com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdConstants.RntbdHeader; +import static com.google.common.base.Preconditions.checkArgument; +import static com.google.common.base.Preconditions.checkNotNull; + +@JsonPropertyOrder({ "id", "name", "type", "present", "required", "value" }) +final class RntbdToken { + + // region Fields + + private static final int HEADER_LENGTH = Short.BYTES + Byte.BYTES; + + static { + RntbdObjectMapper.registerPropertyFilter(RntbdToken.class, RntbdToken.PropertyFilter.class); + } + + private final RntbdHeader header; + private int length; + private Object value; + + // endregion + + // region Constructors + + private RntbdToken(final RntbdHeader header) { + checkNotNull(header, "header"); + this.header = header; + this.value = null; + this.length = Integer.MIN_VALUE; + } + + // endregion + + // region Accessors + + @JsonProperty + public short getId() { + return this.header.id(); + } + + @JsonProperty + public String getName() { + return this.header.name(); + } + + @JsonProperty + public RntbdTokenType getTokenType() { + return this.header.type(); + } + + @JsonProperty + public Object getValue() { + + if (this.value == null) { + return this.header.type().codec().defaultValue(); + } + + if (this.value instanceof ByteBuf) { + final ByteBuf buffer = (ByteBuf)this.value; + this.value = this.header.type().codec().read(buffer); + buffer.release(); + } else { + this.value = this.header.type().codec().convert(this.value); + } + + return this.value; + } + + public T getValue(final Class cls) { + return cls.cast(this.getValue()); + } + + @JsonProperty + public void setValue(final Object value) { + this.ensureValid(value); + this.length = Integer.MIN_VALUE; + this.value = value; + } + + @JsonIgnore + public final Class getValueType() { + return this.header.type().codec().valueType(); + } + + @JsonProperty + public boolean isPresent() { + return this.value != null; + } + + @JsonProperty + public boolean isRequired() { + return this.header.isRequired(); + } + + // endregion + + // region Methods + + public int computeLength() { + + if (!this.isPresent()) { + return 0; + } + + if (this.value instanceof ByteBuf) { + final ByteBuf buffer = (ByteBuf)this.value; + assert buffer.readerIndex() == 0; + return HEADER_LENGTH + buffer.readableBytes(); + } + + if (this.length == Integer.MIN_VALUE) { + this.length = HEADER_LENGTH + this.header.type().codec().computeLength(this.value); + } + + return this.length; + } + + public static RntbdToken create(final RntbdHeader header) { + return new RntbdToken(header); + } + + public void decode(final ByteBuf in) { + + checkNotNull(in, "in"); + + if (this.value instanceof ByteBuf) { + ((ByteBuf)this.value).release(); + } + + this.value = this.header.type().codec().readSlice(in).retain(); // No data transfer until the first call to RntbdToken.getValue + } + + public void encode(final ByteBuf out) { + + checkNotNull(out, "out"); + + if (!this.isPresent()) { + if (this.isRequired()) { + final String 
message = String.format("Missing value for required header: %s", this); + throw new IllegalStateException(message); + } + return; + } + + out.writeShortLE(this.getId()); + out.writeByte(this.getTokenType().id()); + + if (this.value instanceof ByteBuf) { + out.writeBytes((ByteBuf)this.value); + } else { + this.ensureValid(this.value); + this.header.type().codec().write(this.value, out); + } + } + + public void releaseBuffer() { + if (this.value instanceof ByteBuf) { + final ByteBuf buffer = (ByteBuf)this.value; + buffer.release(); + } + } + + @Override + public String toString() { + return RntbdObjectMapper.toJson(this); + } + + // endregion + + // region Privates + + private void ensureValid(final Object value) { + checkNotNull(value, "value"); + checkArgument(this.header.type().codec().isValid(value), "value: %s", value.getClass()); + } + + // endregion + + // region Types + + static class PropertyFilter extends SimpleBeanPropertyFilter { + + @Override + public void serializeAsField(final Object object, final JsonGenerator generator, final SerializerProvider provider, final PropertyWriter writer) throws Exception { + + if (generator.canOmitFields()) { + + final Object value = writer.getMember().getValue(object); + + if (value instanceof RntbdToken && !((RntbdToken)value).isPresent()) { + return; + } + } + + writer.serializeAsField(object, generator, provider); + } + } + + // endregion +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdTokenStream.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdTokenStream.java new file mode 100644 index 0000000000000..7ddd442819527 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdTokenStream.java @@ -0,0 +1,150 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ + +package com.azure.data.cosmos.internal.directconnectivity.rntbd; + +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Maps; +import io.netty.buffer.ByteBuf; + +import java.util.stream.Collector; + +import static com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdConstants.RntbdHeader; +import static com.google.common.base.Preconditions.checkNotNull; + +@SuppressWarnings("UnstableApiUsage") +abstract class RntbdTokenStream & RntbdHeader> { + + final ImmutableMap headers; + final ImmutableMap tokens; + + RntbdTokenStream(final ImmutableSet headers, final ImmutableMap ids) { + + checkNotNull(headers, "headers"); + checkNotNull(ids, "ids"); + + final Collector> collector = Maps.toImmutableEnumMap(h -> h, RntbdToken::create); + this.tokens = headers.stream().collect(collector); + this.headers = ids; + } + + final int computeCount() { + + int count = 0; + + for (final RntbdToken token : this.tokens.values()) { + if (token.isPresent()) { + ++count; + } + } + + return count; + } + + final int computeLength() { + + int total = 0; + + for (final RntbdToken token : this.tokens.values()) { + total += token.computeLength(); + } + + return total; + } + + static > T decode(final ByteBuf in, final T stream) { + + while (in.readableBytes() > 0) { + + final short id = in.readShortLE(); + final RntbdTokenType type = RntbdTokenType.fromId(in.readByte()); + + RntbdToken token = stream.tokens.get(stream.headers.get(id)); + + if (token == null) { + token = RntbdToken.create(new UndefinedHeader(id, type)); + } + + token.decode(in); + } + + for (final RntbdToken token : stream.tokens.values()) { + if (!token.isPresent() && token.isRequired()) { + final String reason = String.format("Required token not found on RNTBD stream: type: %s, identifier: %s", + token.getTokenType(), token.getId()); + throw new IllegalStateException(reason); + } + } + + return stream; + } + + final void encode(final ByteBuf out) { + for (final RntbdToken token : this.tokens.values()) { + token.encode(out); + } + } + + final RntbdToken get(final T header) { + return this.tokens.get(header); + } + + final void releaseBuffers() { + for (final RntbdToken token : this.tokens.values()) { + token.releaseBuffer(); + } + } + + private static final class UndefinedHeader implements RntbdHeader { + + private final short id; + private final RntbdTokenType type; + + UndefinedHeader(final short id, final RntbdTokenType type) { + this.id = id; + this.type = type; + } + + @Override + public boolean isRequired() { + return false; + } + + @Override + public short id() { + return this.id; + } + + @Override + public String name() { + return "Undefined"; + } + + @Override + public RntbdTokenType type() { + return this.type; + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdTokenType.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdTokenType.java new file mode 100644 index 0000000000000..c92562376cb10 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdTokenType.java @@ -0,0 +1,896 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the 
rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +package com.azure.data.cosmos.internal.directconnectivity.rntbd; + +import com.google.common.base.Strings; +import com.google.common.base.Utf8; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufUtil; +import io.netty.handler.codec.DecoderException; + +import java.nio.charset.StandardCharsets; +import java.util.UUID; + +enum RntbdTokenType { + + // All values are encoded as little endian byte sequences except for Guid + // Guid values are serialized in Microsoft GUID byte order + // Reference: GUID structure and System.Guid type + + Byte((byte)0x00, RntbdByte.codec), // byte => byte + UShort((byte)0x01, RntbdUnsignedShort.codec), // short => int + ULong((byte)0x02, RntbdUnsignedInteger.codec), // int => long + Long((byte)0x03, RntbdInteger.codec), // int => int + ULongLong((byte)0x04, RntbdLong.codec), // long => long + LongLong((byte)0x05, RntbdLong.codec), // long => long + + Guid((byte)0x06, RntbdGuid.codec), // byte[16] => UUID + SmallString((byte)0x07, RntbdShortString.codec), // (byte, byte[0..255]) => String + String((byte)0x08, RntbdString.codec), // (short, byte[0..64KiB]) => String + ULongString((byte)0x09, RntbdLongString.codec), // (int, byte[0..2GiB-1]) => String + + SmallBytes((byte)0x0A, RntbdShortBytes.codec), // (byte, byte[0..255]) => byte[] + Bytes((byte)0x0B, RntbdBytes.codec), // (short, byte[0..64KiB]) => byte[] + ULongBytes((byte)0x0C, RntbdLongBytes.codec), // (int, byte[0..2GiB-1]) => byte[] + + Float((byte)0x0D, RntbdFloat.codec), // float => float + Double((byte)0x0E, RntbdDouble.codec), // double => double + + Invalid((byte)0xFF, RntbdNone.codec); // no data + + // region Implementation + + private Codec codec; + private byte id; + + RntbdTokenType(final byte id, final Codec codec) { + this.codec = codec; + this.id = id; + } + + public Codec codec() { + return this.codec; + } + + public static RntbdTokenType fromId(final byte value) { + + for (final RntbdTokenType tokenType : RntbdTokenType.values()) { + if (value == tokenType.id) { + return tokenType; + } + } + return Invalid; + } + + public byte id() { + return this.id; + } + + // endregion + + // region Types + + public interface Codec { + + int computeLength(Object value); + + Object convert(Object value); + + Object defaultValue(); + + boolean isValid(Object value); + + Object read(ByteBuf in); + + ByteBuf readSlice(ByteBuf in); + + Class valueType(); + + void write(Object value, ByteBuf out); + } + + private static class RntbdByte implements Codec { + + public static final Codec codec = new RntbdByte(); + + private RntbdByte() { + } + + @Override + public final int computeLength(final Object value) { + return java.lang.Byte.BYTES; + } + + 
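        // convert() below accepts either a Number or a Boolean and normalizes it to the single wire
        // byte used by the Byte token type listed above (true -> 0x01, false -> 0x00).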
@Override + public final Object convert(final Object value) { + + assert this.isValid(value); + + if (value instanceof Number) { + return ((Number)value).byteValue(); + } + return (boolean)value ? (byte)0x01 : (byte)0x00; + } + + @Override + public final Object defaultValue() { + return (byte)0; + } + + @Override + public final boolean isValid(final Object value) { + return value instanceof Number || value instanceof Boolean; + } + + @Override + public final Object read(final ByteBuf in) { + return in.readByte(); + } + + @Override + public final ByteBuf readSlice(final ByteBuf in) { + return in.readSlice(java.lang.Byte.BYTES); + } + + @Override + public final Class valueType() { + return java.lang.Byte.class; + } + + @Override + public final void write(final Object value, final ByteBuf out) { + assert this.isValid(value); + out.writeByte(value instanceof java.lang.Byte ? (byte)value : ((boolean)value ? 0x01 : 0x00)); + } + } + + private static class RntbdBytes implements Codec { + + public static final Codec codec = new RntbdBytes(); + private static final byte[] defaultValue = {}; + + private RntbdBytes() { + } + + @Override + public int computeLength(final Object value) { + assert this.isValid(value); + return Short.BYTES + ((byte[])value).length; + } + + @Override + public final Object convert(final Object value) { + assert this.isValid(value); + return value; + } + + @Override + public final Object defaultValue() { + return defaultValue; + } + + @Override + public boolean isValid(final Object value) { + return value instanceof byte[] && ((byte[])value).length < 0xFFFF; + } + + @Override + public Object read(final ByteBuf in) { + final int length = in.readUnsignedShortLE(); + return in.readBytes(length); + } + + @Override + public ByteBuf readSlice(final ByteBuf in) { + final int length = in.getUnsignedShortLE(in.readerIndex()); + return in.readSlice(Short.BYTES + length); + } + + @Override + public Class valueType() { + return java.lang.Byte[].class; + } + + @Override + public void write(final Object value, final ByteBuf out) { + + assert this.isValid(value); + + final byte[] bytes = (byte[])value; + final int length = bytes.length; + + if (length > 0xFFFF) { + throw new IllegalStateException(); + } + + out.writeShortLE((short)length); + out.writeBytes(bytes); + } + } + + private static class RntbdDouble implements Codec { + + public static final Codec codec = new RntbdDouble(); + + private RntbdDouble() { + } + + @Override + public final int computeLength(final Object value) { + assert this.isValid(value); + return java.lang.Double.BYTES; + } + + @Override + public final Object convert(final Object value) { + assert this.isValid(value); + return ((Number)value).doubleValue(); + } + + @Override + public final Object defaultValue() { + return 0.0D; + } + + @Override + public final boolean isValid(final Object value) { + return value instanceof Number; + } + + @Override + public final Object read(final ByteBuf in) { + return in.readDoubleLE(); + } + + @Override + public final ByteBuf readSlice(final ByteBuf in) { + return in.readSlice(java.lang.Double.BYTES); + } + + @Override + public Class valueType() { + return java.lang.Double.class; + } + + @Override + public final void write(final Object value, final ByteBuf out) { + assert this.isValid(value); + out.writeDoubleLE(((Number)value).doubleValue()); + } + } + + private static class RntbdFloat implements Codec { + + public static final Codec codec = new RntbdFloat(); + + private RntbdFloat() { + } + + @Override + public final int 
computeLength(final Object value) { + assert this.isValid(value); + return java.lang.Float.BYTES; + } + + @Override + public final Object convert(final Object value) { + assert this.isValid(value); + return ((Number)value).floatValue(); + } + + @Override + public final Object defaultValue() { + return 0.0F; + } + + @Override + public final boolean isValid(final Object value) { + return value instanceof Number; + } + + @Override + public final Object read(final ByteBuf in) { + return in.readFloatLE(); + } + + @Override + public final ByteBuf readSlice(final ByteBuf in) { + return in.readSlice(java.lang.Float.BYTES); + } + + @Override + public Class valueType() { + return java.lang.Float.class; + } + + @Override + public final void write(final Object value, final ByteBuf out) { + assert this.isValid(value); + out.writeFloatLE(((Number)value).floatValue()); + } + } + + private static class RntbdGuid implements Codec { + + public static final Codec codec = new RntbdGuid(); + + private RntbdGuid() { + } + + @Override + public final int computeLength(final Object value) { + assert this.isValid(value); + return 2 * java.lang.Long.BYTES; + } + + @Override + public final Object convert(final Object value) { + assert this.isValid(value); + return value; + } + + @Override + public final Object defaultValue() { + return RntbdUUID.EMPTY; + } + + @Override + public final boolean isValid(final Object value) { + return value instanceof UUID; + } + + @Override + public final Object read(final ByteBuf in) { + return RntbdUUID.decode(in); + } + + @Override + public final ByteBuf readSlice(final ByteBuf in) { + return in.readSlice(2 * java.lang.Long.BYTES); + } + + @Override + public Class valueType() { + return UUID.class; + } + + @Override + public final void write(final Object value, final ByteBuf out) { + assert this.isValid(value); + RntbdUUID.encode((UUID)value, out); + } + } + + private static class RntbdInteger implements Codec { + + public static final Codec codec = new RntbdInteger(); + + private RntbdInteger() { + } + + @Override + public final int computeLength(final Object value) { + assert this.isValid(value); + return Integer.BYTES; + } + + @Override + public final Object convert(final Object value) { + assert this.isValid(value); + return ((Number)value).intValue(); + } + + @Override + public final Object defaultValue() { + return 0; + } + + @Override + public final boolean isValid(final Object value) { + return value instanceof Number; + } + + @Override + public final Object read(final ByteBuf in) { + return in.readIntLE(); + } + + @Override + public final ByteBuf readSlice(final ByteBuf in) { + return in.readSlice(Integer.BYTES); + } + + @Override + public Class valueType() { + return Integer.class; + } + + @Override + public final void write(final Object value, final ByteBuf out) { + assert this.isValid(value); + out.writeIntLE(((Number)value).intValue()); + } + } + + private static class RntbdLong implements Codec { + + public static final Codec codec = new RntbdLong(); + + private RntbdLong() { + } + + @Override + public final int computeLength(final Object value) { + assert this.isValid(value); + return java.lang.Long.BYTES; + } + + @Override + public final Object convert(final Object value) { + assert this.isValid(value); + return ((Number)value).longValue(); + } + + @Override + public final Object defaultValue() { + return 0L; + } + + @Override + public final boolean isValid(final Object value) { + return value instanceof Number; + } + + @Override + public final Object read(final 
ByteBuf in) { + return in.readLongLE(); + } + + @Override + public final ByteBuf readSlice(final ByteBuf in) { + return in.readSlice(java.lang.Long.BYTES); + } + + @Override + public Class valueType() { + return java.lang.Long.class; + } + + @Override + public final void write(final Object value, final ByteBuf out) { + assert this.isValid(value); + out.writeLongLE(((Number)value).longValue()); + } + } + + private static class RntbdLongBytes extends RntbdBytes { + + public static final Codec codec = new RntbdLongBytes(); + + private RntbdLongBytes() { + } + + @Override + public final int computeLength(final Object value) { + assert this.isValid(value); + return Integer.BYTES + ((byte[])value).length; + } + + @Override + public final boolean isValid(final Object value) { + return value instanceof byte[] && ((byte[])value).length < 0xFFFF; + } + + @Override + public final Object read(final ByteBuf in) { + + final long length = in.readUnsignedIntLE(); + + if (length > Integer.MAX_VALUE) { + throw new IllegalStateException(); + } + return in.readBytes((int)length); + } + + @Override + public final ByteBuf readSlice(final ByteBuf in) { + + final long length = in.getUnsignedIntLE(in.readerIndex()); + + if (length > Integer.MAX_VALUE) { + throw new IllegalStateException(); + } + return in.readSlice(Integer.BYTES + (int)length); + } + + @Override + public final void write(final Object value, final ByteBuf out) { + + assert this.isValid(value); + + final byte[] bytes = (byte[])value; + out.writeIntLE(bytes.length); + out.writeBytes(bytes); + } + } + + private static class RntbdLongString extends RntbdString { + + public static final Codec codec = new RntbdLongString(); + + private RntbdLongString() { + } + + @Override + public final int computeLength(final Object value) { + return Integer.BYTES + this.computeLength(value, Integer.MAX_VALUE); + } + + @Override + public final Object read(final ByteBuf in) { + + final long length = in.readUnsignedIntLE(); + + if (length > Integer.MAX_VALUE) { + throw new IllegalStateException(); + } + + return in.readCharSequence((int)length, StandardCharsets.UTF_8).toString(); + } + + @Override + public final void write(final Object value, final ByteBuf out) { + + final int length = this.computeLength(value, Integer.MAX_VALUE); + out.writeIntLE(length); + writeValue(out, value, length); + } + } + + private static class RntbdNone implements Codec { + + public static final Codec codec = new RntbdNone(); + + @Override + public final int computeLength(final Object value) { + return 0; + } + + @Override + public final Object convert(final Object value) { + return null; + } + + @Override + public final Object defaultValue() { + return null; + } + + @Override + public final boolean isValid(final Object value) { + return true; + } + + @Override + public final Object read(final ByteBuf in) { + return null; + } + + @Override + public final ByteBuf readSlice(final ByteBuf in) { + return null; + } + + @Override + public Class valueType() { + return null; + } + + @Override + public final void write(final Object value, final ByteBuf out) { + } + } + + private static class RntbdShortBytes extends RntbdBytes { + + public static final Codec codec = new RntbdShortBytes(); + + private RntbdShortBytes() { + } + + @Override + public final int computeLength(final Object value) { + assert this.isValid(value); + return java.lang.Byte.BYTES + ((byte[])value).length; + } + + @Override + public final boolean isValid(final Object value) { + return value instanceof byte[] && 
((byte[])value).length < 0xFFFF; + } + + @Override + public final Object read(final ByteBuf in) { + + final int length = in.readUnsignedByte(); + final byte[] bytes = new byte[length]; + in.readBytes(bytes); + + return bytes; + } + + @Override + public final ByteBuf readSlice(final ByteBuf in) { + return in.readSlice(java.lang.Byte.BYTES + in.getUnsignedByte(in.readerIndex())); + } + + @Override + public final void write(final Object value, final ByteBuf out) { + + assert this.isValid(value); + + final byte[] bytes = (byte[])value; + final int length = bytes.length; + + if (length > 0xFF) { + throw new IllegalStateException(); + } + + out.writeByte((byte)length); + out.writeBytes(bytes); + } + } + + private static class RntbdShortString extends RntbdString { + + public static final Codec codec = new RntbdShortString(); + + private RntbdShortString() { + } + + @Override + public final int computeLength(final Object value) { + return java.lang.Byte.BYTES + this.computeLength(value, 0xFF); + } + + @Override + public final Object read(final ByteBuf in) { + return in.readCharSequence(in.readUnsignedByte(), StandardCharsets.UTF_8).toString(); + } + + @Override + public final ByteBuf readSlice(final ByteBuf in) { + return in.readSlice(java.lang.Byte.BYTES + in.getUnsignedByte(in.readerIndex())); + } + + @Override + public final void write(final Object value, final ByteBuf out) { + + final int length = this.computeLength(value, 0xFF); + out.writeByte(length); + writeValue(out, value, length); + } + } + + private static class RntbdString implements Codec { + + public static final Codec codec = new RntbdString(); + + private RntbdString() { + } + + final int computeLength(final Object value, final int maxLength) { + + assert this.isValid(value); + final int length; + + if (value instanceof java.lang.String) { + + final java.lang.String string = (java.lang.String)value; + length = Utf8.encodedLength(string); + + } else { + + final byte[] string = (byte[])value; + + if (!Utf8.isWellFormed(string)) { + final java.lang.String reason = Strings.lenientFormat("UTF-8 byte string is ill-formed: %s", ByteBufUtil.hexDump(string)); + throw new DecoderException(reason); + } + + length = string.length; + } + + if (length > maxLength) { + final java.lang.String reason = Strings.lenientFormat("UTF-8 byte string exceeds %s bytes: %s bytes", maxLength, length); + throw new DecoderException(reason); + } + + return length; + } + + @Override + public int computeLength(final Object value) { + return Short.BYTES + this.computeLength(value, 0xFFFF); + } + + @Override + public final Object convert(final Object value) { + assert this.isValid(value); + return value instanceof java.lang.String ? 
value : new String((byte[])value, StandardCharsets.UTF_8); + } + + @Override + public final Object defaultValue() { + return ""; + } + + @Override + public final boolean isValid(final Object value) { + return value instanceof java.lang.String || value instanceof byte[]; + } + + @Override + public Object read(final ByteBuf in) { + final int length = in.readUnsignedShortLE(); + return in.readCharSequence(length, StandardCharsets.UTF_8).toString(); + } + + @Override + public ByteBuf readSlice(final ByteBuf in) { + return in.readSlice(Short.BYTES + in.getUnsignedShortLE(in.readerIndex())); + } + + @Override + public Class valueType() { + return java.lang.String.class; + } + + @Override + public void write(final Object value, final ByteBuf out) { + + final int length = this.computeLength(value, 0xFFFF); + out.writeShortLE(length); + writeValue(out, value, length); + } + + static void writeValue(final ByteBuf out, final Object value, final int length) { + + final int start = out.writerIndex(); + + if (value instanceof java.lang.String) { + out.writeCharSequence((java.lang.String)value, StandardCharsets.UTF_8); + } else { + out.writeBytes((byte[])value); + } + + assert out.writerIndex() - start == length; + } + } + + private static class RntbdUnsignedInteger implements Codec { + + public static final Codec codec = new RntbdUnsignedInteger(); + + private RntbdUnsignedInteger() { + } + + @Override + public final int computeLength(final Object value) { + assert this.isValid(value); + return Integer.BYTES; + } + + @Override + public final Object convert(final Object value) { + assert this.isValid(value); + return ((Number)value).longValue() & 0xFFFFFFFFL; + } + + @Override + public final Object defaultValue() { + return 0L; + } + + @Override + public final boolean isValid(final Object value) { + return value instanceof Number; + } + + @Override + public final Object read(final ByteBuf in) { + return in.readUnsignedIntLE(); + } + + @Override + public final ByteBuf readSlice(final ByteBuf in) { + return in.readSlice(Integer.BYTES); + } + + @Override + public Class valueType() { + return java.lang.Long.class; + } + + @Override + public final void write(final Object value, final ByteBuf out) { + assert this.isValid(value); + out.writeIntLE(((Number)value).intValue()); + } + } + + private static class RntbdUnsignedShort implements Codec { + + public static final Codec codec = new RntbdUnsignedShort(); + + private RntbdUnsignedShort() { + } + + @Override + public final int computeLength(final Object value) { + assert this.isValid(value); + return Short.BYTES; + } + + @Override + public final Object convert(final Object value) { + assert this.isValid(value); + return ((Number)value).intValue() & 0xFFFF; + } + + @Override + public final Object defaultValue() { + return 0; + } + + @Override + public final boolean isValid(final Object value) { + return value instanceof Number; + } + + @Override + public final Object read(final ByteBuf in) { + return in.readUnsignedShortLE(); + } + + @Override + public final ByteBuf readSlice(final ByteBuf in) { + return in.readSlice(Short.BYTES); + } + + @Override + public Class valueType() { + return Integer.class; + } + + @Override + public final void write(final Object value, final ByteBuf out) { + assert this.isValid(value); + out.writeShortLE(((Number)value).shortValue()); + } + } + + // endregion +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdUUID.java 
b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdUUID.java new file mode 100644 index 0000000000000..c8a6dc9bd3056 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/directconnectivity/rntbd/RntbdUUID.java @@ -0,0 +1,113 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +package com.azure.data.cosmos.internal.directconnectivity.rntbd; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.handler.codec.CorruptedFrameException; + +import java.util.UUID; + +import static com.google.common.base.Preconditions.checkNotNull; + +public final class RntbdUUID { + + public static final UUID EMPTY = new UUID(0L, 0L); + + private RntbdUUID() { + } + + /** + * Decode a {@link UUID} as serialized by Microsoft APIs like {@code System.Guid.ToByteArray} + * + * @param bytes a {@link byte} array containing the serialized {@link UUID} to be decoded + * @return a new {@link UUID} + */ + public static UUID decode(final byte[] bytes) { + return decode(Unpooled.wrappedBuffer(bytes)); + } + + /** + * Decode a {@link UUID} as serialized by Microsoft APIs like {@code System.Guid.ToByteArray} + * + * @param in a {@link ByteBuf} containing the serialized {@link UUID} to be decoded + * @return a new {@link UUID} + */ + public static UUID decode(final ByteBuf in) { + + checkNotNull(in, "in"); + + if (in.readableBytes() < 2 * Long.BYTES) { + final String reason = String.format("invalid frame length: %d", in.readableBytes()); + throw new CorruptedFrameException(reason); + } + + long mostSignificantBits = in.readUnsignedIntLE() << 32; + + mostSignificantBits |= (0x000000000000FFFFL & in.readShortLE()) << 16; + mostSignificantBits |= (0x000000000000FFFFL & in.readShortLE()); + + long leastSignificantBits = (0x000000000000FFFFL & in.readShortLE()) << (32 + 16); + + for (int shift = 32 + 8; shift >= 0; shift -= 8) { + leastSignificantBits |= (0x00000000000000FFL & in.readByte()) << shift; + } + + return new UUID(mostSignificantBits, leastSignificantBits); + } + + /** + * Encodes a {@link UUID} as serialized by Microsoft APIs like {@code System.Guid.ToByteArray} + * + * @param uuid a {@link UUID} to be encoded + * @return a new byte array containing the encoded + */ + public static byte[] encode(final UUID uuid) { + final byte[] bytes = new byte[2 * Integer.BYTES]; + encode(uuid, Unpooled.wrappedBuffer(bytes)); + 
return bytes; + } + + /** + * Encodes a {@link UUID} as serialized by Microsoft APIs like {@code System.Guid.ToByteArray} + * + * @param uuid a {@link UUID} to be encoded + * @param out an output {@link ByteBuf} + */ + public static void encode(final UUID uuid, final ByteBuf out) { + + final long mostSignificantBits = uuid.getMostSignificantBits(); + + out.writeIntLE((int)((mostSignificantBits & 0xFFFFFFFF00000000L) >>> 32)); + out.writeShortLE((short)((mostSignificantBits & 0x00000000FFFF0000L) >>> 16)); + out.writeShortLE((short)(mostSignificantBits & 0x000000000000FFFFL)); + + final long leastSignificantBits = uuid.getLeastSignificantBits(); + + out.writeShortLE((short)((leastSignificantBits & 0xFFFF000000000000L) >>> (32 + 16))); + out.writeShort((short)((leastSignificantBits & 0x0000FFFF00000000L) >>> 32)); + out.writeInt((int)(leastSignificantBits & 0x00000000FFFFFFFFL)); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/http/BufferedHttpResponse.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/http/BufferedHttpResponse.java new file mode 100644 index 0000000000000..79b7ed0f58cb8 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/http/BufferedHttpResponse.java @@ -0,0 +1,92 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.http; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; + +/** + * HTTP response which will buffer the response's body when/if it is read. + */ +public class BufferedHttpResponse extends HttpResponse { + private final HttpResponse innerHttpResponse; + private final Mono cachedBody; + + /** + * Creates a buffered HTTP response. 
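     * <p>The inner response's body is cached via {@code bodyAsByteArray().cache()}, so it can be
     * consumed more than once. A minimal usage sketch ({@code response} is an assumed
     * {@link HttpResponse} instance):
     * <pre>{@code
     * BufferedHttpResponse buffered = new BufferedHttpResponse(response);
     * buffered.bodyAsString().subscribe(System.out::println); // first subscription populates the cache
     * buffered.bodyAsString().subscribe(System.out::println); // replays the cached bytes
     * }</pre>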
+ * + * @param innerHttpResponse The HTTP response to buffer + */ + public BufferedHttpResponse(HttpResponse innerHttpResponse) { + this.innerHttpResponse = innerHttpResponse; + this.cachedBody = innerHttpResponse.bodyAsByteArray().cache(); + this.withRequest(innerHttpResponse.request()); + } + + @Override + public int statusCode() { + return innerHttpResponse.statusCode(); + } + + @Override + public String headerValue(String name) { + return innerHttpResponse.headerValue(name); + } + + @Override + public HttpHeaders headers() { + return innerHttpResponse.headers(); + } + + @Override + public Mono bodyAsByteArray() { + return cachedBody; + } + + @Override + public Flux body() { + return bodyAsByteArray().flatMapMany(bytes -> Flux.just(Unpooled.wrappedBuffer(bytes))); + } + + @Override + public Mono bodyAsString() { + return bodyAsByteArray() + .map(bytes -> bytes == null ? null : new String(bytes, StandardCharsets.UTF_8)); + } + + @Override + public Mono bodyAsString(Charset charset) { + return bodyAsByteArray() + .map(bytes -> bytes == null ? null : new String(bytes, charset)); + } + + @Override + public BufferedHttpResponse buffer() { + return this; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/http/HttpClient.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/http/HttpClient.java new file mode 100644 index 0000000000000..1689d37511b76 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/http/HttpClient.java @@ -0,0 +1,61 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.http; + +import reactor.core.publisher.Mono; +import reactor.netty.resources.ConnectionProvider; + +/** + * A generic interface for sending HTTP requests and getting responses. + */ +public interface HttpClient { + + /** + * Send the provided request asynchronously. 
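     * <p>A minimal sketch, assuming {@code httpClientConfig} and {@code request} have already been
     * built ({@link HttpClientConfig} and {@code HttpRequest} respectively):
     * <pre>{@code
     * HttpClient client = HttpClient.createFixed(httpClientConfig);
     * client.send(request)
     *     .flatMap(HttpResponse::bodyAsString)
     *     .subscribe(body -> System.out.println(body));
     * }</pre>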
+ * + * @param request The HTTP request to send + * @return A {@link Mono} that emits response asynchronously + */ + Mono send(HttpRequest request); + + /** + * Create fixed HttpClient with {@link HttpClientConfig} + * + * @return the HttpClient + */ + static HttpClient createFixed(HttpClientConfig httpClientConfig) { + if (httpClientConfig.getConfigs() == null) { + throw new IllegalArgumentException("HttpClientConfig is null"); + } + + if (httpClientConfig.getMaxPoolSize() == null) { + return new ReactorNettyClient(ConnectionProvider.fixed(httpClientConfig.getConfigs().getReactorNettyConnectionPoolName()), httpClientConfig); + } + return new ReactorNettyClient(ConnectionProvider.fixed(httpClientConfig.getConfigs().getReactorNettyConnectionPoolName(), httpClientConfig.getMaxPoolSize()), httpClientConfig); + } + + /** + * Shutdown the Http Client and clean up resources + */ + void shutdown(); +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/http/HttpClientConfig.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/http/HttpClientConfig.java new file mode 100644 index 0000000000000..971d1bf310ed6 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/http/HttpClientConfig.java @@ -0,0 +1,85 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.http; + +import com.azure.data.cosmos.internal.Configs; + +import java.net.InetSocketAddress; + +/** + * Helper class internally used for instantiating reactor netty http client. 
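 * <p>For illustration, a typical configuration built with the fluent setters below (the
 * {@code configs} instance and the values shown are assumed):
 * <pre>{@code
 * HttpClientConfig httpClientConfig = new HttpClientConfig(configs)
 *     .withPoolSize(1000)
 *     .withMaxIdleConnectionTimeoutInMillis(60 * 1000)
 *     .withRequestTimeoutInMillis(60 * 1000);
 * }</pre>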
+ */ +public class HttpClientConfig { + public final static String REACTOR_NETWORK_LOG_CATEGORY = "com.azure.data.cosmos.netty-network"; + + private final Configs configs; + private Integer maxPoolSize; + private Integer maxIdleConnectionTimeoutInMillis; + private Integer requestTimeoutInMillis; + private InetSocketAddress proxy; + + public HttpClientConfig(Configs configs) { + this.configs = configs; + } + + public HttpClientConfig withPoolSize(int maxPoolSize) { + this.maxPoolSize = maxPoolSize; + return this; + } + + public HttpClientConfig withHttpProxy(InetSocketAddress proxy) { + this.proxy = proxy; + return this; + } + + public HttpClientConfig withMaxIdleConnectionTimeoutInMillis(int maxIdleConnectionTimeoutInMillis) { + this.maxIdleConnectionTimeoutInMillis = maxIdleConnectionTimeoutInMillis; + return this; + } + + public HttpClientConfig withRequestTimeoutInMillis(int requestTimeoutInMillis) { + this.requestTimeoutInMillis = requestTimeoutInMillis; + return this; + } + + public Configs getConfigs() { + return configs; + } + + public Integer getMaxPoolSize() { + return maxPoolSize; + } + + public Integer getMaxIdleConnectionTimeoutInMillis() { + return maxIdleConnectionTimeoutInMillis; + } + + public Integer getRequestTimeoutInMillis() { + return requestTimeoutInMillis; + } + + public InetSocketAddress getProxy() { + return proxy; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/http/HttpHeader.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/http/HttpHeader.java new file mode 100644 index 0000000000000..e9cbca44a59fb --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/http/HttpHeader.java @@ -0,0 +1,85 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.http; + +import org.apache.commons.lang3.StringUtils; + +/** + * A single header within a HTTP request or response. + * + * If multiple header values are added to a HTTP request or response with + * the same name (case-insensitive), then the values will be appended + * to the end of the same Header with commas separating them. + */ +public class HttpHeader { + private final String name; + private String value; + + /** + * Create a HttpHeader instance using the provided name and value. 
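     * <p>For example (the header shown is illustrative):
     * <pre>{@code
     * HttpHeader header = new HttpHeader("Accept", "application/json,text/plain");
     * String[] parts = header.values(); // {"application/json", "text/plain"}
     * }</pre>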
+ * + * @param name the name + * @param value the value + */ + public HttpHeader(String name, String value) { + this.name = name; + this.value = value; + } + + /** + * Get the header name. + * + * @return the name of this Header + */ + public String name() { + return name; + } + + /** + * Get the header value. + * + * @return the value of this Header + */ + public String value() { + return value; + } + + /** + * Get the comma separated value as an array. + * + * @return the values of this Header that are separated by a comma + */ + public String[] values() { + return value == null ? null : StringUtils.split(value, ","); + } + + /** + * Get the String representation of the header. + * + * @return the String representation of this HttpHeader + */ + @Override + public String toString() { + return name + ":" + value; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/http/HttpHeaders.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/http/HttpHeaders.java new file mode 100644 index 0000000000000..f1bf91509ce3d --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/http/HttpHeaders.java @@ -0,0 +1,154 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.http; + +import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.databind.JsonSerializable; +import com.fasterxml.jackson.databind.SerializerProvider; +import com.fasterxml.jackson.databind.jsontype.TypeSerializer; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Locale; +import java.util.Map; + +/** + * A collection of headers on an HTTP request or response. + */ +public class HttpHeaders implements Iterable, JsonSerializable { + private Map headers; + + /** + * Create an empty HttpHeaders instance. + */ + public HttpHeaders() { + this.headers = new HashMap<>(); + } + + /** + * Create an HttpHeaders instance with the given size. + */ + public HttpHeaders(int size) { + this.headers = new HashMap<>(size); + } + + /** + * Create a HttpHeaders instance with the provided initial headers. 
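+     * Header keys are normalized to lower case for lookups, while the original header names are
+     * preserved for serialization via {@code toMap()}.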
+ * + * @param headers the map of initial headers + */ + public HttpHeaders(Map headers) { + this.headers = new HashMap<>(headers.size()); + for (final Map.Entry header : headers.entrySet()) { + this.set(header.getKey(), header.getValue()); + } + } + + /** + * Gets the number of headers in the collection. + * + * @return the number of headers in this collection. + */ + public int size() { + return headers.size(); + } + + /** + * Set a header. + * + * if header with same name already exists then the value will be overwritten. + * if value is null and header with provided name already exists then it will be removed. + * + * @param name the name + * @param value the value + * @return this HttpHeaders + */ + public HttpHeaders set(String name, String value) { + final String headerKey = name.toLowerCase(Locale.ROOT); + if (value == null) { + headers.remove(headerKey); + } else { + headers.put(headerKey, new HttpHeader(name, value)); + } + return this; + } + + /** + * Get the header value for the provided header name. Null will be returned if the header + * name isn't found. + * + * @param name the name of the header to look for + * @return The String value of the header, or null if the header isn't found + */ + public String value(String name) { + final HttpHeader header = getHeader(name); + return header == null ? null : header.value(); + } + + /** + * Get the header values for the provided header name. Null will be returned if + * the header name isn't found. + * + * @param name the name of the header to look for + * @return the values of the header, or null if the header isn't found + */ + public String[] values(String name) { + final HttpHeader header = getHeader(name); + return header == null ? null : header.values(); + } + + private HttpHeader getHeader(String headerName) { + final String headerKey = headerName.toLowerCase(Locale.ROOT); + return headers.get(headerKey); + } + + /** + * Get {@link Map} representation of the HttpHeaders collection. 
+ * + * @return the headers as map + */ + public Map toMap() { + final Map result = new HashMap<>(headers.size()); + for (final HttpHeader header : headers.values()) { + result.put(header.name(), header.value()); + } + return result; + } + + @Override + public Iterator iterator() { + return headers.values().iterator(); + } + + @Override + public void serialize(JsonGenerator jsonGenerator, SerializerProvider serializerProvider) throws IOException { + jsonGenerator.writeObject(toMap()); + } + + @Override + public void serializeWithType(JsonGenerator jsonGenerator, SerializerProvider serializerProvider, TypeSerializer typeSerializer) throws IOException { + serialize(jsonGenerator, serializerProvider); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/http/HttpRequest.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/http/HttpRequest.java new file mode 100644 index 0000000000000..f1aeb6172c63c --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/http/HttpRequest.java @@ -0,0 +1,225 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.http; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.handler.codec.http.HttpMethod; +import reactor.core.publisher.Flux; + +import java.net.URI; +import java.net.URISyntaxException; +import java.nio.charset.StandardCharsets; + +/** + * The outgoing Http request. + */ +public class HttpRequest { + private HttpMethod httpMethod; + private URI uri; + private int port; + private HttpHeaders headers; + private Flux body; + + /** + * Create a new HttpRequest instance. + * + * @param httpMethod the HTTP request method + * @param uri the target address to send the request to + */ + public HttpRequest(HttpMethod httpMethod, URI uri, int port, HttpHeaders httpHeaders) { + this.httpMethod = httpMethod; + this.uri = uri; + this.port = port; + this.headers = httpHeaders; + } + + /** + * Create a new HttpRequest instance. + * + * @param httpMethod the HTTP request method + * @param uri the target address to send the request to + */ + public HttpRequest(HttpMethod httpMethod, String uri, int port) throws URISyntaxException { + this.httpMethod = httpMethod; + this.uri = new URI(uri); + this.port = port; + this.headers = new HttpHeaders(); + } + + /** + * Create a new HttpRequest instance. 
+ * + * @param httpMethod the HTTP request method + * @param uri the target address to send the request to + * @param headers the HTTP headers to use with this request + * @param body the request content + */ + public HttpRequest(HttpMethod httpMethod, URI uri, int port, HttpHeaders headers, Flux body) { + this.httpMethod = httpMethod; + this.uri = uri; + this.port = port; + this.headers = headers; + this.body = body; + } + + /** + * Get the request method. + * + * @return the request method + */ + public HttpMethod httpMethod() { + return httpMethod; + } + + /** + * Set the request method. + * + * @param httpMethod the request method + * @return this HttpRequest + */ + public HttpRequest withHttpMethod(HttpMethod httpMethod) { + this.httpMethod = httpMethod; + return this; + } + + /** + * Get the target port. + * + * @return the target port + */ + public int port() { + return port; + } + + /** + * Set the target port to send the request to. + * + * @param port target port + * @return this HttpRequest + */ + public HttpRequest withPort(int port) { + this.port = port; + return this; + } + + /** + * Get the target address. + * + * @return the target address + */ + public URI uri() { + return uri; + } + + /** + * Set the target address to send the request to. + * + * @param uri target address as {@link URI} + * @return this HttpRequest + */ + public HttpRequest withUri(URI uri) { + this.uri = uri; + return this; + } + + /** + * Get the request headers. + * + * @return headers to be sent + */ + public HttpHeaders headers() { + return headers; + } + + /** + * Set the request headers. + * + * @param headers the set of headers + * @return this HttpRequest + */ + public HttpRequest withHeaders(HttpHeaders headers) { + this.headers = headers; + return this; + } + + /** + * Set a request header, replacing any existing value. + * A null for {@code value} will remove the header if one with matching name exists. + * + * @param name the header name + * @param value the header value + * @return this HttpRequest + */ + public HttpRequest withHeader(String name, String value) { + headers.set(name, value); + return this; + } + + /** + * Get the request content. + * + * @return the content to be send + */ + public Flux body() { + return body; + } + + /** + * Set the request content. + * + * @param content the request content + * @return this HttpRequest + */ + public HttpRequest withBody(String content) { + final byte[] bodyBytes = content.getBytes(StandardCharsets.UTF_8); + return withBody(bodyBytes); + } + + /** + * Set the request content. + * The Content-Length header will be set based on the given content's length + * + * @param content the request content + * @return this HttpRequest + */ + public HttpRequest withBody(byte[] content) { + headers.set("Content-Length", String.valueOf(content.length)); + // Unpooled.wrappedBuffer(body) allocates ByteBuf from unpooled heap + return withBody(Flux.defer(() -> Flux.just(Unpooled.wrappedBuffer(content)))); + } + + /** + * Set request content. + *

+ * Caller must set the Content-Length header to indicate the length of the content, + * or use Transfer-Encoding: chunked. + * + * @param content the request content + * @return this HttpRequest + */ + public HttpRequest withBody(Flux content) { + this.body = content; + return this; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/http/HttpResponse.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/http/HttpResponse.java new file mode 100644 index 0000000000000..b9a48db61e331 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/http/HttpResponse.java @@ -0,0 +1,154 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.http; + +import io.netty.buffer.ByteBuf; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; +import reactor.netty.Connection; + +import java.nio.charset.Charset; + +/** + * The type representing response of {@link HttpRequest}. + */ +public abstract class HttpResponse implements AutoCloseable { + private HttpRequest request; + + /** + * Get the response status code. + * + * @return the response status code + */ + public abstract int statusCode(); + + /** + * Lookup a response header with the provided name. + * + * @param name the name of the header to lookup. + * @return the value of the header, or null if the header doesn't exist in the response. + */ + public abstract String headerValue(String name); + + /** + * Get all response headers. + * + * @return the response headers + */ + public abstract HttpHeaders headers(); + + /** + * Get the publisher emitting response content chunks. + * + *

+ * Returns a stream of the response's body content. Emissions may occur on the + * Netty EventLoop threads which are shared across channels and should not be + * blocked. Blocking should be avoided as much as possible/practical in reactive + * programming but if you do use methods like {@code blockingSubscribe} or {@code blockingGet} + * on the stream then be sure to use {@code subscribeOn} and {@code observeOn} + * before the blocking call. For example: + * + *

+     * {@code
+     *   response.body()
+     *     .map(bb -> bb.readableBytes())
+     *     .reduce((x, y) -> x + y)
+     *     .subscribeOn(Schedulers.elastic())
+     *     .publishOn(Schedulers.elastic())
+     *     .block();
+     * }
+     * 
+ *

+ * The above code is a simplistic example and would probably run fine without + * the `subscribeOn` and `observeOn` but should be considered a template for + * more complex situations. + * + * @return The response's content as a stream of {@link ByteBuf}. + */ + public abstract Flux body(); + + /** + * Get the response content as a byte[]. + * + * @return this response content as a byte[] + */ + public abstract Mono bodyAsByteArray(); + + /** + * Get the response content as a string. + * + * @return This response content as a string + */ + public abstract Mono bodyAsString(); + + /** + * Get the response content as a string. + * + * @param charset the charset to use as encoding + * @return This response content as a string + */ + public abstract Mono bodyAsString(Charset charset); + + /** + * Get the request which resulted in this response. + * + * @return the request which resulted in this response. + */ + public final HttpRequest request() { + return request; + } + + /** + * Sets the request which resulted in this HttpResponse. + * + * @param request the request + * @return this HTTP response + */ + public final HttpResponse withRequest(HttpRequest request) { + this.request = request; + return this; + } + + /** + * Get a new Response object wrapping this response with it's content + * buffered into memory. + * + * @return the new Response object + */ + public HttpResponse buffer() { + return new BufferedHttpResponse(this); + } + + /** + * Closes the response content stream, if any. + */ + @Override + public void close() { + } + + // package private for test purpose + Connection internConnection() { + return null; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/http/ReactorNettyClient.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/http/ReactorNettyClient.java new file mode 100644 index 0000000000000..6776183c37c9f --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/http/ReactorNettyClient.java @@ -0,0 +1,198 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos.internal.http; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.logging.LogLevel; +import org.reactivestreams.Publisher; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; +import reactor.netty.ByteBufFlux; +import reactor.netty.Connection; +import reactor.netty.NettyOutbound; +import reactor.netty.http.client.HttpClientRequest; +import reactor.netty.http.client.HttpClientResponse; +import reactor.netty.resources.ConnectionProvider; +import reactor.netty.tcp.ProxyProvider; +import reactor.netty.tcp.TcpResources; + +import java.nio.charset.Charset; +import java.util.Objects; +import java.util.function.BiFunction; + +import static com.azure.data.cosmos.internal.http.HttpClientConfig.REACTOR_NETWORK_LOG_CATEGORY; + +/** + * HttpClient that is implemented using reactor-netty. + */ +class ReactorNettyClient implements HttpClient { + + private final Logger logger = LoggerFactory.getLogger(getClass().getSimpleName()); + + private HttpClientConfig httpClientConfig; + private reactor.netty.http.client.HttpClient httpClient; + private ConnectionProvider connectionProvider; + + /** + * Creates ReactorNettyClient with {@link ConnectionProvider}. + */ + ReactorNettyClient(ConnectionProvider connectionProvider, HttpClientConfig httpClientConfig) { + this.connectionProvider = connectionProvider; + this.httpClientConfig = httpClientConfig; + this.httpClient = reactor.netty.http.client.HttpClient.create(connectionProvider); + configureChannelPipelineHandlers(); + } + + private void configureChannelPipelineHandlers() { + this.httpClient = this.httpClient.tcpConfiguration(tcpClient -> { + if (LoggerFactory.getLogger(REACTOR_NETWORK_LOG_CATEGORY).isTraceEnabled()) { + tcpClient = tcpClient.wiretap(REACTOR_NETWORK_LOG_CATEGORY, LogLevel.TRACE); + } + if (this.httpClientConfig.getProxy() != null) { + tcpClient = tcpClient.proxy(typeSpec -> typeSpec.type(ProxyProvider.Proxy.HTTP).address(this.httpClientConfig.getProxy())); + } + return tcpClient; + }); + } + + @Override + public Mono send(final HttpRequest request) { + Objects.requireNonNull(request.httpMethod()); + Objects.requireNonNull(request.uri()); + Objects.requireNonNull(this.httpClientConfig); + + return this.httpClient + .port(request.port()) + .request(HttpMethod.valueOf(request.httpMethod().toString())) + .uri(request.uri().toString()) + .send(bodySendDelegate(request)) + .responseConnection(responseDelegate(request)) + .single(); + } + + /** + * Delegate to send the request content. + * + * @param restRequest the Rest request contains the body to be sent + * @return a delegate upon invocation sets the request body in reactor-netty outbound object + */ + private static BiFunction> bodySendDelegate(final HttpRequest restRequest) { + return (reactorNettyRequest, reactorNettyOutbound) -> { + for (HttpHeader header : restRequest.headers()) { + reactorNettyRequest.header(header.name(), header.value()); + } + if (restRequest.body() != null) { + Flux nettyByteBufFlux = restRequest.body().map(Unpooled::wrappedBuffer); + return reactorNettyOutbound.options(sendOptions -> sendOptions.flushOnEach(false)).send(nettyByteBufFlux); + } else { + return reactorNettyOutbound.options(sendOptions -> sendOptions.flushOnEach(false)); + } + }; + } + + /** + * Delegate to receive response. 
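+     * The returned {@link HttpResponse} does not buffer the payload: the body is streamed from the
+     * underlying reactor-netty {@link Connection}, and the connection is released once the response
+     * is closed (the {@code bodyAs*} helpers close it after the content has been aggregated).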
+ * + * @param restRequest the Rest request whose response this delegate handles + * @return a delegate upon invocation setup Rest response object + */ + private static BiFunction> responseDelegate(final HttpRequest restRequest) { + return (reactorNettyResponse, reactorNettyConnection) -> + Mono.just(new ReactorNettyHttpResponse(reactorNettyResponse, reactorNettyConnection).withRequest(restRequest)); + } + + @Override + public void shutdown() { + TcpResources.disposeLoopsAndConnections(); + this.connectionProvider.dispose(); + } + + private static class ReactorNettyHttpResponse extends HttpResponse { + private final HttpClientResponse reactorNettyResponse; + private final Connection reactorNettyConnection; + + ReactorNettyHttpResponse(HttpClientResponse reactorNettyResponse, Connection reactorNettyConnection) { + this.reactorNettyResponse = reactorNettyResponse; + this.reactorNettyConnection = reactorNettyConnection; + } + + @Override + public int statusCode() { + return reactorNettyResponse.status().code(); + } + + @Override + public String headerValue(String name) { + return reactorNettyResponse.responseHeaders().get(name); + } + + @Override + public HttpHeaders headers() { + HttpHeaders headers = new HttpHeaders(reactorNettyResponse.responseHeaders().size()); + reactorNettyResponse.responseHeaders().forEach(e -> headers.set(e.getKey(), e.getValue())); + return headers; + } + + @Override + public Flux body() { + return bodyIntern().doFinally(s -> this.close()); + } + + @Override + public Mono bodyAsByteArray() { + return bodyIntern().aggregate().asByteArray().doFinally(s -> this.close()); + } + + @Override + public Mono bodyAsString() { + return bodyIntern().aggregate().asString().doFinally(s -> this.close()); + } + + @Override + public Mono bodyAsString(Charset charset) { + return bodyIntern().aggregate().asString(charset).doFinally(s -> this.close()); + } + + @Override + public void close() { + if (reactorNettyConnection.channel().eventLoop().inEventLoop()) { + reactorNettyConnection.dispose(); + } else { + reactorNettyConnection.channel().eventLoop().execute(reactorNettyConnection::dispose); + } + } + + private ByteBufFlux bodyIntern() { + return reactorNettyConnection.inbound().receive(); + } + + @Override + Connection internConnection() { + return reactorNettyConnection; + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/AggregateDocumentQueryExecutionContext.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/AggregateDocumentQueryExecutionContext.java new file mode 100644 index 0000000000000..543d03b16cf5e --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/AggregateDocumentQueryExecutionContext.java @@ -0,0 +1,150 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.query; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.internal.Document; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.Resource; +import com.azure.data.cosmos.internal.Undefined; +import com.azure.data.cosmos.internal.Constants; +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.QueryMetrics; +import com.azure.data.cosmos.internal.query.aggregation.AggregateOperator; +import com.azure.data.cosmos.internal.query.aggregation.Aggregator; +import com.azure.data.cosmos.internal.query.aggregation.AverageAggregator; +import com.azure.data.cosmos.internal.query.aggregation.CountAggregator; +import com.azure.data.cosmos.internal.query.aggregation.MaxAggregator; +import com.azure.data.cosmos.internal.query.aggregation.MinAggregator; +import com.azure.data.cosmos.internal.query.aggregation.SumAggregator; +import reactor.core.publisher.Flux; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.function.Function; + +public class AggregateDocumentQueryExecutionContext implements IDocumentQueryExecutionComponent{ + + private IDocumentQueryExecutionComponent component; + private Aggregator aggregator; + private ConcurrentMap queryMetricsMap = new ConcurrentHashMap<>(); + + //QueryInfo class used in PipelinedDocumentQueryExecutionContext returns a Collection of AggregateOperators + //while Multiple aggregates are allowed in queries targeted at a single partition, only a single aggregate is allowed in x-partition queries (currently) + public AggregateDocumentQueryExecutionContext (IDocumentQueryExecutionComponent component, Collection aggregateOperators) { + + this.component = component; + AggregateOperator aggregateOperator = aggregateOperators.iterator().next(); + + switch (aggregateOperator) { + case Average: + this.aggregator = new AverageAggregator(); + break; + case Count: + this.aggregator = new CountAggregator(); + break; + case Max: + this.aggregator = new MaxAggregator(); + break; + case Min: + this.aggregator = new MinAggregator(); + break; + case Sum: + this.aggregator = new SumAggregator(); + break; + default: + throw new IllegalStateException("Unexpected value: " + aggregateOperator.toString()); + } + } + + @SuppressWarnings("unchecked") + @Override + public Flux> drainAsync(int maxPageSize) { + + return this.component.drainAsync(maxPageSize) + .collectList() + .map( superList -> { + + double requestCharge = 0; + List aggregateResults = new ArrayList(); + HashMap headers = new HashMap(); + + for(FeedResponse page : superList) { + + if (page.results().size() == 0) { + headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double.toString(requestCharge)); + FeedResponse frp = BridgeInternal.createFeedResponse(aggregateResults, headers); + return (FeedResponse) frp; + } + + Document doc = 
((Document)page.results().get(0)); + requestCharge += page.requestCharge(); + QueryItem values = new QueryItem(doc.toJson()); + this.aggregator.aggregate(values.getItem()); + for(String key : BridgeInternal.queryMetricsFromFeedResponse(page).keySet()) { + if (queryMetricsMap.containsKey(key)) { + QueryMetrics qm = BridgeInternal.queryMetricsFromFeedResponse(page).get(key); + queryMetricsMap.get(key).add(qm); + } else { + queryMetricsMap.put(key, BridgeInternal.queryMetricsFromFeedResponse(page).get(key)); + } + } + } + + if (this.aggregator.getResult() == null || !this.aggregator.getResult().equals(Undefined.Value())) { + Document aggregateDocument = new Document(); + BridgeInternal.setProperty(aggregateDocument, Constants.Properties.AGGREGATE, this.aggregator.getResult()); + aggregateResults.add(aggregateDocument); + } + + headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double.toString(requestCharge)); + FeedResponse frp = BridgeInternal.createFeedResponse(aggregateResults, headers); + if(!queryMetricsMap.isEmpty()) { + for(String key: queryMetricsMap.keySet()) { + BridgeInternal.putQueryMetricsIntoMap(frp, key, queryMetricsMap.get(key)); + } + } + return (FeedResponse) frp; + }).flux(); + } + + public static Flux> createAsync( + Function>> createSourceComponentFunction, + Collection aggregates, + String continuationToken) { + + return createSourceComponentFunction + .apply(continuationToken) + .map( component -> { return new AggregateDocumentQueryExecutionContext(component, aggregates);}); + } + + public IDocumentQueryExecutionComponent getComponent() { + return this.component; + } + +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/CompositeContinuationToken.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/CompositeContinuationToken.java new file mode 100644 index 0000000000000..c7c8f64d2b0d9 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/CompositeContinuationToken.java @@ -0,0 +1,120 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.query; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.JsonSerializable; +import com.azure.data.cosmos.internal.Utils.ValueHolder; +import com.azure.data.cosmos.internal.routing.Range; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * While this class is public, but it is not part of our published public APIs. + * This is meant to be internally used only by our sdk. + */ +public final class CompositeContinuationToken extends JsonSerializable { + private static final String TokenPropertyName = "token"; + private static final String RangePropertyName = "range"; + private static final Logger logger = LoggerFactory.getLogger(CompositeContinuationToken.class); + + public CompositeContinuationToken(String token, Range range) { + // token is allowed to be null + if (range == null) { + throw new IllegalArgumentException("range must not be null."); + } + + this.setToken(token); + this.setRange(range); + } + + private CompositeContinuationToken(String serializedCompositeContinuationToken) { + super(serializedCompositeContinuationToken); + } + + public static boolean tryParse(String serializedCompositeContinuationToken, + ValueHolder outCompositeContinuationToken) { + boolean parsed; + try { + CompositeContinuationToken compositeContinuationToken = new CompositeContinuationToken( + serializedCompositeContinuationToken); + compositeContinuationToken.getToken(); + + Range range = compositeContinuationToken.getRange(); + if (range == null) { + throw new IllegalArgumentException("range must not be null."); + } + + range.getMax(); + range.getMin(); + range.isEmpty(); + range.isMaxInclusive(); + range.isMinInclusive(); + range.isSingleValue(); + + outCompositeContinuationToken.v = compositeContinuationToken; + parsed = true; + } catch (Exception ex) { + logger.debug( + "Received exception {} when trying to parse: {}", + ex.getMessage(), + serializedCompositeContinuationToken); + parsed = false; + outCompositeContinuationToken.v = null; + } + + return parsed; + } + + /** + * @return the token + */ + public String getToken() { + return super.getString(TokenPropertyName); + } + + /** + * @return the range + */ + public Range getRange() { + return new Range(super.getString(RangePropertyName)); + } + + /** + * @param token + * the token to set + */ + private void setToken(String token) { + BridgeInternal.setProperty(this, TokenPropertyName, token); + } + + /** + * @param range + * the range to set + */ + private void setRange(Range range) { + /* TODO: Don't stringify the range */ + BridgeInternal.setProperty(this, RangePropertyName, range.toString()); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/DefaultDocumentQueryExecutionContext.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/DefaultDocumentQueryExecutionContext.java new file mode 100644 index 0000000000000..f2f18806e2e08 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/DefaultDocumentQueryExecutionContext.java @@ -0,0 +1,267 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to 
permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.query; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.Resource; +import com.azure.data.cosmos.SqlQuerySpec; +import com.azure.data.cosmos.internal.BackoffRetryUtility; +import com.azure.data.cosmos.internal.Constants; +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.IDocumentClientRetryPolicy; +import com.azure.data.cosmos.internal.InvalidPartitionExceptionRetryPolicy; +import com.azure.data.cosmos.internal.PartitionKeyRange; +import com.azure.data.cosmos.internal.PartitionKeyRangeGoneRetryPolicy; +import com.azure.data.cosmos.internal.PathsHelper; +import com.azure.data.cosmos.internal.QueryMetrics; +import com.azure.data.cosmos.internal.ResourceType; +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import com.azure.data.cosmos.internal.Strings; +import com.azure.data.cosmos.internal.Utils.ValueHolder; +import com.azure.data.cosmos.internal.caches.IPartitionKeyRangeCache; +import com.azure.data.cosmos.internal.caches.RxCollectionCache; +import com.azure.data.cosmos.internal.query.metrics.ClientSideMetrics; +import com.azure.data.cosmos.internal.query.metrics.FetchExecutionRangeAccumulator; +import com.azure.data.cosmos.internal.query.metrics.SchedulingStopwatch; +import com.azure.data.cosmos.internal.query.metrics.SchedulingTimeSpan; +import com.azure.data.cosmos.internal.routing.PartitionKeyInternal; +import com.azure.data.cosmos.internal.routing.PartitionKeyRangeIdentity; +import com.azure.data.cosmos.internal.routing.Range; +import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.tuple.ImmutablePair; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.function.BiFunction; +import java.util.function.Function; + +import static com.azure.data.cosmos.CommonsBridgeInternal.partitionKeyRangeIdInternal; + +/** + * While this class is public, but it is not part of our published public APIs. + * This is meant to be internally used only by our sdk. 
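+ *
+ * The context builds one {@code RxDocumentServiceRequest} per page (see {@code createRequestAsync})
+ * and drains the results through the {@code Paginator}; each request is wrapped with
+ * {@code InvalidPartitionExceptionRetryPolicy} and, for partitioned resources,
+ * {@code PartitionKeyRangeGoneRetryPolicy} (see {@code executeInternalAsyncFunc}).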
+ */ +public class DefaultDocumentQueryExecutionContext extends DocumentQueryExecutionContextBase { + + private boolean isContinuationExpected; + private volatile int retries = -1; + + private final SchedulingStopwatch fetchSchedulingMetrics; + private final FetchExecutionRangeAccumulator fetchExecutionRangeAccumulator; + private static final String DEFAULT_PARTITION_KEY_RANGE_ID = "0"; + + public DefaultDocumentQueryExecutionContext(IDocumentQueryClient client, ResourceType resourceTypeEnum, + Class resourceType, SqlQuerySpec query, FeedOptions feedOptions, String resourceLink, + UUID correlatedActivityId, boolean isContinuationExpected) { + + super(client, + resourceTypeEnum, + resourceType, + query, + feedOptions, + resourceLink, + false, + correlatedActivityId); + + this.isContinuationExpected = isContinuationExpected; + this.fetchSchedulingMetrics = new SchedulingStopwatch(); + this.fetchSchedulingMetrics.ready(); + this.fetchExecutionRangeAccumulator = new FetchExecutionRangeAccumulator(DEFAULT_PARTITION_KEY_RANGE_ID); + } + + protected PartitionKeyInternal getPartitionKeyInternal() { + return this.feedOptions.partitionKey() == null ? null : feedOptions.partitionKey().getInternalPartitionKey(); + } + + @Override + public Flux> executeAsync() { + + if (feedOptions == null) { + feedOptions = new FeedOptions(); + } + + FeedOptions newFeedOptions = new FeedOptions(feedOptions); + + // We can not go to backend with the composite continuation token, + // but we still need the gateway for the query plan. + // The workaround is to try and parse the continuation token as a composite continuation token. + // If it is, then we send the query to the gateway with max degree of parallelism to force getting back the query plan + + String originalContinuation = newFeedOptions.requestContinuation(); + + if (isClientSideContinuationToken(originalContinuation)) { + // At this point we know we want back a query plan + newFeedOptions.requestContinuation(null); + newFeedOptions.maxDegreeOfParallelism(Integer.MAX_VALUE); + } + + int maxPageSize = newFeedOptions.maxItemCount() != null ? newFeedOptions.maxItemCount() : Constants.Properties.DEFAULT_MAX_PAGE_SIZE; + + BiFunction createRequestFunc = (continuationToken, pageSize) -> this.createRequestAsync(continuationToken, pageSize); + + // TODO: clean up if we want to use single vs observable. 
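+        // createRequestFunc builds a fresh request for each page from the continuation token and
+        // page size; executeFunc (below) performs the actual call, wrapped with the retry policies
+        // configured in executeInternalAsyncFunc. Both are handed to the Paginator to drive paging.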
+ Function>> executeFunc = executeInternalAsyncFunc(); + + return Paginator + .getPaginatedQueryResultAsObservable(newFeedOptions, createRequestFunc, executeFunc, resourceType, maxPageSize); + } + + public Mono> getTargetPartitionKeyRanges(String resourceId, List> queryRanges) { + // TODO: FIXME this needs to be revisited + + Range r = new Range<>("", "FF", true, false); + return client.getPartitionKeyRangeCache().tryGetOverlappingRangesAsync(resourceId, r, false, null); + } + + protected Function>> executeInternalAsyncFunc() { + RxCollectionCache collectionCache = this.client.getCollectionCache(); + IPartitionKeyRangeCache partitionKeyRangeCache = this.client.getPartitionKeyRangeCache(); + IDocumentClientRetryPolicy retryPolicyInstance = this.client.getResetSessionTokenRetryPolicy().getRequestPolicy(); + + retryPolicyInstance = new InvalidPartitionExceptionRetryPolicy(collectionCache, retryPolicyInstance, resourceLink, feedOptions); + if (super.resourceTypeEnum.isPartitioned()) { + retryPolicyInstance = new PartitionKeyRangeGoneRetryPolicy( + collectionCache, + partitionKeyRangeCache, + PathsHelper.getCollectionPath(super.resourceLink), + retryPolicyInstance, + feedOptions); + } + + final IDocumentClientRetryPolicy finalRetryPolicyInstance = retryPolicyInstance; + + return req -> { + finalRetryPolicyInstance.onBeforeSendRequest(req); + this.fetchExecutionRangeAccumulator.beginFetchRange(); + this.fetchSchedulingMetrics.start(); + return BackoffRetryUtility.executeRetry(() -> { + ++this.retries; + return executeRequestAsync(req); + }, finalRetryPolicyInstance).flux() + .map(tFeedResponse -> { + this.fetchSchedulingMetrics.stop(); + this.fetchExecutionRangeAccumulator.endFetchRange(tFeedResponse.activityId(), + tFeedResponse.results().size(), + this.retries); + ImmutablePair schedulingTimeSpanMap = + new ImmutablePair<>(DEFAULT_PARTITION_KEY_RANGE_ID, this.fetchSchedulingMetrics.getElapsedTime()); + if (!StringUtils.isEmpty(tFeedResponse.responseHeaders().get(HttpConstants.HttpHeaders.QUERY_METRICS))) { + QueryMetrics qm = + BridgeInternal.createQueryMetricsFromDelimitedStringAndClientSideMetrics(tFeedResponse.responseHeaders() + .get(HttpConstants.HttpHeaders.QUERY_METRICS), + new ClientSideMetrics(this.retries, + tFeedResponse.requestCharge(), + this.fetchExecutionRangeAccumulator.getExecutionRanges(), + Arrays.asList(schedulingTimeSpanMap)), + tFeedResponse.activityId()); + BridgeInternal.putQueryMetricsIntoMap(tFeedResponse, DEFAULT_PARTITION_KEY_RANGE_ID, qm); + } + return tFeedResponse; + }); + }; + } + + private Mono> executeOnceAsync(IDocumentClientRetryPolicy retryPolicyInstance, String continuationToken) { + // Don't reuse request, as the rest of client SDK doesn't reuse requests between retries. + // The code leaves some temporary garbage in request (in RequestContext etc.), + // which shold be erased during retries. 
+ + RxDocumentServiceRequest request = this.createRequestAsync(continuationToken, this.feedOptions.maxItemCount()); + if (retryPolicyInstance != null) { + retryPolicyInstance.onBeforeSendRequest(request); + } + + if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY)) + || !request.getResourceType().isPartitioned()) { + return this.executeRequestAsync(request); + } + + + // TODO: remove this as partition key range id is not relevant + // TODO; has to be rx async + //CollectionCache collectionCache = this.client.getCollectionCache(); + + // TODO: has to be rx async + //DocumentCollection collection = + // collectionCache.resolveCollection(request); + + // TODO: this code is not relevant because partition key range id should not be exposed + // if (!Strings.isNullOrEmpty(super.getPartitionKeyId())) + // { + // request.RouteTo(new PartitionKeyRangeIdentity(collection.ResourceId, base.PartitionKeyRangeId)); + // return await this.ExecuteRequestAsync(request); + // } + + request.UseGatewayMode = true; + return this.executeRequestAsync(request); + } + + public RxDocumentServiceRequest createRequestAsync(String continuationToken, Integer maxPageSize) { + + // TODO this should be async + Map requestHeaders = this.createCommonHeadersAsync( + this.getFeedOptions(continuationToken, maxPageSize)); + + // TODO: add support for simple continuation for single partition query + //requestHeaders.put(keyHttpConstants.HttpHeaders.IsContinuationExpected, isContinuationExpected.ToString()) + + RxDocumentServiceRequest request = this.createDocumentServiceRequest( + requestHeaders, + this.query, + this.getPartitionKeyInternal()); + + if (!StringUtils.isEmpty(partitionKeyRangeIdInternal(feedOptions))) { + request.routeTo(new PartitionKeyRangeIdentity(partitionKeyRangeIdInternal(feedOptions))); + } + + return request; + } + + private static boolean isClientSideContinuationToken(String continuationToken) { + if (continuationToken != null) { + ValueHolder outCompositeContinuationToken = new ValueHolder(); + if (CompositeContinuationToken.tryParse(continuationToken, outCompositeContinuationToken)) { + return true; + } + + ValueHolder outOrderByContinuationToken = new ValueHolder(); + if (OrderByContinuationToken.tryParse(continuationToken, outOrderByContinuationToken)) { + return true; + } + + ValueHolder outTakeContinuationToken = new ValueHolder(); + if (TakeContinuationToken.tryParse(continuationToken, outTakeContinuationToken)) { + return true; + } + } + + return false; + } +} + diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/DocumentProducer.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/DocumentProducer.java new file mode 100644 index 0000000000000..e3cc355c71255 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/DocumentProducer.java @@ -0,0 +1,271 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or 
substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.query; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.JsonSerializable; +import com.azure.data.cosmos.Resource; +import com.azure.data.cosmos.internal.Exceptions; +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.IDocumentClientRetryPolicy; +import com.azure.data.cosmos.internal.ObservableHelper; +import com.azure.data.cosmos.internal.PartitionKeyRange; +import com.azure.data.cosmos.internal.QueryMetrics; +import com.azure.data.cosmos.internal.QueryMetricsConstants; +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import com.azure.data.cosmos.internal.Utils; +import com.azure.data.cosmos.internal.query.metrics.ClientSideMetrics; +import com.azure.data.cosmos.internal.query.metrics.FetchExecutionRangeAccumulator; +import com.azure.data.cosmos.internal.query.metrics.SchedulingStopwatch; +import com.azure.data.cosmos.internal.query.metrics.SchedulingTimeSpan; +import com.azure.data.cosmos.internal.routing.Range; +import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.tuple.ImmutablePair; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.Callable; +import java.util.function.BiFunction; +import java.util.function.Function; +import java.util.stream.Collectors; + +/** + * While this class is public, but it is not part of our published public APIs. + * This is meant to be internally used only by our sdk. 
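+ *
+ * A DocumentProducer drains the results of a query against a single {@code PartitionKeyRange}. If
+ * that range is split while draining, it transparently replaces itself with child producers for the
+ * replacement ranges, resuming from the last continuation token it observed.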
+ */ +class DocumentProducer { + private static final Logger logger = LoggerFactory.getLogger(DocumentProducer.class); + private int retries; + + class DocumentProducerFeedResponse { + FeedResponse pageResult; + PartitionKeyRange sourcePartitionKeyRange; + + DocumentProducerFeedResponse(FeedResponse pageResult) { + this.pageResult = pageResult; + this.sourcePartitionKeyRange = DocumentProducer.this.targetRange; + populatePartitionedQueryMetrics(); + } + + DocumentProducerFeedResponse(FeedResponse pageResult, PartitionKeyRange pkr) { + this.pageResult = pageResult; + this.sourcePartitionKeyRange = pkr; + populatePartitionedQueryMetrics(); + } + + void populatePartitionedQueryMetrics() { + String queryMetricsDelimitedString = pageResult.responseHeaders().get(HttpConstants.HttpHeaders.QUERY_METRICS); + if (!StringUtils.isEmpty(queryMetricsDelimitedString)) { + queryMetricsDelimitedString += String.format(";%s=%.2f", QueryMetricsConstants.RequestCharge, pageResult.requestCharge()); + ImmutablePair schedulingTimeSpanMap = + new ImmutablePair<>(targetRange.id(), fetchSchedulingMetrics.getElapsedTime()); + + QueryMetrics qm =BridgeInternal.createQueryMetricsFromDelimitedStringAndClientSideMetrics(queryMetricsDelimitedString, + new ClientSideMetrics(retries, + pageResult.requestCharge(), + fetchExecutionRangeAccumulator.getExecutionRanges(), + Arrays.asList(schedulingTimeSpanMap) + ), pageResult.activityId()); + BridgeInternal.putQueryMetricsIntoMap(pageResult, targetRange.id(), qm); + } + } + } + + protected final IDocumentQueryClient client; + protected final String collectionRid; + protected final FeedOptions feedOptions; + protected final Class resourceType; + protected final PartitionKeyRange targetRange; + protected final String collectionLink; + protected final TriFunction createRequestFunc; + protected final Function>> executeRequestFuncWithRetries; + protected final Callable createRetryPolicyFunc; + protected final int pageSize; + protected final UUID correlatedActivityId; + public int top; + private volatile String lastResponseContinuationToken; + private final SchedulingStopwatch fetchSchedulingMetrics; + private SchedulingStopwatch moveNextSchedulingMetrics; + private final FetchExecutionRangeAccumulator fetchExecutionRangeAccumulator; + + public DocumentProducer( + IDocumentQueryClient client, + String collectionResourceId, + FeedOptions feedOptions, + TriFunction createRequestFunc, + Function>> executeRequestFunc, + PartitionKeyRange targetRange, + String collectionLink, + Callable createRetryPolicyFunc, + Class resourceType , + UUID correlatedActivityId, + int initialPageSize, // = -1, + String initialContinuationToken, + int top) { + + this.client = client; + this.collectionRid = collectionResourceId; + + this.createRequestFunc = createRequestFunc; + + this.fetchSchedulingMetrics = new SchedulingStopwatch(); + this.fetchSchedulingMetrics.ready(); + this.fetchExecutionRangeAccumulator = new FetchExecutionRangeAccumulator(targetRange.id()); + + this.executeRequestFuncWithRetries = request -> { + retries = -1; + this.fetchSchedulingMetrics.start(); + this.fetchExecutionRangeAccumulator.beginFetchRange(); + IDocumentClientRetryPolicy retryPolicy = null; + if (createRetryPolicyFunc != null) { + try { + retryPolicy = createRetryPolicyFunc.call(); + } catch (Exception e) { + return Flux.error(e); + } + retryPolicy.onBeforeSendRequest(request); + } + return ObservableHelper.inlineIfPossibleAsObs( + () -> { + ++retries; + return executeRequestFunc.apply(request); + }, retryPolicy); + }; + + 
this.correlatedActivityId = correlatedActivityId; + + this.feedOptions = feedOptions != null ? feedOptions : new FeedOptions(); + this.feedOptions.requestContinuation(initialContinuationToken); + this.lastResponseContinuationToken = initialContinuationToken; + this.resourceType = resourceType; + this.targetRange = targetRange; + this.collectionLink = collectionLink; + this.createRetryPolicyFunc = createRetryPolicyFunc; + this.pageSize = initialPageSize; + this.top = top; + } + + public Flux produceAsync() { + BiFunction sourcePartitionCreateRequestFunc = + (token, maxItemCount) -> createRequestFunc.apply(targetRange, token, maxItemCount); + Flux> obs = Paginator + .getPaginatedQueryResultAsObservable( + feedOptions.requestContinuation(), + sourcePartitionCreateRequestFunc, + executeRequestFuncWithRetries, + resourceType, + top, + pageSize) + .map(rsp -> { + lastResponseContinuationToken = rsp.continuationToken(); + this.fetchExecutionRangeAccumulator.endFetchRange(rsp.activityId(), + rsp.results().size(), + this.retries); + this.fetchSchedulingMetrics.stop(); + return rsp;}); + + return splitProof(obs.map(DocumentProducerFeedResponse::new)); + } + + private Flux splitProof(Flux sourceFeedResponseObservable) { + return sourceFeedResponseObservable.onErrorResume( t -> { + CosmosClientException dce = Utils.as(t, CosmosClientException.class); + if (dce == null || !isSplit(dce)) { + logger.error("Unexpected failure", t); + return Flux.error(t); + } + + // we are dealing with Split + logger.info("DocumentProducer handling a partition split in [{}], detail:[{}]", targetRange, dce); + Mono> replacementRangesObs = getReplacementRanges(targetRange.toRange()); + + // Since new DocumentProducers are instantiated for the new replacement ranges, if for the new + // replacement partitions split happens the corresponding DocumentProducer can recursively handle splits. + // so this is resilient to split on splits. 
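+            // Each child producer is created with lastResponseContinuationToken, so draining
+            // resumes where this producer left off rather than re-reading pages served before
+            // the split.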
+ Flux> replacementProducers = replacementRangesObs.flux().flatMap( + partitionKeyRanges -> { + if (logger.isDebugEnabled()) { + logger.info("Cross Partition Query Execution detected partition [{}] split into [{}] partitions," + + " last continuation token is [{}].", + targetRange.toJson(), + partitionKeyRanges.stream() + .map(JsonSerializable::toJson).collect(Collectors.joining(", ")), + lastResponseContinuationToken); + } + return Flux.fromIterable(createReplacingDocumentProducersOnSplit(partitionKeyRanges)); + }); + + return produceOnSplit(replacementProducers); + }); + } + + protected Flux produceOnSplit(Flux> replacingDocumentProducers) { + return replacingDocumentProducers.flatMap(DocumentProducer::produceAsync, 1); + } + + private List> createReplacingDocumentProducersOnSplit(List partitionKeyRanges) { + + List> replacingDocumentProducers = new ArrayList<>(partitionKeyRanges.size()); + for(PartitionKeyRange pkr: partitionKeyRanges) { + replacingDocumentProducers.add(createChildDocumentProducerOnSplit(pkr, lastResponseContinuationToken)); + } + return replacingDocumentProducers; + } + + protected DocumentProducer createChildDocumentProducerOnSplit( + PartitionKeyRange targetRange, + String initialContinuationToken) { + + return new DocumentProducer( + client, + collectionRid, + feedOptions, + createRequestFunc, + executeRequestFuncWithRetries, + targetRange, + collectionLink, + null, + resourceType , + correlatedActivityId, + pageSize, + initialContinuationToken, + top); + } + + private Mono> getReplacementRanges(Range range) { + return client.getPartitionKeyRangeCache().tryGetOverlappingRangesAsync(collectionRid, range, true, feedOptions.properties()); + } + + private boolean isSplit(CosmosClientException e) { + return Exceptions.isPartitionSplit(e); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/DocumentQueryExecutionContextBase.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/DocumentQueryExecutionContextBase.java new file mode 100644 index 0000000000000..61cb796dde8f5 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/DocumentQueryExecutionContextBase.java @@ -0,0 +1,293 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos.internal.query; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.Resource; +import com.azure.data.cosmos.SqlParameterList; +import com.azure.data.cosmos.SqlQuerySpec; +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.OperationType; +import com.azure.data.cosmos.internal.PartitionKeyRange; +import com.azure.data.cosmos.internal.ReplicatedResourceClientUtils; +import com.azure.data.cosmos.internal.ResourceType; +import com.azure.data.cosmos.internal.RuntimeConstants.MediaTypes; +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import com.azure.data.cosmos.internal.RxDocumentServiceResponse; +import com.azure.data.cosmos.internal.Strings; +import com.azure.data.cosmos.internal.Utils; +import com.azure.data.cosmos.internal.routing.PartitionKeyInternal; +import com.azure.data.cosmos.internal.routing.PartitionKeyRangeIdentity; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.io.UnsupportedEncodingException; +import java.util.HashMap; +import java.util.Map; +import java.util.UUID; + +/** + * While this class is public, but it is not part of our published public APIs. + * This is meant to be internally used only by our sdk. + */ +public abstract class DocumentQueryExecutionContextBase +implements IDocumentQueryExecutionContext { + + protected ResourceType resourceTypeEnum; + protected String resourceLink; + protected IDocumentQueryClient client; + protected Class resourceType; + protected FeedOptions feedOptions; + protected SqlQuerySpec query; + protected UUID correlatedActivityId; + protected boolean shouldExecuteQueryRequest; + + protected DocumentQueryExecutionContextBase(IDocumentQueryClient client, ResourceType resourceTypeEnum, + Class resourceType, SqlQuerySpec query, FeedOptions feedOptions, String resourceLink, + boolean getLazyFeedResponse, UUID correlatedActivityId) { + + // TODO: validate args are not null: client and feedOption should not be null + this.client = client; + this.resourceTypeEnum = resourceTypeEnum; + this.resourceType = resourceType; + this.query = query; + this.shouldExecuteQueryRequest = (query != null); + this.feedOptions = feedOptions; + this.resourceLink = resourceLink; + // this.getLazyFeedResponse = getLazyFeedResponse; + this.correlatedActivityId = correlatedActivityId; + } + + @Override + abstract public Flux> executeAsync(); + + public RxDocumentServiceRequest createDocumentServiceRequest(Map requestHeaders, + SqlQuerySpec querySpec, + PartitionKeyInternal partitionKey) { + + RxDocumentServiceRequest request = querySpec != null + ? this.createQueryDocumentServiceRequest(requestHeaders, querySpec) + : this.createReadFeedDocumentServiceRequest(requestHeaders); + + this.populatePartitionKeyInfo(request, partitionKey); + + return request; + } + + protected RxDocumentServiceRequest createDocumentServiceRequest(Map requestHeaders, + SqlQuerySpec querySpec, + PartitionKeyRange targetRange, + String collectionRid) { + RxDocumentServiceRequest request = querySpec != null + ? 
this.createQueryDocumentServiceRequest(requestHeaders, querySpec) + : this.createReadFeedDocumentServiceRequest(requestHeaders); + + this.populatePartitionKeyRangeInfo(request, targetRange, collectionRid); + + return request; + } + + public Mono> executeRequestAsync(RxDocumentServiceRequest request) { + return (this.shouldExecuteQueryRequest ? this.executeQueryRequestAsync(request) + : this.executeReadFeedRequestAsync(request)); + } + + public Mono> executeQueryRequestAsync(RxDocumentServiceRequest request) { + return this.getFeedResponse(this.executeQueryRequestInternalAsync(request)); + } + + public Mono> executeReadFeedRequestAsync(RxDocumentServiceRequest request) { + return this.getFeedResponse(this.client.readFeedAsync(request)); + } + + protected Mono> getFeedResponse(Mono response) { + return response.map(resp -> BridgeInternal.toFeedResponsePage(resp, resourceType)); + } + + public FeedOptions getFeedOptions(String continuationToken, Integer maxPageSize) { + FeedOptions options = new FeedOptions(this.feedOptions); + options.requestContinuation(continuationToken); + options.maxItemCount(maxPageSize); + return options; + } + + private Mono executeQueryRequestInternalAsync(RxDocumentServiceRequest request) { + return this.client.executeQueryAsync(request); + } + + public Map createCommonHeadersAsync(FeedOptions feedOptions) { + Map requestHeaders = new HashMap<>(); + + ConsistencyLevel defaultConsistencyLevel = this.client.getDefaultConsistencyLevelAsync(); + ConsistencyLevel desiredConsistencyLevel = this.client.getDesiredConsistencyLevelAsync(); + if (!Strings.isNullOrEmpty(feedOptions.sessionToken()) + && !ReplicatedResourceClientUtils.isReadingFromMaster(this.resourceTypeEnum, OperationType.ReadFeed)) { + if (defaultConsistencyLevel == ConsistencyLevel.SESSION + || (desiredConsistencyLevel == ConsistencyLevel.SESSION)) { + // Query across partitions is not supported today. Master resources (for e.g., + // database) + // can span across partitions, whereas server resources (viz: collection, + // document and attachment) + // don't span across partitions. Hence, session token returned by one partition + // should not be used + // when quering resources from another partition. + // Since master resources can span across partitions, don't send session token + // to the backend. + // As master resources are sync replicated, we should always get consistent + // query result for master resources, + // irrespective of the chosen replica. + // For server resources, which don't span partitions, specify the session token + // for correct replica to be chosen for servicing the query result. 
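The comment block above explains when a session token may be forwarded: only for server resources that do not read from the master partition, and only under session consistency. A compact sketch of that decision, with a single simplified effective consistency level and a plain header map standing in for the SDK's request headers:

import java.util.HashMap;
import java.util.Map;

final class SessionTokenHeaderSketch {

    enum ConsistencyLevel { STRONG, BOUNDED_STALENESS, SESSION, CONSISTENT_PREFIX, EVENTUAL }

    // Attach the session token only under session consistency and only when the read is not
    // served by the master partition: master resources span partitions, so a token captured
    // from one partition must not be replayed against another.
    static Map<String, String> sessionHeaders(String sessionToken,
                                              boolean readingFromMaster,
                                              ConsistencyLevel effectiveLevel) {
        Map<String, String> headers = new HashMap<>();
        boolean hasToken = sessionToken != null && !sessionToken.isEmpty();
        if (hasToken && effectiveLevel == ConsistencyLevel.SESSION && !readingFromMaster) {
            // "x-ms-session-token" is the Cosmos DB REST header behind the SDK's SESSION_TOKEN constant.
            headers.put("x-ms-session-token", sessionToken);
        }
        return headers;
    }
}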
+ requestHeaders.put(HttpConstants.HttpHeaders.SESSION_TOKEN, feedOptions.sessionToken()); + } + } + + requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, feedOptions.requestContinuation()); + requestHeaders.put(HttpConstants.HttpHeaders.IS_QUERY, Strings.toString(true)); + + // Flow the pageSize only when we are not doing client eval + if (feedOptions.maxItemCount() != null && feedOptions.maxItemCount() > 0) { + requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Strings.toString(feedOptions.maxItemCount())); + } + + if (feedOptions.enableCrossPartitionQuery() != null) { + + requestHeaders.put(HttpConstants.HttpHeaders.ENABLE_CROSS_PARTITION_QUERY, + Strings.toString(feedOptions.enableCrossPartitionQuery())); + } + + if (feedOptions.maxDegreeOfParallelism() != 0) { + requestHeaders.put(HttpConstants.HttpHeaders.PARALLELIZE_CROSS_PARTITION_QUERY, Strings.toString(true)); + } + + if (this.feedOptions.enableCrossPartitionQuery() != null) { + requestHeaders.put(HttpConstants.HttpHeaders.ENABLE_SCAN_IN_QUERY, + Strings.toString(this.feedOptions.enableCrossPartitionQuery())); + } + + if (this.feedOptions.responseContinuationTokenLimitInKb() > 0) { + requestHeaders.put(HttpConstants.HttpHeaders.RESPONSE_CONTINUATION_TOKEN_LIMIT_IN_KB, + Strings.toString(feedOptions.responseContinuationTokenLimitInKb())); + } + + if (desiredConsistencyLevel != null) { + requestHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, desiredConsistencyLevel.toString()); + } + + if(feedOptions.populateQueryMetrics()){ + requestHeaders.put(HttpConstants.HttpHeaders.POPULATE_QUERY_METRICS, String.valueOf(feedOptions.populateQueryMetrics())); + } + + return requestHeaders; + } + + private void populatePartitionKeyInfo(RxDocumentServiceRequest request, PartitionKeyInternal partitionKey) { + if (request == null) { + throw new NullPointerException("request"); + } + + if (this.resourceTypeEnum.isPartitioned()) { + if (partitionKey != null) { + request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, partitionKey.toJson()); + } + } + } + + public void populatePartitionKeyRangeInfo(RxDocumentServiceRequest request, PartitionKeyRange range, + String collectionRid) { + if (request == null) { + throw new NullPointerException("request"); + } + + if (range == null) { + throw new NullPointerException("range"); + } + + if (this.resourceTypeEnum.isPartitioned()) { + request.routeTo(new PartitionKeyRangeIdentity(collectionRid, range.id())); + } + } + + private RxDocumentServiceRequest createQueryDocumentServiceRequest(Map requestHeaders, + SqlQuerySpec querySpec) { + RxDocumentServiceRequest executeQueryRequest; + + String queryText; + switch (this.client.getQueryCompatibilityMode()) { + case SqlQuery: + SqlParameterList params = querySpec.parameters(); + Utils.checkStateOrThrow(params != null && params.size() > 0, "query.parameters", + "Unsupported argument in query compatibility mode '%s'", + this.client.getQueryCompatibilityMode().toString()); + + executeQueryRequest = RxDocumentServiceRequest.create(OperationType.SqlQuery, this.resourceTypeEnum, + this.resourceLink, + // AuthorizationTokenType.PrimaryMasterKey, + requestHeaders); + + executeQueryRequest.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, MediaTypes.JSON); + queryText = querySpec.queryText(); + break; + + case Default: + case Query: + default: + executeQueryRequest = RxDocumentServiceRequest.create(OperationType.Query, this.resourceTypeEnum, + this.resourceLink, + // AuthorizationTokenType.PrimaryMasterKey, + requestHeaders); + + 
executeQueryRequest.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, MediaTypes.QUERY_JSON); + queryText = querySpec.toJson(); + break; + } + + try { + executeQueryRequest.setContentBytes(queryText.getBytes("UTF-8")); + } catch (UnsupportedEncodingException e) { + // TODO Auto-generated catch block + // TODO: exception should be handled differently + e.printStackTrace(); + } + + return executeQueryRequest; + } + + private RxDocumentServiceRequest createReadFeedDocumentServiceRequest(Map requestHeaders) { + if (this.resourceTypeEnum == ResourceType.Database || this.resourceTypeEnum == ResourceType.Offer) { + return RxDocumentServiceRequest.create(OperationType.ReadFeed, null, this.resourceTypeEnum, + // TODO: we may want to add a constructor to RxDocumentRequest supporting authorization type similar to .net + // AuthorizationTokenType.PrimaryMasterKey, + requestHeaders); + } else { + return RxDocumentServiceRequest.create(OperationType.ReadFeed, this.resourceTypeEnum, this.resourceLink, + // TODO: we may want to add a constructor to RxDocumentRequest supporting authorization type similar to .net + // AuthorizationTokenType.PrimaryMasterKey, + requestHeaders); + } + } + +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/DocumentQueryExecutionContextFactory.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/DocumentQueryExecutionContextFactory.java new file mode 100644 index 0000000000000..bef8a6b7fdc31 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/DocumentQueryExecutionContextFactory.java @@ -0,0 +1,185 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos.internal.query; + +import com.azure.data.cosmos.BadRequestException; +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.internal.DocumentCollection; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.PartitionKey; +import com.azure.data.cosmos.Resource; +import com.azure.data.cosmos.SqlQuerySpec; +import com.azure.data.cosmos.internal.OperationType; +import com.azure.data.cosmos.internal.PartitionKeyRange; +import com.azure.data.cosmos.internal.ResourceType; +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import com.azure.data.cosmos.internal.Utils; +import com.azure.data.cosmos.internal.caches.RxCollectionCache; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.util.List; +import java.util.UUID; + +/** + * While this class is public, but it is not part of our published public APIs. + * This is meant to be internally used only by our sdk. + */ +public class DocumentQueryExecutionContextFactory { + + private final static int PageSizeFactorForTop = 5; + + private static Mono resolveCollection(IDocumentQueryClient client, SqlQuerySpec query, + ResourceType resourceTypeEnum, String resourceLink) { + + RxCollectionCache collectionCache = client.getCollectionCache(); + + RxDocumentServiceRequest request = RxDocumentServiceRequest.create( + OperationType.Query, + resourceTypeEnum, + resourceLink, null + // TODO AuthorizationTokenType.INVALID) + ); //this request doesnt actually go to server + return collectionCache.resolveCollectionAsync(request); + } + + public static Flux> createDocumentQueryExecutionContextAsync( + IDocumentQueryClient client, + ResourceType resourceTypeEnum, + Class resourceType, + SqlQuerySpec query, + FeedOptions feedOptions, + String resourceLink, + boolean isContinuationExpected, + UUID correlatedActivityId) { + + // return proxy + Flux collectionObs = Flux.empty(); + + if (resourceTypeEnum.isCollectionChild()) { + collectionObs = resolveCollection(client, query, resourceTypeEnum, resourceLink).flux(); + } + + // We create a ProxyDocumentQueryExecutionContext that will be initialized with DefaultDocumentQueryExecutionContext + // which will be used to send the query to GATEWAY and on getting 400(bad request) with 1004(cross parition query not servable), we initialize it with + // PipelinedDocumentQueryExecutionContext by providing the partition query execution info that's needed(which we get from the exception returned from GATEWAY). 
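The comment above outlines the fallback flow: the query is first sent through the default (gateway) execution context, and only when the service answers 400 with sub-status 1004 (cross-partition query not servable) is the pipelined context created from the partitioned-query execution info returned with that error. A minimal sketch of that switch, with a hypothetical exception type standing in for the SDK's CosmosClientException and plain String pages in place of feed responses:

import reactor.core.publisher.Flux;

final class CrossPartitionFallbackSketch {

    // Hypothetical exception carrying the service's status and sub-status codes.
    static final class QueryRejectedException extends RuntimeException {
        final int statusCode;
        final int subStatusCode;

        QueryRejectedException(int statusCode, int subStatusCode) {
            this.statusCode = statusCode;
            this.subStatusCode = subStatusCode;
        }
    }

    private Flux<String> executeDefault(String query) {
        // Placeholder for the default/gateway execution path.
        return Flux.error(new QueryRejectedException(400, 1004));
    }

    private Flux<String> executePipelined(String query) {
        // Placeholder for the parallel/order-by pipelined execution path.
        return Flux.just("page-1", "page-2");
    }

    // 400 (bad request) with sub-status 1004 triggers the switch to the pipelined path;
    // any other error is rethrown unchanged.
    Flux<String> execute(String query) {
        return executeDefault(query)
                .onErrorResume(QueryRejectedException.class,
                        e -> e.statusCode == 400 && e.subStatusCode == 1004
                                ? executePipelined(query)
                                : Flux.<String>error(e));
    }
}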
+ + Flux> proxyQueryExecutionContext = + collectionObs.flatMap(collection -> { + if (feedOptions != null && feedOptions.partitionKey() != null && feedOptions.partitionKey().equals(PartitionKey.None)) { + feedOptions.partitionKey(BridgeInternal.getPartitionKey(BridgeInternal.getNonePartitionKey(collection.getPartitionKey()))); + } + return ProxyDocumentQueryExecutionContext.createAsync( + client, + resourceTypeEnum, + resourceType, + query, + feedOptions, + resourceLink, + collection, + isContinuationExpected, + correlatedActivityId); + }).switchIfEmpty(ProxyDocumentQueryExecutionContext.createAsync( + client, + resourceTypeEnum, + resourceType, + query, + feedOptions, + resourceLink, + null, + isContinuationExpected, + correlatedActivityId)); + + return proxyQueryExecutionContext; + } + + public static Flux> createSpecializedDocumentQueryExecutionContextAsync( + IDocumentQueryClient client, + ResourceType resourceTypeEnum, + Class resourceType, + SqlQuerySpec query, + FeedOptions feedOptions, + String resourceLink, + boolean isContinuationExpected, + PartitionedQueryExecutionInfo partitionedQueryExecutionInfo, + List targetRanges, + String collectionRid, + UUID correlatedActivityId) { + + int initialPageSize = Utils.getValueOrDefault(feedOptions.maxItemCount(), ParallelQueryConfig.ClientInternalPageSize); + + BadRequestException validationError = Utils.checkRequestOrReturnException + (initialPageSize > 0, "MaxItemCount", "INVALID MaxItemCount %s", initialPageSize); + if (validationError != null) { + return Flux.error(validationError); + } + + QueryInfo queryInfo = partitionedQueryExecutionInfo.getQueryInfo(); + + boolean getLazyFeedResponse = queryInfo.hasTop(); + + // We need to compute the optimal initial page size for order-by queries + if (queryInfo.hasOrderBy()) { + int top; + if (queryInfo.hasTop() && (top = partitionedQueryExecutionInfo.getQueryInfo().getTop()) > 0) { + int pageSizeWithTop = Math.min( + (int)Math.ceil(top / (double)targetRanges.size()) * PageSizeFactorForTop, + top); + + if (initialPageSize > 0) { + initialPageSize = Math.min(pageSizeWithTop, initialPageSize); + } + else { + initialPageSize = pageSizeWithTop; + } + } + // TODO: do not support continuation in string format right now + // else if (isContinuationExpected) + // { + // if (initialPageSize < 0) + // { + // initialPageSize = (int)Math.Max(feedOptions.MaxBufferedItemCount, ParallelQueryConfig.GetConfig().DefaultMaximumBufferSize); + // } + // + // initialPageSize = Math.Min( + // (int)Math.Ceiling(initialPageSize / (double)targetRanges.Count) * PageSizeFactorForTop, + // initialPageSize); + // } + } + + return PipelinedDocumentQueryExecutionContext.createAsync( + client, + resourceTypeEnum, + resourceType, + query, + feedOptions, + resourceLink, + collectionRid, + partitionedQueryExecutionInfo, + targetRanges, + initialPageSize, + isContinuationExpected, + getLazyFeedResponse, + correlatedActivityId); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/ExceptionHelper.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/ExceptionHelper.java new file mode 100644 index 0000000000000..b34c251fc7643 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/ExceptionHelper.java @@ -0,0 +1,63 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal 
+ * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.query; + +import java.util.concurrent.ExecutionException; + +class ExceptionHelper { + + private ExceptionHelper() {} + + public static Throwable unwrap(Throwable e) { + if (e.getCause() == null) { + return e; + } + if (e instanceof IllegalStateException || e instanceof ExecutionException) { + return unwrap(e.getCause()); + } + return e; + } + + public static Throwable unwrapIllegalStateException(Exception e) { + if (e instanceof IllegalStateException && e.getCause() != null) { + return e.getCause(); + } + return e; + } + + public static Throwable unwrapExecutionException(Exception e) { + if (e instanceof RuntimeException && e.getCause() != null) { + return e.getCause(); + } + return e; + } + + public static RuntimeException toRuntimeException(Throwable e) { + if (e instanceof RuntimeException) { + return (RuntimeException) e; + } + throw new IllegalStateException(e); + } + +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/Fetcher.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/Fetcher.java new file mode 100644 index 0000000000000..73ccbd5b52066 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/Fetcher.java @@ -0,0 +1,121 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.query; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.Resource; +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Flux; + +import java.util.function.BiFunction; +import java.util.function.Function; + +class Fetcher { + private final static Logger logger = LoggerFactory.getLogger(Fetcher.class); + + private final BiFunction createRequestFunc; + private final Function>> executeFunc; + private final boolean isChangeFeed; + + private volatile boolean shouldFetchMore; + private volatile int maxItemCount; + private volatile int top; + private volatile String continuationToken; + + public Fetcher(BiFunction createRequestFunc, + Function>> executeFunc, + String continuationToken, + boolean isChangeFeed, + int top, + int maxItemCount) { + + this.createRequestFunc = createRequestFunc; + this.executeFunc = executeFunc; + this.isChangeFeed = isChangeFeed; + + this.continuationToken = continuationToken; + this.top = top; + if (top == -1) { + this.maxItemCount = maxItemCount; + } else { + // it is a top query, we should not retrieve more than requested top. + this.maxItemCount = Math.min(maxItemCount, top); + } + this.shouldFetchMore = true; + } + + public boolean shouldFetchMore() { + return shouldFetchMore; + } + + public Flux> nextPage() { + RxDocumentServiceRequest request = createRequest(); + return nextPage(request); + } + + private void updateState(FeedResponse response) { + continuationToken = response.continuationToken(); + if (top != -1) { + top -= response.results().size(); + if (top < 0) { + // this shouldn't happen + // this means backend retrieved more items than requested + logger.warn("Azure Cosmos DB BackEnd Service returned more than requested {} items", maxItemCount); + top = 0; + } + maxItemCount = Math.min(maxItemCount, top); + } + + shouldFetchMore = shouldFetchMore && + // if token is null or top == 0 then done + (!StringUtils.isEmpty(continuationToken) && (top != 0)) && + // if change feed query and no changes then done + (!isChangeFeed || !BridgeInternal.noChanges(response)); + + logger.debug("Fetcher state updated: " + + "isChangeFeed = {}, continuation token = {}, max item count = {}, should fetch more = {}", + isChangeFeed, continuationToken, maxItemCount, shouldFetchMore); + } + + private RxDocumentServiceRequest createRequest() { + if (!shouldFetchMore) { + // this should never happen + logger.error("invalid state, trying to fetch more after completion"); + throw new IllegalStateException("INVALID state, trying to fetch more after completion"); + } + + return createRequestFunc.apply(continuationToken, maxItemCount); + } + + private Flux> nextPage(RxDocumentServiceRequest request) { + return executeFunc.apply(request).map(rsp -> { + updateState(rsp); + return rsp; + }); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/IDocumentQueryClient.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/IDocumentQueryClient.java new file mode 100644 index 0000000000000..6bd7f74abcece --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/IDocumentQueryClient.java @@ -0,0 +1,95 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a 
copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.query; + +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.internal.IRetryPolicyFactory; +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import com.azure.data.cosmos.internal.RxDocumentServiceResponse; +import com.azure.data.cosmos.internal.caches.IPartitionKeyRangeCache; +import com.azure.data.cosmos.internal.caches.RxCollectionCache; +import reactor.core.publisher.Mono; + +/** + * While this class is public, but it is not part of our published public APIs. + * This is meant to be internally used only by our sdk. + */ +public interface IDocumentQueryClient { + + /** + * TODO: this should be async returning observable + * @return + */ + RxCollectionCache getCollectionCache(); + + /** + * TODO: this should be async returning observable + * @return + */ + IPartitionKeyRangeCache getPartitionKeyRangeCache(); + + /** + * @return + */ + IRetryPolicyFactory getResetSessionTokenRetryPolicy(); + + /** + * TODO: this should be async returning observable + * @return + */ + ConsistencyLevel getDefaultConsistencyLevelAsync(); + + /** + * TODO: this should be async returning observable + * @return + */ + ConsistencyLevel getDesiredConsistencyLevelAsync(); + + Mono executeQueryAsync(RxDocumentServiceRequest request); + + QueryCompatibilityMode getQueryCompatibilityMode(); + + ///
+ /// A client query compatibility mode when making query request. + /// Can be used to force a specific query request format. + /// + enum QueryCompatibilityMode { + /// + /// DEFAULT (latest) query format. + /// + Default, + + /// + /// Query (application/query+json). + /// DEFAULT. + /// + Query, + + /// + /// SqlQuery (application/sql). + /// + SqlQuery + } + + Mono readFeedAsync(RxDocumentServiceRequest request); +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/IDocumentQueryExecutionComponent.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/IDocumentQueryExecutionComponent.java new file mode 100644 index 0000000000000..4825b6931de0b --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/IDocumentQueryExecutionComponent.java @@ -0,0 +1,36 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.query; + +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.Resource; +import reactor.core.publisher.Flux; + +/** + * While this class is public, but it is not part of our published public APIs. + * This is meant to be internally used only by our sdk. + */ +public interface IDocumentQueryExecutionComponent { + + Flux> drainAsync(int maxPageSize); +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/IDocumentQueryExecutionContext.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/IDocumentQueryExecutionContext.java new file mode 100644 index 0000000000000..9134daf10b657 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/IDocumentQueryExecutionContext.java @@ -0,0 +1,36 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.query; + +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.Resource; +import reactor.core.publisher.Flux; + +/** + * While this class is public, but it is not part of our published public APIs. + * This is meant to be internally used only by our sdk. + */ +public interface IDocumentQueryExecutionContext { + + Flux> executeAsync(); +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/ItemComparator.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/ItemComparator.java new file mode 100644 index 0000000000000..843e367d6906e --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/ItemComparator.java @@ -0,0 +1,65 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.query; + +import java.util.Comparator; + +public final class ItemComparator implements Comparator { + private ItemComparator() { + } + + private static class SingletonHelper { + private static final ItemComparator INSTANCE = new ItemComparator(); + } + + public static ItemComparator getInstance() { + return SingletonHelper.INSTANCE; + } + + @Override + public int compare(Object obj1, Object obj2) { + ItemType type1 = ItemTypeHelper.getOrderByItemType(obj1); + ItemType type2 = ItemTypeHelper.getOrderByItemType(obj2); + + int cmp = Integer.compare(type1.getVal(), type2.getVal()); + + if (cmp != 0) { + return cmp; + } + + switch (type1) { + case NoValue: + case Null: + return 0; + case Boolean: + return Boolean.compare((Boolean) obj1, (Boolean) obj2); + case Number: + return Double.compare(((Number) obj1).doubleValue(), ((Number) obj2).doubleValue()); + case String: + return ((String) obj1).compareTo((String) obj2); + default: + throw new ClassCastException(String.format("Unexpected type: %s", type1.toString())); + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/ItemType.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/ItemType.java new file mode 100644 index 0000000000000..56b90491c7835 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/ItemType.java @@ -0,0 +1,38 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.query; + +public enum ItemType { + NoValue(0x0), Null(0x1), Boolean(0x2), Number(0x4), String(0x5); + + private final int val; + + ItemType(int val) { + this.val = val; + } + + public int getVal() { + return this.val; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/ItemTypeHelper.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/ItemTypeHelper.java new file mode 100644 index 0000000000000..b43d8cf0fdfab --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/ItemTypeHelper.java @@ -0,0 +1,52 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.query; + +import com.azure.data.cosmos.internal.Undefined; + +public final class ItemTypeHelper { + public static ItemType getOrderByItemType(Object obj) { + if (obj == null) { + return ItemType.Null; + } + + if (obj instanceof Undefined) { + return ItemType.NoValue; + } + + if (obj instanceof Boolean) { + return ItemType.Boolean; + } + + if (obj instanceof Number) { + return ItemType.Number; + } + + if (obj instanceof String) { + return ItemType.String; + } + + throw new IllegalArgumentException(String.format("Unexpected type: %s", obj.getClass().toString())); + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/OrderByContinuationToken.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/OrderByContinuationToken.java new file mode 100644 index 0000000000000..64993e10cece8 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/OrderByContinuationToken.java @@ -0,0 +1,153 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.query; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.JsonSerializable; +import com.azure.data.cosmos.internal.Utils.ValueHolder; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.node.ArrayNode; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.List; + +/** + * While this class is public, but it is not part of our published public APIs. + * This is meant to be internally used only by our sdk. + */ +public final class OrderByContinuationToken extends JsonSerializable { + private static final String CompositeContinuationTokenPropertyName = "compositeToken"; + private static final String OrderByItemsPropetryName = "orderByItems"; + private static final String RidPropertyName = "rid"; + private static final String InclusivePropertyName = "inclusive"; + private static final Logger logger = LoggerFactory.getLogger(OrderByContinuationToken.class); + + public OrderByContinuationToken(CompositeContinuationToken compositeContinuationToken, QueryItem[] orderByItems, + String rid, boolean inclusive) { + if (compositeContinuationToken == null) { + throw new IllegalArgumentException("CompositeContinuationToken must not be null."); + } + + if (orderByItems == null) { + throw new IllegalArgumentException("orderByItems must not be null."); + } + + if (orderByItems.length == 0) { + throw new IllegalArgumentException("orderByItems must not be empty."); + } + + if (rid == null) { + throw new IllegalArgumentException("rid must not be null."); + } + + this.setCompositeContinuationToken(compositeContinuationToken); + this.setOrderByItems(orderByItems); + this.setRid(rid); + this.setInclusive(inclusive); + } + + private OrderByContinuationToken(String serializedOrderByContinuationToken) { + super(serializedOrderByContinuationToken); + } + + public static boolean tryParse(String serializedOrderByContinuationToken, + ValueHolder outOrderByContinuationToken) { + boolean parsed; + try { + OrderByContinuationToken orderByContinuationToken = new OrderByContinuationToken( + serializedOrderByContinuationToken); + CompositeContinuationToken compositeContinuationToken = orderByContinuationToken + .getCompositeContinuationToken(); + if (compositeContinuationToken == null) { + throw new IllegalArgumentException("compositeContinuationToken must not be null."); + } + + orderByContinuationToken.getOrderByItems(); + orderByContinuationToken.getRid(); + orderByContinuationToken.getInclusive(); + + outOrderByContinuationToken.v = orderByContinuationToken; + parsed = true; + } catch (Exception ex) { + logger.debug( + "Received exception {} when trying to parse: {}", + ex.getMessage(), + serializedOrderByContinuationToken); + parsed = false; + outOrderByContinuationToken.v = null; + } + + return parsed; + } + + public CompositeContinuationToken getCompositeContinuationToken() { + ValueHolder outCompositeContinuationToken = new ValueHolder(); + boolean succeeded = 
CompositeContinuationToken.tryParse(super.getString(CompositeContinuationTokenPropertyName), + outCompositeContinuationToken); + if (!succeeded) { + throw new IllegalArgumentException("Continuation Token was not able to be parsed"); + } + + return outCompositeContinuationToken.v; + } + + public QueryItem[] getOrderByItems() { + List queryItems = new ArrayList(); + ArrayNode arrayNode = (ArrayNode) super.get(OrderByItemsPropetryName); + for (JsonNode jsonNode : arrayNode) { + QueryItem queryItem = new QueryItem(jsonNode.toString()); + queryItems.add(queryItem); + } + + QueryItem[] queryItemsArray = new QueryItem[queryItems.size()]; + + return queryItems.toArray(queryItemsArray); + } + + public String getRid() { + return super.getString(RidPropertyName); + } + + public boolean getInclusive() { + return super.getBoolean(InclusivePropertyName); + } + + private void setCompositeContinuationToken(CompositeContinuationToken compositeContinuationToken) { + BridgeInternal.setProperty(this, CompositeContinuationTokenPropertyName, compositeContinuationToken.toJson()); + } + + private void setOrderByItems(QueryItem[] orderByItems) { + BridgeInternal.setProperty(this, OrderByItemsPropetryName, orderByItems); + } + + private void setRid(String rid) { + BridgeInternal.setProperty(this, RidPropertyName, rid); + } + + private void setInclusive(boolean inclusive) { + BridgeInternal.setProperty(this, InclusivePropertyName, inclusive); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/OrderByDocumentProducer.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/OrderByDocumentProducer.java new file mode 100644 index 0000000000000..d5b4cec4e74bd --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/OrderByDocumentProducer.java @@ -0,0 +1,113 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos.internal.query; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.Resource; +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.IDocumentClientRetryPolicy; +import com.azure.data.cosmos.internal.PartitionKeyRange; +import com.azure.data.cosmos.internal.QueryMetrics; +import com.azure.data.cosmos.internal.RequestChargeTracker; +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import com.azure.data.cosmos.internal.Utils; +import com.azure.data.cosmos.internal.query.orderbyquery.OrderByRowResult; +import com.azure.data.cosmos.internal.query.orderbyquery.OrderbyRowComparer; +import reactor.core.publisher.Flux; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.Callable; +import java.util.function.Function; + +class OrderByDocumentProducer extends DocumentProducer { + private final OrderbyRowComparer consumeComparer; + private final Map targetRangeToOrderByContinuationTokenMap; + + OrderByDocumentProducer( + OrderbyRowComparer consumeComparer, + IDocumentQueryClient client, + String collectionResourceId, + FeedOptions feedOptions, + TriFunction createRequestFunc, + Function>> executeRequestFunc, + PartitionKeyRange targetRange, + String collectionLink, + Callable createRetryPolicyFunc, + Class resourceType, + UUID correlatedActivityId, + int initialPageSize, + String initialContinuationToken, + int top, + Map targetRangeToOrderByContinuationTokenMap) { + super(client, collectionResourceId, feedOptions, createRequestFunc, executeRequestFunc, targetRange, collectionLink, + createRetryPolicyFunc, resourceType, correlatedActivityId, initialPageSize, initialContinuationToken, top); + this.consumeComparer = consumeComparer; + this.targetRangeToOrderByContinuationTokenMap = targetRangeToOrderByContinuationTokenMap; + } + + protected Flux produceOnSplit(Flux> replacementProducers) { + return replacementProducers.collectList().flux().flatMap(documentProducers -> { + RequestChargeTracker tracker = new RequestChargeTracker(); + Map queryMetricsMap = new HashMap<>(); + return OrderByUtils.orderedMerge(resourceType, consumeComparer, tracker, documentProducers, queryMetricsMap, + targetRangeToOrderByContinuationTokenMap) + .map(orderByQueryResult -> resultPageFrom(tracker, orderByQueryResult)); + }); + } + + @SuppressWarnings("unchecked") + private DocumentProducerFeedResponse resultPageFrom(RequestChargeTracker tracker, OrderByRowResult row) { + double requestCharge = tracker.getAndResetCharge(); + Map headers = Utils.immutableMapOf(HttpConstants.HttpHeaders.REQUEST_CHARGE, String.valueOf(requestCharge)); + FeedResponse fr = BridgeInternal.createFeedResponse(Collections.singletonList((T) row), headers); + return new DocumentProducerFeedResponse(fr, row.getSourcePartitionKeyRange()); + } + + protected DocumentProducer createChildDocumentProducerOnSplit( + PartitionKeyRange targetRange, + String initialContinuationToken) { + + return new OrderByDocumentProducer<>( + consumeComparer, + client, + collectionRid, + feedOptions, + createRequestFunc, + executeRequestFuncWithRetries, + targetRange, + collectionLink, + createRetryPolicyFunc, + resourceType , + correlatedActivityId, + pageSize, + initialContinuationToken, + top, + this.targetRangeToOrderByContinuationTokenMap); + } + +} diff --git 
a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/OrderByDocumentQueryExecutionContext.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/OrderByDocumentQueryExecutionContext.java new file mode 100644 index 0000000000000..30afa3602176a --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/OrderByDocumentQueryExecutionContext.java @@ -0,0 +1,642 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.query; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.Resource; +import com.azure.data.cosmos.SqlQuerySpec; +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.IDocumentClientRetryPolicy; +import com.azure.data.cosmos.internal.PartitionKeyRange; +import com.azure.data.cosmos.internal.QueryMetrics; +import com.azure.data.cosmos.internal.RequestChargeTracker; +import com.azure.data.cosmos.internal.ResourceType; +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import com.azure.data.cosmos.internal.Utils; +import com.azure.data.cosmos.internal.Utils.ValueHolder; +import com.azure.data.cosmos.internal.query.orderbyquery.OrderByRowResult; +import com.azure.data.cosmos.internal.query.orderbyquery.OrderbyRowComparer; +import com.azure.data.cosmos.internal.routing.Range; +import org.apache.commons.lang3.NotImplementedException; +import org.apache.commons.lang3.tuple.ImmutablePair; +import reactor.core.publisher.Flux; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.Callable; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.function.Function; + +/** + * While this class is public, but it is not part of our published public APIs. + * This is meant to be internally used only by our sdk. 
+ */ +public class OrderByDocumentQueryExecutionContext + extends ParallelDocumentQueryExecutionContextBase { + private final String FormatPlaceHolder = "{documentdb-formattableorderbyquery-filter}"; + private final String True = "true"; + private final String collectionRid; + private final OrderbyRowComparer consumeComparer; + private final RequestChargeTracker tracker; + private final ConcurrentMap queryMetricMap; + private Flux> orderByObservable; + private final Map targetRangeToOrderByContinuationTokenMap; + + private OrderByDocumentQueryExecutionContext( + IDocumentQueryClient client, + List partitionKeyRanges, + ResourceType resourceTypeEnum, + Class klass, + SqlQuerySpec query, + FeedOptions feedOptions, + String resourceLink, + String rewrittenQuery, + boolean isContinuationExpected, + boolean getLazyFeedResponse, + OrderbyRowComparer consumeComparer, + String collectionRid, + UUID correlatedActivityId) { + super(client, partitionKeyRanges, resourceTypeEnum, klass, query, feedOptions, resourceLink, rewrittenQuery, + isContinuationExpected, getLazyFeedResponse, correlatedActivityId); + this.collectionRid = collectionRid; + this.consumeComparer = consumeComparer; + this.tracker = new RequestChargeTracker(); + this.queryMetricMap = new ConcurrentHashMap<>(); + targetRangeToOrderByContinuationTokenMap = new HashMap<>(); + } + + public static Flux> createAsync( + IDocumentQueryClient client, + ResourceType resourceTypeEnum, + Class resourceType, + SqlQuerySpec expression, + FeedOptions feedOptions, + String resourceLink, + String collectionRid, + PartitionedQueryExecutionInfo partitionedQueryExecutionInfo, + List partitionKeyRanges, + int initialPageSize, + boolean isContinuationExpected, + boolean getLazyFeedResponse, + UUID correlatedActivityId) { + + OrderByDocumentQueryExecutionContext context = new OrderByDocumentQueryExecutionContext(client, + partitionKeyRanges, + resourceTypeEnum, + resourceType, + expression, + feedOptions, + resourceLink, + partitionedQueryExecutionInfo.getQueryInfo().getRewrittenQuery(), + isContinuationExpected, + getLazyFeedResponse, + new OrderbyRowComparer(partitionedQueryExecutionInfo.getQueryInfo().getOrderBy()), + collectionRid, + correlatedActivityId); + + try { + context.initialize(partitionKeyRanges, + partitionedQueryExecutionInfo.getQueryInfo().getOrderBy(), + partitionedQueryExecutionInfo.getQueryInfo().getOrderByExpressions(), + initialPageSize, + feedOptions.requestContinuation()); + + return Flux.just(context); + } catch (CosmosClientException dce) { + return Flux.error(dce); + } + } + + private void initialize( + List partitionKeyRanges, + List sortOrders, + Collection orderByExpressions, + int initialPageSize, + String continuationToken) throws CosmosClientException { + if (continuationToken == null) { + // First iteration so use null continuation tokens and "true" filters + Map partitionKeyRangeToContinuationToken = new HashMap(); + for (PartitionKeyRange partitionKeyRange : partitionKeyRanges) { + partitionKeyRangeToContinuationToken.put(partitionKeyRange, + null); + } + + super.initialize(collectionRid, + partitionKeyRangeToContinuationToken, + initialPageSize, + new SqlQuerySpec(querySpec.queryText().replace(FormatPlaceHolder, + True), + querySpec.parameters())); + } else { + // Check to see if order by continuation token is a valid JSON. 
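The validation that follows uses a tryParse plus ValueHolder "out parameter" idiom instead of letting the parser throw, so a malformed continuation token can be turned into a 400 Bad Request by the caller. A small self-contained sketch of that idiom, with a hypothetical token format used purely for illustration:

final class TryParseSketch {

    // Minimal stand-in for the SDK's Utils.ValueHolder<T> out parameter.
    static final class ValueHolder<T> {
        T v;
    }

    // Hypothetical token shape, illustration only: "<offset>:<inclusive>", e.g. "42:true".
    static final class Token {
        final int offset;
        final boolean inclusive;

        Token(int offset, boolean inclusive) {
            this.offset = offset;
            this.inclusive = inclusive;
        }
    }

    // Report malformed input by returning false and leaving the holder empty.
    static boolean tryParse(String serialized, ValueHolder<Token> out) {
        out.v = null;
        if (serialized == null) {
            return false;
        }
        String[] parts = serialized.split(":", 2);
        if (parts.length != 2) {
            return false;
        }
        try {
            out.v = new Token(Integer.parseInt(parts[0]), Boolean.parseBoolean(parts[1]));
            return true;
        } catch (NumberFormatException e) {
            return false;
        }
    }

    public static void main(String[] args) {
        ValueHolder<Token> holder = new ValueHolder<>();
        if (!tryParse("42:true", holder)) {
            throw new IllegalArgumentException("invalid continuation token");
        }
        System.out.println(holder.v.offset + " inclusive=" + holder.v.inclusive);
    }
}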
+ OrderByContinuationToken orderByContinuationToken; + ValueHolder outOrderByContinuationToken = new ValueHolder(); + if (!OrderByContinuationToken.tryParse(continuationToken, + outOrderByContinuationToken)) { + String message = String.format("INVALID JSON in continuation token %s for OrderBy~Context", + continuationToken); + throw BridgeInternal.createCosmosClientException(HttpConstants.StatusCodes.BADREQUEST, + message); + } + + orderByContinuationToken = outOrderByContinuationToken.v; + + CompositeContinuationToken compositeContinuationToken = orderByContinuationToken + .getCompositeContinuationToken(); + // Check to see if the ranges inside are valid + if (compositeContinuationToken.getRange().isEmpty()) { + String message = String.format("INVALID RANGE in the continuation token %s for OrderBy~Context.", + continuationToken); + throw BridgeInternal.createCosmosClientException(HttpConstants.StatusCodes.BADREQUEST, + message); + } + + // At this point the token is valid. + ImmutablePair targetIndexAndFilters = this.GetFiltersForPartitions( + orderByContinuationToken, + partitionKeyRanges, + sortOrders, + orderByExpressions); + + int targetIndex = targetIndexAndFilters.left; + targetRangeToOrderByContinuationTokenMap.put(String.valueOf(targetIndex), orderByContinuationToken); + FormattedFilterInfo formattedFilterInfo = targetIndexAndFilters.right; + + // Left + String filterForRangesLeftOfTheTargetRange = formattedFilterInfo.getFilterForRangesLeftOfTheTargetRange(); + this.initializeRangeWithContinuationTokenAndFilter(partitionKeyRanges, + /* startInclusive */ 0, + /* endExclusive */ targetIndex, + /* continuationToken */ null, + filterForRangesLeftOfTheTargetRange, + initialPageSize); + + // Target + String filterForTargetRange = formattedFilterInfo.getFilterForTargetRange(); + this.initializeRangeWithContinuationTokenAndFilter(partitionKeyRanges, + /* startInclusive */ targetIndex, + /* endExclusive */ targetIndex + 1, + null, + filterForTargetRange, + initialPageSize); + + // Right + String filterForRangesRightOfTheTargetRange = formattedFilterInfo.getFilterForRangesRightOfTheTargetRange(); + this.initializeRangeWithContinuationTokenAndFilter(partitionKeyRanges, + /* startInclusive */ targetIndex + 1, + /* endExclusive */ partitionKeyRanges.size(), + /* continuationToken */ null, + filterForRangesRightOfTheTargetRange, + initialPageSize); + } + + orderByObservable = OrderByUtils.orderedMerge(resourceType, + consumeComparer, + tracker, + documentProducers, + queryMetricMap, + targetRangeToOrderByContinuationTokenMap); + } + + private void initializeRangeWithContinuationTokenAndFilter( + List partitionKeyRanges, + int startInclusive, + int endExclusive, + String continuationToken, + String filter, + int initialPageSize) { + Map partitionKeyRangeToContinuationToken = new HashMap(); + for (int i = startInclusive; i < endExclusive; i++) { + PartitionKeyRange partitionKeyRange = partitionKeyRanges.get(i); + partitionKeyRangeToContinuationToken.put(partitionKeyRange, + continuationToken); + } + + super.initialize(collectionRid, + partitionKeyRangeToContinuationToken, + initialPageSize, + new SqlQuerySpec(querySpec.queryText().replace(FormatPlaceHolder, + filter), + querySpec.parameters())); + } + + private ImmutablePair GetFiltersForPartitions( + OrderByContinuationToken orderByContinuationToken, + List partitionKeyRanges, + List sortOrders, + Collection orderByExpressions) throws CosmosClientException { + // Find the partition key range we left off on + int startIndex = 
this.FindTargetRangeAndExtractContinuationTokens(partitionKeyRanges, + orderByContinuationToken.getCompositeContinuationToken().getRange()); + + // Get the filters. + FormattedFilterInfo formattedFilterInfo = this.GetFormattedFilters(orderByExpressions, + orderByContinuationToken.getOrderByItems(), + sortOrders, + orderByContinuationToken.getInclusive()); + + return new ImmutablePair(startIndex, + formattedFilterInfo); + } + + private OrderByDocumentQueryExecutionContext.FormattedFilterInfo GetFormattedFilters( + Collection orderByExpressionCollection, + QueryItem[] orderByItems, + Collection sortOrderCollection, + boolean inclusive) { + // Convert to arrays + SortOrder[] sortOrders = new SortOrder[sortOrderCollection.size()]; + sortOrderCollection.toArray(sortOrders); + + String[] expressions = new String[orderByExpressionCollection.size()]; + orderByExpressionCollection.toArray(expressions); + + // Validate the inputs + if (expressions.length != sortOrders.length) { + throw new IllegalArgumentException("expressions.size() != sortOrders.size()"); + } + + if (expressions.length != orderByItems.length) { + throw new IllegalArgumentException("expressions.size() != orderByItems.length"); + } + + // When we run cross partition queries, + // we only serialize the continuation token for the partition that we left off + // on. + // The only problem is that when we resume the order by query, + // we don't have continuation tokens for all other partitions. + // The saving grace is that the data has a composite sort order(query sort + // order, partition key range id) + // so we can generate range filters which in turn the backend will turn into rid + // based continuation tokens, + // which is enough to get the streams of data flowing from all partitions. + // The details of how this is done is described below: + + int numOrderByItems = expressions.length; + boolean isSingleOrderBy = numOrderByItems == 1; + StringBuilder left = new StringBuilder(); + StringBuilder target = new StringBuilder(); + StringBuilder right = new StringBuilder(); + + if (isSingleOrderBy) { + // For a single order by query we resume the continuations in this manner + // Suppose the query is SELECT* FROM c ORDER BY c.string ASC + // And we left off on partition N with the value "B" + // Then + // ALL the partitions to the left will have finished reading "B" + // Partition N is still reading "B" + // ALL the partitions to the right have let to read a "B + // Therefore the filters should be + // > "B" , >= "B", and >= "B" respectively + // Repeat the same logic for DESC and you will get + // < "B", <= "B", and <= "B" respectively + // The general rule becomes + // For ASC + // > for partitions to the left + // >= for the partition we left off on + // >= for the partitions to the right + // For DESC + // < for partitions to the left + // <= for the partition we left off on + // <= for the partitions to the right + String expression = expressions[0]; + SortOrder sortOrder = sortOrders[0]; + QueryItem orderByItem = orderByItems[0]; + Object rawItem = orderByItem.getItem(); + String orderByItemToString; + if (rawItem instanceof String) { + orderByItemToString = "\"" + rawItem.toString().replaceAll("\"", + "\\\"") + "\""; + } else { + orderByItemToString = rawItem.toString(); + } + + left.append(String.format("%s %s %s", + expression, + (sortOrder == SortOrder.Descending ? "<" : ">"), + orderByItemToString)); + + if (inclusive) { + target.append(String.format("%s %s %s", + expression, + (sortOrder == SortOrder.Descending ? 
"<=" : ">="), + orderByItemToString)); + } else { + target.append(String.format("%s %s %s", + expression, + (sortOrder == SortOrder.Descending ? "<" : ">"), + orderByItemToString)); + } + + right.append(String.format("%s %s %s", + expression, + (sortOrder == SortOrder.Descending ? "<=" : ">="), + orderByItemToString)); + } else { + // This code path needs to be implemented, but it's error prone and needs + // testing. + // You can port the implementation from the .net SDK and it should work if + // ported right. + throw new NotImplementedException( + "Resuming a multi order by query from a continuation token is not supported yet."); + } + + return new FormattedFilterInfo(left.toString(), + target.toString(), + right.toString()); + } + + protected OrderByDocumentProducer createDocumentProducer( + String collectionRid, + PartitionKeyRange targetRange, + String continuationToken, + int initialPageSize, + FeedOptions feedOptions, + SqlQuerySpec querySpecForInit, + Map commonRequestHeaders, + TriFunction createRequestFunc, + Function>> executeFunc, + Callable createRetryPolicyFunc) { + return new OrderByDocumentProducer(consumeComparer, + client, + collectionRid, + feedOptions, + createRequestFunc, + executeFunc, + targetRange, + collectionRid, + () -> client.getResetSessionTokenRetryPolicy().getRequestPolicy(), + resourceType, + correlatedActivityId, + initialPageSize, + continuationToken, + top, + this.targetRangeToOrderByContinuationTokenMap); + } + + private static class ItemToPageTransformer + implements Function>, Flux>> { + private final static int DEFAULT_PAGE_SIZE = 100; + private final RequestChargeTracker tracker; + private final int maxPageSize; + private final ConcurrentMap queryMetricMap; + private final Function, String> orderByContinuationTokenCallback; + private volatile FeedResponse> previousPage; + + public ItemToPageTransformer( + RequestChargeTracker tracker, + int maxPageSize, + ConcurrentMap queryMetricsMap, + Function, String> orderByContinuationTokenCallback) { + this.tracker = tracker; + this.maxPageSize = maxPageSize > 0 ? 
maxPageSize : DEFAULT_PAGE_SIZE; + this.queryMetricMap = queryMetricsMap; + this.orderByContinuationTokenCallback = orderByContinuationTokenCallback; + this.previousPage = null; + } + + private static Map headerResponse( + double requestCharge) { + return Utils.immutableMapOf(HttpConstants.HttpHeaders.REQUEST_CHARGE, + String.valueOf(requestCharge)); + } + + private FeedResponse> addOrderByContinuationToken( + FeedResponse> page, + String orderByContinuationToken) { + Map headers = new HashMap<>(page.responseHeaders()); + headers.put(HttpConstants.HttpHeaders.CONTINUATION, + orderByContinuationToken); + return BridgeInternal.createFeedResponseWithQueryMetrics(page.results(), + headers, + BridgeInternal.queryMetricsFromFeedResponse(page)); + } + + @Override + public Flux> apply(Flux> source) { + return source + // .windows: creates an observable of observable where inner observable + // emits max maxPageSize elements + .window(maxPageSize).map(Flux::collectList) + // flattens the observable>>> to + // Observable>> + .flatMap(resultListObs -> resultListObs, + 1) + // translates Observable>> to + // Observable>>> + .map(orderByRowResults -> { + // construct a page from result with request charge + FeedResponse> feedResponse = BridgeInternal.createFeedResponse( + orderByRowResults, + headerResponse(tracker.getAndResetCharge())); + if (!queryMetricMap.isEmpty()) { + for (String key : queryMetricMap.keySet()) { + BridgeInternal.putQueryMetricsIntoMap(feedResponse, + key, + queryMetricMap.get(key)); + } + } + return feedResponse; + }) + // Emit an empty page so the downstream observables know when there are no more + // results. + .concatWith(Flux.defer(() -> { + return Flux.just(BridgeInternal.createFeedResponse(Utils.immutableListOf(), + null)); + })) + // CREATE pairs from the stream to allow the observables downstream to "peek" + // 1, 2, 3, null -> (null, 1), (1, 2), (2, 3), (3, null) + .map(orderByRowResults -> { + ImmutablePair>, FeedResponse>> previousCurrent = new ImmutablePair>, FeedResponse>>( + this.previousPage, + orderByRowResults); + this.previousPage = orderByRowResults; + return previousCurrent; + }) + // remove the (null, 1) + .skip(1) + // Add the continuation token based on the current and next page. 
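+                    // Illustrative example of the pairing above: for pages P1=[r1, r2], P2=[r3, r4],
+                    // P1 is emitted with a continuation token derived from r3 (the first row the client
+                    // has not yet seen), and the last non-empty page is emitted with a null continuation
+                    // token because the trailing empty page signals that the results are exhausted.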
+ .map(currentNext -> { + FeedResponse> current = currentNext.left; + FeedResponse> next = currentNext.right; + + FeedResponse> page; + if (next.results().size() == 0) { + // No more pages no send current page with null continuation token + page = current; + page = this.addOrderByContinuationToken(page, + null); + } else { + // Give the first page but use the first value in the next page to generate the + // continuation token + page = current; + List> results = next.results(); + OrderByRowResult firstElementInNextPage = results.get(0); + String orderByContinuationToken = this.orderByContinuationTokenCallback + .apply(firstElementInNextPage); + page = this.addOrderByContinuationToken(page, + orderByContinuationToken); + } + + return page; + }).map(feedOfOrderByRowResults -> { + // FeedResponse> to FeedResponse + List unwrappedResults = new ArrayList(); + for (OrderByRowResult orderByRowResult : feedOfOrderByRowResults.results()) { + unwrappedResults.add(orderByRowResult.getPayload()); + } + + return BridgeInternal.createFeedResponseWithQueryMetrics(unwrappedResults, + feedOfOrderByRowResults.responseHeaders(), + BridgeInternal.queryMetricsFromFeedResponse(feedOfOrderByRowResults)); + }).switchIfEmpty(Flux.defer(() -> { + // create an empty page if there is no result + return Flux.just(BridgeInternal.createFeedResponse(Utils.immutableListOf(), + headerResponse(tracker.getAndResetCharge()))); + })); + } + } + + @Override + public Flux> drainAsync( + int maxPageSize) { + //// In order to maintain the continuation token for the user we must drain with + //// a few constraints + //// 1) We always drain from the partition, which has the highest priority item + //// first + //// 2) If multiple partitions have the same priority item then we drain from + //// the left most first + //// otherwise we would need to keep track of how many of each item we drained + //// from each partition + //// (just like parallel queries). + //// Visually that look the following case where we have three partitions that + //// are numbered and store letters. + //// For teaching purposes I have made each item a tuple of the following form: + //// + //// So that duplicates across partitions are distinct, but duplicates within + //// partitions are indistinguishable. 
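+        //// (Concretely: think of each item as a pair of its value and the index of the partition it
+        //// came from, e.g. "A" from partition 1 versus "A" from partition 2; equal values are then
+        //// drained left-most partition first, which is the tie-breaking rule described above.)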
+ //// |-------| |-------| |-------| + //// | | | | | | + //// | | | | | | + //// | | | | | | + //// | | | | | | + //// | | | | | | + //// | | | | | | + //// | | | | | | + //// |-------| |-------| |-------| + //// Now the correct drain order in this case is: + //// ,,,,,,,,,,, + //// ,,,,,,,,, + //// In more mathematical terms + //// 1) always comes before where x < z + //// 2) always come before where j < k + return this.orderByObservable.compose(new ItemToPageTransformer(tracker, + maxPageSize, + this.queryMetricMap, + this::getContinuationToken)); + } + + @Override + public Flux> executeAsync() { + return drainAsync(feedOptions.maxItemCount()); + } + + private String getContinuationToken( + OrderByRowResult orderByRowResult) { + // rid + String rid = orderByRowResult.resourceId(); + + // CompositeContinuationToken + String backendContinuationToken = orderByRowResult.getSourceBackendContinuationToken(); + Range range = orderByRowResult.getSourcePartitionKeyRange().toRange(); + + boolean inclusive = true; + CompositeContinuationToken compositeContinuationToken = new CompositeContinuationToken(backendContinuationToken, + range); + + // OrderByItems + QueryItem[] orderByItems = new QueryItem[orderByRowResult.getOrderByItems().size()]; + orderByRowResult.getOrderByItems().toArray(orderByItems); + + return new OrderByContinuationToken(compositeContinuationToken, + orderByItems, + rid, + inclusive).toJson(); + } + + private final class FormattedFilterInfo { + private final String filterForRangesLeftOfTheTargetRange; + private final String filterForTargetRange; + private final String filterForRangesRightOfTheTargetRange; + + public FormattedFilterInfo( + String filterForRangesLeftOfTheTargetRange, + String filterForTargetRange, + String filterForRangesRightOfTheTargetRange) { + if (filterForRangesLeftOfTheTargetRange == null) { + throw new IllegalArgumentException("filterForRangesLeftOfTheTargetRange must not be null."); + } + + if (filterForTargetRange == null) { + throw new IllegalArgumentException("filterForTargetRange must not be null."); + } + + if (filterForRangesRightOfTheTargetRange == null) { + throw new IllegalArgumentException("filterForRangesRightOfTheTargetRange must not be null."); + } + + this.filterForRangesLeftOfTheTargetRange = filterForRangesLeftOfTheTargetRange; + this.filterForTargetRange = filterForTargetRange; + this.filterForRangesRightOfTheTargetRange = filterForRangesRightOfTheTargetRange; + } + + /** + * @return the filterForRangesLeftOfTheTargetRange + */ + public String getFilterForRangesLeftOfTheTargetRange() { + return filterForRangesLeftOfTheTargetRange; + } + + /** + * @return the filterForTargetRange + */ + public String getFilterForTargetRange() { + return filterForTargetRange; + } + + /** + * @return the filterForRangesRightOfTheTargetRange + */ + public String getFilterForRangesRightOfTheTargetRange() { + return filterForRangesRightOfTheTargetRange; + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/OrderByUtils.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/OrderByUtils.java new file mode 100644 index 0000000000000..2837ac580b817 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/OrderByUtils.java @@ -0,0 +1,166 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in 
the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.query; + +import com.azure.data.cosmos.BadRequestException; +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.Resource; +import com.azure.data.cosmos.internal.QueryMetrics; +import com.azure.data.cosmos.internal.RequestChargeTracker; +import com.azure.data.cosmos.internal.ResourceId; +import com.azure.data.cosmos.internal.query.orderbyquery.OrderByRowResult; +import com.azure.data.cosmos.internal.query.orderbyquery.OrderbyRowComparer; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.node.ArrayNode; +import org.apache.commons.lang3.tuple.Pair; +import reactor.core.publisher.Flux; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.function.Function; +import java.util.stream.Collectors; + +class OrderByUtils { + + public static Flux> orderedMerge(Class klass, + OrderbyRowComparer consumeComparer, + RequestChargeTracker tracker, + List> documentProducers, + Map queryMetricsMap, + Map targetRangeToOrderByContinuationTokenMap) { + Flux>[] fluxes = documentProducers + .subList(0, documentProducers.size()) + .stream() + .map(producer -> + toOrderByQueryResultObservable(klass, producer, tracker, queryMetricsMap, targetRangeToOrderByContinuationTokenMap, consumeComparer.getSortOrders())) + .toArray(Flux[]::new); + return Flux.mergeOrdered(consumeComparer, fluxes); + } + + private static Flux> toOrderByQueryResultObservable(Class klass, + DocumentProducer producer, + RequestChargeTracker tracker, + Map queryMetricsMap, + Map targetRangeToOrderByContinuationTokenMap, + List sortOrders) { + return producer + .produceAsync() + .compose(new OrderByUtils.PageToItemTransformer(klass, tracker, queryMetricsMap, targetRangeToOrderByContinuationTokenMap, sortOrders)); + } + + private static class PageToItemTransformer implements Function.DocumentProducerFeedResponse>, Flux>> { + private final RequestChargeTracker tracker; + private final Class klass; + private final Map queryMetricsMap; + private final Map targetRangeToOrderByContinuationTokenMap; + private final List sortOrders; + + public PageToItemTransformer(Class klass, RequestChargeTracker tracker, Map queryMetricsMap, + Map targetRangeToOrderByContinuationTokenMap, List sortOrders) { + this.klass = klass; + this.tracker = tracker; + this.queryMetricsMap = queryMetricsMap; + this.targetRangeToOrderByContinuationTokenMap = targetRangeToOrderByContinuationTokenMap; + this.sortOrders = sortOrders; + } + + @Override + public Flux> apply(Flux.DocumentProducerFeedResponse> source) { + return 
source.flatMap(documentProducerFeedResponse -> { + for (String key : BridgeInternal.queryMetricsFromFeedResponse(documentProducerFeedResponse.pageResult).keySet()) { + if (queryMetricsMap.containsKey(key)) { + QueryMetrics qm = BridgeInternal.queryMetricsFromFeedResponse(documentProducerFeedResponse.pageResult).get(key); + queryMetricsMap.get(key).add(qm); + } else { + queryMetricsMap.put(key, BridgeInternal.queryMetricsFromFeedResponse(documentProducerFeedResponse.pageResult).get(key)); + } + } + List results = documentProducerFeedResponse.pageResult.results(); + OrderByContinuationToken orderByContinuationToken = targetRangeToOrderByContinuationTokenMap.get(documentProducerFeedResponse.sourcePartitionKeyRange.id()); + if (orderByContinuationToken != null) { + Pair booleanResourceIdPair = ResourceId.tryParse(orderByContinuationToken.getRid()); + if (!booleanResourceIdPair.getLeft()) { + return Flux.error(new BadRequestException(String.format("INVALID Rid in the continuation token %s for OrderBy~Context.", + orderByContinuationToken.getCompositeContinuationToken().getToken()))); + } + ResourceId continuationTokenRid = booleanResourceIdPair.getRight(); + results = results.stream() + .filter(tOrderByRowResult -> { + // When we resume a query on a partition there is a possibility that we only read a partial page from the backend + // meaning that will we repeat some documents if we didn't do anything about it. + // The solution is to filter all the documents that come before in the sort order, since we have already emitted them to the client. + // The key is to seek until we get an order by value that matches the order by value we left off on. + // Once we do that we need to seek to the correct _rid within the term, + // since there might be many documents with the same order by value we left off on. + List queryItems = new ArrayList(); + ArrayNode arrayNode = (ArrayNode) tOrderByRowResult.get("orderByItems"); + for (JsonNode jsonNode : arrayNode) { + QueryItem queryItem = new QueryItem(jsonNode.toString()); + queryItems.add(queryItem); + } + + // Check if its the same orderby item from the token + long cmp = 0; + for (int i = 0; i < sortOrders.size(); i++) { + cmp = ItemComparator.getInstance().compare(orderByContinuationToken.getOrderByItems()[i].getItem(), + queryItems.get(i).getItem()); + if (cmp != 0) { + cmp = sortOrders.get(i).equals(SortOrder.Descending) ? -cmp : cmp; + break; + } + } + + if (cmp == 0) { + // Once the item matches the order by items from the continuation tokens + // We still need to remove all the documents that have a lower rid in the rid sort order. + // If there is a tie in the sort order the documents should be in _rid order in the same direction as the first order by field. 
+ // So if it's ORDER BY c.age ASC, c.name DESC the _rids are ASC + // If ti's ORDER BY c.age DESC, c.name DESC the _rids are DESC + cmp = (continuationTokenRid.getDocument() - ResourceId.tryParse(tOrderByRowResult.resourceId()).getRight().getDocument()); + + if (sortOrders.iterator().next().equals(SortOrder.Descending)) { + cmp = -cmp; + } + return (cmp <= 0); + } + return true; + + }) + .collect(Collectors.toList()); + + } + + tracker.addCharge(documentProducerFeedResponse.pageResult.requestCharge()); + Flux x = Flux.fromIterable(results); + + return x.map(r -> new OrderByRowResult( + klass, + r.toJson(), + documentProducerFeedResponse.sourcePartitionKeyRange, + documentProducerFeedResponse.pageResult.continuationToken())); + }, 1); + } + } + +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/Paginator.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/Paginator.java new file mode 100644 index 0000000000000..4780965387d37 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/Paginator.java @@ -0,0 +1,94 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.query; + +import com.azure.data.cosmos.ChangeFeedOptions; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.Resource; +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Flux; + +import java.util.function.BiFunction; +import java.util.function.Function; + +/** + * While this class is public, but it is not part of our published public APIs. + * This is meant to be internally used only by our sdk. 
+ */ +public class Paginator { + + private final static Logger logger = LoggerFactory.getLogger(Paginator.class); + + public static Flux> getPaginatedChangeFeedQueryResultAsObservable( + ChangeFeedOptions feedOptions, BiFunction createRequestFunc, + Function>> executeFunc, Class resourceType, + int maxPageSize) { + return getPaginatedQueryResultAsObservable(feedOptions.requestContinuation(), createRequestFunc, executeFunc, resourceType, + -1, maxPageSize, true); + } + + public static Flux> getPaginatedQueryResultAsObservable( + FeedOptions feedOptions, + BiFunction createRequestFunc, + Function>> executeFunc, Class resourceType, + int maxPageSize) { + return getPaginatedQueryResultAsObservable(feedOptions.requestContinuation(), createRequestFunc, executeFunc, resourceType, + -1, maxPageSize); + } + + public static Flux> getPaginatedQueryResultAsObservable( + String continuationToken, + BiFunction createRequestFunc, + Function>> executeFunc, Class resourceType, + int top, int maxPageSize) { + return getPaginatedQueryResultAsObservable(continuationToken, createRequestFunc, executeFunc, resourceType, + top, maxPageSize, false); + } + + private static Flux> getPaginatedQueryResultAsObservable( + String continuationToken, + BiFunction createRequestFunc, + Function>> executeFunc, Class resourceType, + int top, int maxPageSize, boolean isChangeFeed) { + + return Flux.defer(() -> { + Flux>> generate = Flux.generate(() -> + new Fetcher<>(createRequestFunc, executeFunc, continuationToken, isChangeFeed, top, maxPageSize), + (tFetcher, sink) -> { + if (tFetcher.shouldFetchMore()) { + Flux> nextPage = tFetcher.nextPage(); + sink.next(nextPage); + } else { + logger.debug("No more results"); + sink.complete(); + } + return tFetcher; + }); + + return generate.flatMapSequential(feedResponseFlux -> feedResponseFlux, 1); + }); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/ParallelDocumentQueryExecutionContext.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/ParallelDocumentQueryExecutionContext.java new file mode 100644 index 0000000000000..61b9354ea8fb3 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/ParallelDocumentQueryExecutionContext.java @@ -0,0 +1,360 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos.internal.query; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.Resource; +import com.azure.data.cosmos.SqlQuerySpec; +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.IDocumentClientRetryPolicy; +import com.azure.data.cosmos.internal.PartitionKeyRange; +import com.azure.data.cosmos.internal.RequestChargeTracker; +import com.azure.data.cosmos.internal.ResourceType; +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import com.azure.data.cosmos.internal.Utils; +import com.azure.data.cosmos.internal.Utils.ValueHolder; +import org.apache.commons.lang3.tuple.ImmutablePair; +import reactor.core.publisher.Flux; + +import java.util.ArrayList; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.Callable; +import java.util.function.Function; +import java.util.stream.Collectors; +/** + * While this class is public, but it is not part of our published public APIs. + * This is meant to be internally used only by our sdk. + */ +public class ParallelDocumentQueryExecutionContext + extends ParallelDocumentQueryExecutionContextBase { + + private ParallelDocumentQueryExecutionContext( + IDocumentQueryClient client, + List partitionKeyRanges, + ResourceType resourceTypeEnum, + Class resourceType, + SqlQuerySpec query, + FeedOptions feedOptions, + String resourceLink, + String rewrittenQuery, + String collectionRid, + boolean isContinuationExpected, + boolean getLazyFeedResponse, + UUID correlatedActivityId) { + super(client, partitionKeyRanges, resourceTypeEnum, resourceType, query, feedOptions, resourceLink, + rewrittenQuery, isContinuationExpected, getLazyFeedResponse, correlatedActivityId); + } + + public static Flux> createAsync( + IDocumentQueryClient client, + ResourceType resourceTypeEnum, + Class resourceType, + SqlQuerySpec query, + FeedOptions feedOptions, + String resourceLink, + String collectionRid, + PartitionedQueryExecutionInfo partitionedQueryExecutionInfo, + List targetRanges, + int initialPageSize, + boolean isContinuationExpected, + boolean getLazyFeedResponse, + UUID correlatedActivityId) { + + ParallelDocumentQueryExecutionContext context = new ParallelDocumentQueryExecutionContext(client, + targetRanges, + resourceTypeEnum, + resourceType, + query, + feedOptions, + resourceLink, + partitionedQueryExecutionInfo.getQueryInfo().getRewrittenQuery(), + collectionRid, + isContinuationExpected, + getLazyFeedResponse, + correlatedActivityId); + + try { + context.initialize(collectionRid, + targetRanges, + initialPageSize, + feedOptions.requestContinuation()); + return Flux.just(context); + } catch (CosmosClientException dce) { + return Flux.error(dce); + } + } + + private void initialize( + String collectionRid, + List targetRanges, + int initialPageSize, + String continuationToken) throws CosmosClientException { + // Generate the corresponding continuation token map. + Map partitionKeyRangeToContinuationTokenMap = new HashMap(); + if (continuationToken == null) { + // If the user does not give a continuation token, + // then just start the query from the first partition. 
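+            // A null value in this map means "drain the partition from its beginning"; a non-null value
+            // is the backend continuation token to resume that partition from (see the else branch below).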
+ for (PartitionKeyRange targetRange : targetRanges) { + partitionKeyRangeToContinuationTokenMap.put(targetRange, + null); + } + } else { + // Figure out which partitions to resume from: + + // If a continuation token is given then we need to figure out partition key + // range it maps to + // in order to filter the partition key ranges. + // For example if suppliedCompositeContinuationToken.RANGE.Min == + // partition3.RANGE.Min, + // then we know that partitions 0, 1, 2 are fully drained. + + // Check to see if composite continuation token is a valid JSON. + ValueHolder outCompositeContinuationToken = new ValueHolder(); + if (!CompositeContinuationToken.tryParse(continuationToken, + outCompositeContinuationToken)) { + String message = String.format("INVALID JSON in continuation token %s for Parallel~Context", + continuationToken); + throw BridgeInternal.createCosmosClientException(HttpConstants.StatusCodes.BADREQUEST, + message); + } + + CompositeContinuationToken compositeContinuationToken = outCompositeContinuationToken.v; + + // Get the right hand side of the query ranges: + List filteredPartitionKeyRanges = this.getPartitionKeyRangesForContinuation( + compositeContinuationToken, + targetRanges); + + // The first partition is the one we left off on and have a backend continuation + // token for. + partitionKeyRangeToContinuationTokenMap.put(filteredPartitionKeyRanges.get(0), + compositeContinuationToken.getToken()); + + // The remaining partitions we have yet to touch / have null continuation tokens + for (int i = 1; i < filteredPartitionKeyRanges.size(); i++) { + partitionKeyRangeToContinuationTokenMap.put(filteredPartitionKeyRanges.get(i), + null); + } + } + + super.initialize(collectionRid, + partitionKeyRangeToContinuationTokenMap, + initialPageSize, + this.querySpec); + } + + private List getPartitionKeyRangesForContinuation( + CompositeContinuationToken compositeContinuationToken, + List partitionKeyRanges) throws CosmosClientException { + // Find the partition key range we left off on + int startIndex = this.FindTargetRangeAndExtractContinuationTokens(partitionKeyRanges, + compositeContinuationToken.getRange()); + + List rightHandSideRanges = new ArrayList(); + for (int i = startIndex; i < partitionKeyRanges.size(); i++) { + rightHandSideRanges.add(partitionKeyRanges.get(i)); + } + + return rightHandSideRanges; + } + + private static class EmptyPagesFilterTransformer + implements Function.DocumentProducerFeedResponse>, Flux>> { + private final RequestChargeTracker tracker; + private DocumentProducer.DocumentProducerFeedResponse previousPage; + + public EmptyPagesFilterTransformer( + RequestChargeTracker tracker) { + + if (tracker == null) { + throw new IllegalArgumentException("Request Charge Tracker must not be null."); + } + + this.tracker = tracker; + this.previousPage = null; + } + + private DocumentProducer.DocumentProducerFeedResponse plusCharge( + DocumentProducer.DocumentProducerFeedResponse documentProducerFeedResponse, + double charge) { + FeedResponse page = documentProducerFeedResponse.pageResult; + Map headers = new HashMap<>(page.responseHeaders()); + double pageCharge = page.requestCharge(); + pageCharge += charge; + headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, + String.valueOf(pageCharge)); + FeedResponse newPage = BridgeInternal.createFeedResponseWithQueryMetrics(page.results(), + headers, + BridgeInternal.queryMetricsFromFeedResponse(page)); + documentProducerFeedResponse.pageResult = newPage; + return documentProducerFeedResponse; + } + + private 
DocumentProducer.DocumentProducerFeedResponse addCompositeContinuationToken( + DocumentProducer.DocumentProducerFeedResponse documentProducerFeedResponse, + String compositeContinuationToken) { + FeedResponse page = documentProducerFeedResponse.pageResult; + Map headers = new HashMap<>(page.responseHeaders()); + headers.put(HttpConstants.HttpHeaders.CONTINUATION, + compositeContinuationToken); + FeedResponse newPage = BridgeInternal.createFeedResponseWithQueryMetrics(page.results(), + headers, + BridgeInternal.queryMetricsFromFeedResponse(page)); + documentProducerFeedResponse.pageResult = newPage; + return documentProducerFeedResponse; + } + + private static Map headerResponse( + double requestCharge) { + return Utils.immutableMapOf(HttpConstants.HttpHeaders.REQUEST_CHARGE, + String.valueOf(requestCharge)); + } + + @Override + public Flux> apply(Flux.DocumentProducerFeedResponse> source) { + // Emit an empty page so the downstream observables know when there are no more + // results. + return source.filter(documentProducerFeedResponse -> { + if (documentProducerFeedResponse.pageResult.results().isEmpty()) { + // filter empty pages and accumulate charge + tracker.addCharge(documentProducerFeedResponse.pageResult.requestCharge()); + return false; + } + return true; + }).map(documentProducerFeedResponse -> { + // Add the request charge + double charge = tracker.getAndResetCharge(); + if (charge > 0) { + return new ValueHolder<>(plusCharge(documentProducerFeedResponse, + charge)); + } else { + return new ValueHolder<>(documentProducerFeedResponse); + } + }).concatWith(Flux.just(new ValueHolder<>(null))).map(heldValue -> { + DocumentProducer.DocumentProducerFeedResponse documentProducerFeedResponse = heldValue.v; + // CREATE pairs from the stream to allow the observables downstream to "peek" + // 1, 2, 3, null -> (null, 1), (1, 2), (2, 3), (3, null) + ImmutablePair.DocumentProducerFeedResponse, DocumentProducer.DocumentProducerFeedResponse> previousCurrent = new ImmutablePair<>( + this.previousPage, + documentProducerFeedResponse); + this.previousPage = documentProducerFeedResponse; + return previousCurrent; + }).skip(1).map(currentNext -> { + // remove the (null, 1) + // Add the continuation token based on the current and next page. 
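+                // Illustrative example: if the current page came from partition range [A, B) with backend
+                // continuation "tk", it is emitted with composite token { token: "tk", range: [A, B) }.
+                // If that partition is fully drained and a next page exists, the composite token instead
+                // names the next partition's range with a null backend token; if there is no next page at
+                // all, the continuation token is null and the query is complete.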
+ DocumentProducer.DocumentProducerFeedResponse current = currentNext.left; + DocumentProducer.DocumentProducerFeedResponse next = currentNext.right; + + String compositeContinuationToken; + String backendContinuationToken = current.pageResult.continuationToken(); + if (backendContinuationToken == null) { + // We just finished reading the last document from a partition + if (next == null) { + // It was the last partition and we are done + compositeContinuationToken = null; + } else { + // It wasn't the last partition, so we need to give the next range, but with a + // null continuation + CompositeContinuationToken compositeContinuationTokenDom = new CompositeContinuationToken(null, + next.sourcePartitionKeyRange.toRange()); + compositeContinuationToken = compositeContinuationTokenDom.toJson(); + } + } else { + // We are in the middle of reading a partition, + // so give back this partition with a backend continuation token + CompositeContinuationToken compositeContinuationTokenDom = new CompositeContinuationToken( + backendContinuationToken, + current.sourcePartitionKeyRange.toRange()); + compositeContinuationToken = compositeContinuationTokenDom.toJson(); + } + + DocumentProducer.DocumentProducerFeedResponse page; + page = current; + page = this.addCompositeContinuationToken(page, + compositeContinuationToken); + + return page; + }).map(documentProducerFeedResponse -> { + // Unwrap the documentProducerFeedResponse and get back the feedResponse + return documentProducerFeedResponse.pageResult; + }).switchIfEmpty(Flux.defer(() -> { + // create an empty page if there is no result + return Flux.just(BridgeInternal.createFeedResponse(Utils.immutableListOf(), + headerResponse(tracker.getAndResetCharge()))); + })); + } + } + + @Override + public Flux> drainAsync( + int maxPageSize) { + List.DocumentProducerFeedResponse>> obs = this.documentProducers + // Get the stream. + .stream() + // Start from the left most partition first. + .sorted(Comparator.comparing(dp -> dp.targetRange.getMinInclusive())) + // For each partition get it's stream of results. + .map(DocumentProducer::produceAsync) + // Merge results from all partitions. 
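+                // (The Flux.concat below subscribes to these producers sequentially, so results stream
+                // partition by partition, left-most range first, unlike the ORDER BY path which performs
+                // an ordered merge across all partitions.)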
+ .collect(Collectors.toList()); + return Flux.concat(obs).compose(new EmptyPagesFilterTransformer<>(new RequestChargeTracker())); + } + + @Override + public Flux> executeAsync() { + return this.drainAsync(feedOptions.maxItemCount()); + } + + protected DocumentProducer createDocumentProducer( + String collectionRid, + PartitionKeyRange targetRange, + String initialContinuationToken, + int initialPageSize, + FeedOptions feedOptions, + SqlQuerySpec querySpecForInit, + Map commonRequestHeaders, + TriFunction createRequestFunc, + Function>> executeFunc, + Callable createRetryPolicyFunc) { + return new DocumentProducer(client, + collectionRid, + feedOptions, + createRequestFunc, + executeFunc, + targetRange, + collectionRid, + () -> client.getResetSessionTokenRetryPolicy().getRequestPolicy(), + resourceType, + correlatedActivityId, + initialPageSize, + initialContinuationToken, + top); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/ParallelDocumentQueryExecutionContextBase.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/ParallelDocumentQueryExecutionContextBase.java new file mode 100644 index 0000000000000..52625723d24c2 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/ParallelDocumentQueryExecutionContextBase.java @@ -0,0 +1,157 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos.internal.query; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.Resource; +import com.azure.data.cosmos.SqlQuerySpec; +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.IDocumentClientRetryPolicy; +import com.azure.data.cosmos.internal.PartitionKeyRange; +import com.azure.data.cosmos.internal.ResourceType; +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import com.azure.data.cosmos.internal.Strings; +import com.azure.data.cosmos.internal.routing.Range; +import reactor.core.publisher.Flux; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.Callable; +import java.util.function.Function; + +/** + * While this class is public, but it is not part of our published public APIs. + * This is meant to be internally used only by our sdk. + */ +public abstract class ParallelDocumentQueryExecutionContextBase + extends DocumentQueryExecutionContextBase implements IDocumentQueryExecutionComponent { + + protected final List> documentProducers; + protected final List partitionKeyRanges; + protected final SqlQuerySpec querySpec; + protected int pageSize; + protected int top = -1; + + protected ParallelDocumentQueryExecutionContextBase(IDocumentQueryClient client, + List partitionKeyRanges, ResourceType resourceTypeEnum, Class resourceType, + SqlQuerySpec query, FeedOptions feedOptions, String resourceLink, String rewrittenQuery, + boolean isContinuationExpected, boolean getLazyFeedResponse, UUID correlatedActivityId) { + super(client, resourceTypeEnum, resourceType, query, feedOptions, resourceLink, getLazyFeedResponse, + correlatedActivityId); + + documentProducers = new ArrayList<>(); + + this.partitionKeyRanges = partitionKeyRanges; + + if (!Strings.isNullOrEmpty(rewrittenQuery)) { + this.querySpec = new SqlQuerySpec(rewrittenQuery, super.query.parameters()); + } else { + this.querySpec = super.query; + } + } + + protected void initialize(String collectionRid, + Map partitionKeyRangeToContinuationTokenMap, int initialPageSize, + SqlQuerySpec querySpecForInit) { + this.pageSize = initialPageSize; + Map commonRequestHeaders = createCommonHeadersAsync(this.getFeedOptions(null, null)); + + for (PartitionKeyRange targetRange : partitionKeyRangeToContinuationTokenMap.keySet()) { + TriFunction createRequestFunc = (partitionKeyRange, + continuationToken, pageSize) -> { + Map headers = new HashMap<>(commonRequestHeaders); + headers.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken); + headers.put(HttpConstants.HttpHeaders.PAGE_SIZE, Strings.toString(pageSize)); + return this.createDocumentServiceRequest(headers, querySpecForInit, partitionKeyRange, collectionRid); + }; + + Function>> executeFunc = (request) -> { + return this.executeRequestAsync(request).flux(); + }; + + DocumentProducer dp = createDocumentProducer(collectionRid, targetRange, + partitionKeyRangeToContinuationTokenMap.get(targetRange), initialPageSize, feedOptions, + querySpecForInit, commonRequestHeaders, createRequestFunc, executeFunc, + () -> client.getResetSessionTokenRetryPolicy().getRequestPolicy()); + + documentProducers.add(dp); + } + } + + protected int FindTargetRangeAndExtractContinuationTokens( + List partitionKeyRanges, Range range) throws 
CosmosClientException { + if (partitionKeyRanges == null) { + throw new IllegalArgumentException("partitionKeyRanges can not be null."); + } + + if (partitionKeyRanges.size() < 1) { + throw new IllegalArgumentException("partitionKeyRanges must have atleast one element."); + } + + for (PartitionKeyRange partitionKeyRange : partitionKeyRanges) { + if (partitionKeyRange == null) { + throw new IllegalArgumentException("partitionKeyRanges can not have null elements."); + } + } + + // Find the minimum index. + PartitionKeyRange needle = new PartitionKeyRange(/* id */ null, range.getMin(), range.getMax()); + int minIndex; + for (minIndex = 0; minIndex < partitionKeyRanges.size(); minIndex++) { + if (needle.getMinInclusive().equals(partitionKeyRanges.get(minIndex).getMinInclusive())) { + break; + } + } + + if (minIndex == partitionKeyRanges.size()) { + throw BridgeInternal.createCosmosClientException(HttpConstants.StatusCodes.BADREQUEST, + String.format("Could not find partition key range for continuation token: {0}", needle)); + } + + return minIndex; + } + + abstract protected DocumentProducer createDocumentProducer(String collectionRid, PartitionKeyRange targetRange, + String initialContinuationToken, int initialPageSize, FeedOptions feedOptions, SqlQuerySpec querySpecForInit, + Map commonRequestHeaders, + TriFunction createRequestFunc, + Function>> executeFunc, + Callable createRetryPolicyFunc); + + @Override + abstract public Flux> drainAsync(int maxPageSize); + + public void setTop(int newTop) { + this.top = newTop; + + for (DocumentProducer producer : this.documentProducers) { + producer.top = newTop; + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/ParallelQueryConfig.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/ParallelQueryConfig.java new file mode 100644 index 0000000000000..6544f64583e84 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/ParallelQueryConfig.java @@ -0,0 +1,33 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.query; + +/** + * While this class is public, but it is not part of our published public APIs. + * This is meant to be internally used only by our sdk. 
+ */ +public class ParallelQueryConfig { + + public static final int ClientInternalPageSize = 100; + +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/PartitionedQueryExecutionInfo.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/PartitionedQueryExecutionInfo.java new file mode 100644 index 0000000000000..cdf64044c4140 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/PartitionedQueryExecutionInfo.java @@ -0,0 +1,72 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.query; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.JsonSerializable; +import com.azure.data.cosmos.internal.Constants; +import com.azure.data.cosmos.internal.routing.Range; + +import java.util.List; + +/** + * Used internally to encapsulates execution information for a query in the Azure Cosmos DB database service. + */ +public final class PartitionedQueryExecutionInfo extends JsonSerializable { + @SuppressWarnings("unchecked") + private static final Class> QUERY_RANGES_CLASS = (Class>) Range + .getEmptyRange((String) null).getClass(); + + private QueryInfo queryInfo; + private List> queryRanges; + + PartitionedQueryExecutionInfo(QueryInfo queryInfo, List> queryRanges) { + this.queryInfo = queryInfo; + this.queryRanges = queryRanges; + + BridgeInternal.setProperty(this, + PartitionedQueryExecutionInfoInternal.PARTITIONED_QUERY_EXECUTION_INFO_VERSION_PROPERTY, + Constants.PartitionedQueryExecutionInfo.VERSION_1); + } + + public PartitionedQueryExecutionInfo(String jsonString) { + super(jsonString); + } + + public int getVersion() { + return super.getInt(PartitionedQueryExecutionInfoInternal.PARTITIONED_QUERY_EXECUTION_INFO_VERSION_PROPERTY); + } + + public QueryInfo getQueryInfo() { + return this.queryInfo != null ? this.queryInfo + : (this.queryInfo = super.getObject( + PartitionedQueryExecutionInfoInternal.QUERY_INFO_PROPERTY, QueryInfo.class)); + } + + public List> getQueryRanges() { + return this.queryRanges != null ? 
this.queryRanges + : (this.queryRanges = super.getList( + PartitionedQueryExecutionInfoInternal.QUERY_RANGES_PROPERTY, QUERY_RANGES_CLASS)); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/PartitionedQueryExecutionInfoInternal.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/PartitionedQueryExecutionInfoInternal.java new file mode 100644 index 0000000000000..64b0061044c8d --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/PartitionedQueryExecutionInfoInternal.java @@ -0,0 +1,85 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.query; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.JsonSerializable; +import com.azure.data.cosmos.internal.Constants; +import com.azure.data.cosmos.internal.Utils; +import com.azure.data.cosmos.internal.routing.PartitionKeyInternal; +import com.azure.data.cosmos.internal.routing.Range; +import com.fasterxml.jackson.core.JsonProcessingException; + +import java.util.List; + +public final class PartitionedQueryExecutionInfoInternal extends JsonSerializable { + static final String QUERY_INFO_PROPERTY = "queryInfo"; + static final String QUERY_RANGES_PROPERTY = "queryRanges"; + static final String PARTITIONED_QUERY_EXECUTION_INFO_VERSION_PROPERTY = "partitionedQueryExecutionInfoVersion"; + + @SuppressWarnings("unchecked") + private static final Class> QUERY_RANGE_CLASS = (Class>) Range + .getEmptyRange((PartitionKeyInternal) null).getClass(); + + private QueryInfo queryInfo; + private List> queryRanges; + + public PartitionedQueryExecutionInfoInternal() { + BridgeInternal.setProperty(this, PARTITIONED_QUERY_EXECUTION_INFO_VERSION_PROPERTY, Constants.PartitionedQueryExecutionInfo.VERSION_1); + } + + public PartitionedQueryExecutionInfoInternal(String jsonString) { + super(jsonString); + } + + public int getVersion() { + return super.getInt(PARTITIONED_QUERY_EXECUTION_INFO_VERSION_PROPERTY); + } + + public QueryInfo getQueryInfo() { + return this.queryInfo != null ? this.queryInfo + : (this.queryInfo = super.getObject(QUERY_INFO_PROPERTY, QueryInfo.class)); + } + + public void setQueryInfo(QueryInfo queryInfo) { + this.queryInfo = queryInfo; + } + + public List> getQueryRanges() { + return this.queryRanges != null ? 
this.queryRanges + : (this.queryRanges = super.getList(QUERY_RANGES_PROPERTY, QUERY_RANGE_CLASS)); + } + + public void setQueryRanges(List> queryRanges) { + this.queryRanges = queryRanges; + } + + public String toJson() { + try { + return Utils.getSimpleObjectMapper().writeValueAsString(this); + } catch (JsonProcessingException e) { + throw new IllegalStateException("Unable to serialize partition query execution info internal."); + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/PipelinedDocumentQueryExecutionContext.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/PipelinedDocumentQueryExecutionContext.java new file mode 100644 index 0000000000000..606d95618af60 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/PipelinedDocumentQueryExecutionContext.java @@ -0,0 +1,134 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.query; + +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.Resource; +import com.azure.data.cosmos.SqlQuerySpec; +import com.azure.data.cosmos.internal.PartitionKeyRange; +import com.azure.data.cosmos.internal.ResourceType; +import com.azure.data.cosmos.internal.Utils; +import reactor.core.publisher.Flux; + +import java.util.List; +import java.util.UUID; +import java.util.function.Function; + +/** + * While this class is public, but it is not part of our published public APIs. + * This is meant to be internally used only by our sdk. 
+ */ +public class PipelinedDocumentQueryExecutionContext implements IDocumentQueryExecutionContext { + + private IDocumentQueryExecutionComponent component; + private int actualPageSize; + private UUID correlatedActivityId; + + private PipelinedDocumentQueryExecutionContext(IDocumentQueryExecutionComponent component, int actualPageSize, + UUID correlatedActivityId) { + this.component = component; + this.actualPageSize = actualPageSize; + this.correlatedActivityId = correlatedActivityId; + + // this.executeNextSchedulingMetrics = new SchedulingStopwatch(); + // this.executeNextSchedulingMetrics.Ready(); + + // DefaultTrace.TraceVerbose(string.Format( + // CultureInfo.InvariantCulture, + // "{0} Pipelined~Context, actual page size: {1}", + // DateTime.UtcNow.ToString("o", CultureInfo.InvariantCulture), + // this.actualPageSize)); + } + + public static Flux> createAsync( + IDocumentQueryClient client, ResourceType resourceTypeEnum, Class resourceType, SqlQuerySpec expression, + FeedOptions feedOptions, String resourceLink, String collectionRid, + PartitionedQueryExecutionInfo partitionedQueryExecutionInfo, List targetRanges, + int initialPageSize, boolean isContinuationExpected, boolean getLazyFeedResponse, + UUID correlatedActivityId) { + // Use nested callback pattern to unwrap the continuation token at each level. + Function>> createBaseComponentFunction; + + QueryInfo queryInfo = partitionedQueryExecutionInfo.getQueryInfo(); + + if (queryInfo.hasOrderBy()) { + createBaseComponentFunction = (continuationToken) -> { + FeedOptions orderByFeedOptions = new FeedOptions(feedOptions); + orderByFeedOptions.requestContinuation(continuationToken); + return OrderByDocumentQueryExecutionContext.createAsync(client, resourceTypeEnum, resourceType, + expression, orderByFeedOptions, resourceLink, collectionRid, partitionedQueryExecutionInfo, + targetRanges, initialPageSize, isContinuationExpected, getLazyFeedResponse, + correlatedActivityId); + }; + } else { + createBaseComponentFunction = (continuationToken) -> { + FeedOptions parallelFeedOptions = new FeedOptions(feedOptions); + parallelFeedOptions.requestContinuation(continuationToken); + return ParallelDocumentQueryExecutionContext.createAsync(client, resourceTypeEnum, resourceType, + expression, parallelFeedOptions, resourceLink, collectionRid, partitionedQueryExecutionInfo, + targetRanges, initialPageSize, isContinuationExpected, getLazyFeedResponse, + correlatedActivityId); + }; + } + + Function>> createAggregateComponentFunction; + if (queryInfo.hasAggregates()) { + createAggregateComponentFunction = (continuationToken) -> { + return AggregateDocumentQueryExecutionContext.createAsync(createBaseComponentFunction, + queryInfo.getAggregates(), continuationToken); + }; + } else { + createAggregateComponentFunction = createBaseComponentFunction; + } + + Function>> createTopComponentFunction; + if (queryInfo.hasTop()) { + createTopComponentFunction = (continuationToken) -> { + return TopDocumentQueryExecutionContext.createAsync(createAggregateComponentFunction, + queryInfo.getTop(), continuationToken); + }; + } else { + createTopComponentFunction = createAggregateComponentFunction; + } + + int actualPageSize = Utils.getValueOrDefault(feedOptions.maxItemCount(), + ParallelQueryConfig.ClientInternalPageSize); + + if (actualPageSize == -1) { + actualPageSize = Integer.MAX_VALUE; + } + + int pageSize = Math.min(actualPageSize, Utils.getValueOrDefault(queryInfo.getTop(), (actualPageSize))); + return 
createTopComponentFunction.apply(feedOptions.requestContinuation()) + .map(c -> new PipelinedDocumentQueryExecutionContext<>(c, pageSize, correlatedActivityId)); + } + + @Override + public Flux> executeAsync() { + // TODO Auto-generated method stub + + // TODO add more code here + return this.component.drainAsync(actualPageSize); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/ProxyDocumentQueryExecutionContext.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/ProxyDocumentQueryExecutionContext.java new file mode 100644 index 0000000000000..a3980bea91665 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/ProxyDocumentQueryExecutionContext.java @@ -0,0 +1,186 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.query; + +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.internal.DocumentCollection; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.Resource; +import com.azure.data.cosmos.SqlQuerySpec; +import com.azure.data.cosmos.internal.Exceptions; +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.PartitionKeyRange; +import com.azure.data.cosmos.internal.ResourceType; +import com.azure.data.cosmos.internal.Utils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.lang.invoke.MethodHandles; +import java.util.List; +import java.util.UUID; +import java.util.function.Function; + +/** + * While this class is public, but it is not part of our published public APIs. + * This is meant to be internally used only by our sdk. + * + * This class is used as a proxy to wrap the + * DefaultDocumentQueryExecutionContext which is needed for sending the query to + * GATEWAY first and then uses PipelinedDocumentQueryExecutionContext after it + * gets the necessary info. 
+ */ +public class ProxyDocumentQueryExecutionContext implements IDocumentQueryExecutionContext { + + private IDocumentQueryExecutionContext innerExecutionContext; + private IDocumentQueryClient client; + private ResourceType resourceTypeEnum; + private Class resourceType; + private FeedOptions feedOptions; + private SqlQuerySpec query; + private String resourceLink; + private DocumentCollection collection; + private UUID correlatedActivityId; + private boolean isContinuationExpected; + private final static Logger logger = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); + + public ProxyDocumentQueryExecutionContext( + IDocumentQueryExecutionContext innerExecutionContext, + IDocumentQueryClient client, + ResourceType resourceTypeEnum, + Class resourceType, + SqlQuerySpec query, + FeedOptions feedOptions, + String resourceLink, + DocumentCollection collection, + boolean isContinuationExpected, + UUID correlatedActivityId) { + this.innerExecutionContext = innerExecutionContext; + + this.client = client; + this.resourceTypeEnum = resourceTypeEnum; + this.resourceType = resourceType; + this.query = query; + this.feedOptions = feedOptions; + this.resourceLink = resourceLink; + + this.collection = collection; + this.isContinuationExpected = isContinuationExpected; + this.correlatedActivityId = correlatedActivityId; + } + + @Override + public Flux> executeAsync() { + + Function>> func = t -> { + + logger.debug("Received non result message from gateway", t); + if (!(t instanceof Exception)) { + logger.error("Unexpected failure", t); + return Flux.error(t); + } + + if (!isCrossPartitionQuery((Exception) t)) { + // If this is not a cross partition query then propagate error + logger.debug("Failure from gateway", t); + return Flux.error(t); + } + + logger.debug("Setting up query pipeline using the query plan received form gateway"); + + // cross partition query construct pipeline + + CosmosClientException dce = (CosmosClientException) t; + + PartitionedQueryExecutionInfo partitionedQueryExecutionInfo = new + PartitionedQueryExecutionInfo(dce.error().getPartitionedQueryExecutionInfo()); + + logger.debug("Query Plan from gateway {}", partitionedQueryExecutionInfo); + + DefaultDocumentQueryExecutionContext queryExecutionContext = + (DefaultDocumentQueryExecutionContext) this.innerExecutionContext; + + Mono> partitionKeyRanges = queryExecutionContext.getTargetPartitionKeyRanges(collection.resourceId(), + partitionedQueryExecutionInfo.getQueryRanges()); + + Flux> exContext = partitionKeyRanges.flux() + .flatMap(pkranges -> DocumentQueryExecutionContextFactory.createSpecializedDocumentQueryExecutionContextAsync( + this.client, + this.resourceTypeEnum, + this.resourceType, + this.query, + this.feedOptions, + this.resourceLink, + isContinuationExpected, + partitionedQueryExecutionInfo, + pkranges, + this.collection.resourceId(), + this.correlatedActivityId)); + + return exContext.flatMap(IDocumentQueryExecutionContext::executeAsync); + }; + + return this.innerExecutionContext.executeAsync().onErrorResume(func); + } + + private boolean isCrossPartitionQuery(Exception exception) { + + CosmosClientException clientException = Utils.as(exception, CosmosClientException.class); + + if (clientException == null) { + return false; + } + + return (Exceptions.isStatusCode(clientException, HttpConstants.StatusCodes.BADREQUEST) && + Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.CROSS_PARTITION_QUERY_NOT_SERVABLE)); + } + + public static Flux> createAsync(IDocumentQueryClient 
client, + ResourceType resourceTypeEnum, Class resourceType, SqlQuerySpec query, FeedOptions feedOptions, + String resourceLink, DocumentCollection collection, boolean isContinuationExpected, + UUID correlatedActivityId) { + + IDocumentQueryExecutionContext innerExecutionContext = + new DefaultDocumentQueryExecutionContext( + client, + resourceTypeEnum, + resourceType, + query, + feedOptions, + resourceLink, + correlatedActivityId, + isContinuationExpected); + + return Flux.just(new ProxyDocumentQueryExecutionContext(innerExecutionContext, client, + resourceTypeEnum, + resourceType, + query, + feedOptions, + resourceLink, + collection, + isContinuationExpected, + correlatedActivityId)); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/QueryInfo.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/QueryInfo.java new file mode 100644 index 0000000000000..8c4d6e1cd3c77 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/QueryInfo.java @@ -0,0 +1,91 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.query; + +import com.azure.data.cosmos.JsonSerializable; +import com.azure.data.cosmos.internal.query.aggregation.AggregateOperator; +import org.apache.commons.lang3.StringUtils; + +import java.util.Collection; +import java.util.List; + +/** + * Used internally to encapsulate a query's information in the Azure Cosmos DB database service. + */ +public final class QueryInfo extends JsonSerializable { + private Integer top; + private List orderBy; + private Collection aggregates; + private Collection orderByExpressions; + private String rewrittenQuery; + + public QueryInfo() { } + + public QueryInfo(String jsonString) { + super(jsonString); + } + + public Integer getTop() { + return this.top != null ? this.top : (this.top = super.getInt("top")); + } + + public List getOrderBy() { + return this.orderBy != null ? this.orderBy : (this.orderBy = super.getList("orderBy", SortOrder.class)); + } + + public String getRewrittenQuery() { + return this.rewrittenQuery != null ?
this.rewrittenQuery + : (this.rewrittenQuery = super.getString("rewrittenQuery")); + } + + public boolean hasTop() { + return this.getTop() != null; + } + + public boolean hasOrderBy() { + Collection orderBy = this.getOrderBy(); + return orderBy != null && orderBy.size() > 0; + } + + public boolean hasRewrittenQuery() { + return !StringUtils.isEmpty(this.getRewrittenQuery()); + } + + public boolean hasAggregates() { + Collection aggregates = this.getAggregates(); + return aggregates != null && aggregates.size() > 0; + } + + public Collection getAggregates() { + return this.aggregates != null + ? this.aggregates + : (this.aggregates = super.getCollection("aggregates", AggregateOperator.class)); + } + + public Collection getOrderByExpressions() { + return this.orderByExpressions != null + ? this.orderByExpressions + : (this.orderByExpressions = super.getCollection("orderByExpressions", String.class)); + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/QueryItem.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/QueryItem.java new file mode 100644 index 0000000000000..ff447462358be --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/QueryItem.java @@ -0,0 +1,48 @@ + +/** + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.query; + +import com.azure.data.cosmos.JsonSerializable; +import com.azure.data.cosmos.internal.Undefined; + +/** + * Used internally for query in the Azure Cosmos DB database service. + */ +public final class QueryItem extends JsonSerializable { + private Object item; + + public QueryItem(String jsonString) { + super(jsonString); + } + + public Object getItem() { + if (this.item == null) { + Object rawItem = super.get("item"); + this.item = super.has("item") ? 
rawItem : Undefined.Value(); + } + + return this.item; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/SortOrder.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/SortOrder.java new file mode 100644 index 0000000000000..cbd6fccb8e613 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/SortOrder.java @@ -0,0 +1,31 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.query; + +/** + * Sort order in the Azure Cosmos DB database service. + */ +public enum SortOrder { + Ascending, Descending, +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/TakeContinuationToken.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/TakeContinuationToken.java new file mode 100644 index 0000000000000..37562f903246a --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/TakeContinuationToken.java @@ -0,0 +1,92 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.query; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.JsonSerializable; +import com.azure.data.cosmos.internal.Utils.ValueHolder; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + + +/** + * While this class is public, but it is not part of our published public APIs. + * This is meant to be internally used only by our sdk. + */ +public final class TakeContinuationToken extends JsonSerializable { + private static final String LimitPropertyName = "limit"; + private static final String SourceTokenPropetryName = "sourceToken"; + private static final Logger logger = LoggerFactory.getLogger(TakeContinuationToken.class); + + public TakeContinuationToken(int takeCount, String sourceToken) { + if (takeCount < 0) { + throw new IllegalArgumentException("takeCount must be a non negative number."); + } + + // sourceToken is allowed to be null. + this.setTakeCount(takeCount); + this.setSourceToken(sourceToken); + } + + private TakeContinuationToken(String serializedTakeContinuationToken) { + super(serializedTakeContinuationToken); + } + + public static boolean tryParse(String serializedTakeContinuationToken, + ValueHolder outTakeContinuationToken) { + boolean parsed; + try { + TakeContinuationToken takeContinuationToken = new TakeContinuationToken(serializedTakeContinuationToken); + takeContinuationToken.getSourceToken(); + takeContinuationToken.getTakeCount(); + outTakeContinuationToken.v = takeContinuationToken; + parsed = true; + } catch (Exception ex) { + logger.debug( + "Received exception {} when trying to parse: {}", + ex.getMessage(), + serializedTakeContinuationToken); + parsed = false; + outTakeContinuationToken.v = null; + } + + return parsed; + } + + public int getTakeCount() { + return super.getInt(LimitPropertyName); + } + + public String getSourceToken() { + return super.getString(SourceTokenPropetryName); + } + + private void setTakeCount(int takeCount) { + BridgeInternal.setProperty(this, LimitPropertyName, takeCount); + } + + private void setSourceToken(String sourceToken) { + BridgeInternal.setProperty(this, SourceTokenPropetryName, sourceToken); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/TopDocumentQueryExecutionContext.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/TopDocumentQueryExecutionContext.java new file mode 100644 index 0000000000000..661b132cf5a90 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/TopDocumentQueryExecutionContext.java @@ -0,0 +1,148 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.query; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.Resource; +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.Utils.ValueHolder; +import reactor.core.publisher.Flux; + +import java.util.HashMap; +import java.util.Map; +import java.util.function.Function; +import java.util.function.Predicate; + +public class TopDocumentQueryExecutionContext implements IDocumentQueryExecutionComponent { + + private final IDocumentQueryExecutionComponent component; + private final int top; + + public TopDocumentQueryExecutionContext(IDocumentQueryExecutionComponent component, int top) { + this.component = component; + this.top = top; + } + + public static Flux> createAsync( + Function>> createSourceComponentFunction, + int topCount, String topContinuationToken) { + TakeContinuationToken takeContinuationToken; + + if (topContinuationToken == null) { + takeContinuationToken = new TakeContinuationToken(topCount, null); + } else { + ValueHolder outTakeContinuationToken = new ValueHolder(); + if (!TakeContinuationToken.tryParse(topContinuationToken, outTakeContinuationToken)) { + String message = String.format("INVALID JSON in continuation token %s for Top~Context", + topContinuationToken); + CosmosClientException dce = BridgeInternal.createCosmosClientException(HttpConstants.StatusCodes.BADREQUEST, + message); + return Flux.error(dce); + } + + takeContinuationToken = outTakeContinuationToken.v; + } + + if (takeContinuationToken.getTakeCount() > topCount) { + String message = String.format( + "top count in continuation token: %d can not be greater than the top count in the query: %d.", + takeContinuationToken.getTakeCount(), topCount); + CosmosClientException dce = BridgeInternal.createCosmosClientException(HttpConstants.StatusCodes.BADREQUEST, message); + return Flux.error(dce); + } + + return createSourceComponentFunction + .apply(takeContinuationToken.getSourceToken()) + .map(component -> new TopDocumentQueryExecutionContext<>(component, takeContinuationToken.getTakeCount())); + } + + @Override + public Flux> drainAsync(int maxPageSize) { + ParallelDocumentQueryExecutionContextBase context; + + if (this.component instanceof AggregateDocumentQueryExecutionContext) { + context = (ParallelDocumentQueryExecutionContextBase) ((AggregateDocumentQueryExecutionContext) this.component) + .getComponent(); + } else { + context = (ParallelDocumentQueryExecutionContextBase) this.component; + } + + context.setTop(this.top); + + return this.component.drainAsync(maxPageSize).takeUntil(new Predicate>() { + + private volatile int fetchedItems = 0; + + @Override + public boolean test(FeedResponse frp) { + + fetchedItems += frp.results().size(); + + // take until we have at least top many elements fetched + return fetchedItems >= top; + } + }).map(new Function, FeedResponse>() { + + private volatile int collectedItems = 0; + private volatile boolean lastPage = false; + + @Override + public FeedResponse apply(FeedResponse t) { + + if (collectedItems + t.results().size() <= top) { + collectedItems += t.results().size(); + + Map headers = new 
HashMap<>(t.responseHeaders()); + if (top != collectedItems) { + // Add Take Continuation Token + String sourceContinuationToken = t.continuationToken(); + TakeContinuationToken takeContinuationToken = new TakeContinuationToken(top - collectedItems, + sourceContinuationToken); + headers.put(HttpConstants.HttpHeaders.CONTINUATION, takeContinuationToken.toJson()); + } else { + // Null out the continuation token + headers.put(HttpConstants.HttpHeaders.CONTINUATION, null); + } + + return BridgeInternal.createFeedResponseWithQueryMetrics(t.results(), headers, + BridgeInternal.queryMetricsFromFeedResponse(t)); + } else { + assert lastPage == false; + lastPage = true; + int lastPageSize = top - collectedItems; + collectedItems += lastPageSize; + + // Null out the continuation token + Map headers = new HashMap<>(t.responseHeaders()); + headers.put(HttpConstants.HttpHeaders.CONTINUATION, null); + + return BridgeInternal.createFeedResponseWithQueryMetrics(t.results().subList(0, lastPageSize), + headers, BridgeInternal.queryMetricsFromFeedResponse(t)); + } + } + }); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/TriFunction.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/TriFunction.java new file mode 100644 index 0000000000000..fc4772ad8e940 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/TriFunction.java @@ -0,0 +1,23 @@ +package com.azure.data.cosmos.internal.query; + +/** + * A functional interface (callback) that computes a value based on multiple input values. + * @param the first value type + * @param the second value type + * @param the third value type + * @param the result type + */ + +@FunctionalInterface +public interface TriFunction { + + /** + * Applies this function to the given arguments. + * + * @param t the first function argument + * @param u the second function argument + * @param v the third function argument + * @return the function result + */ + R apply(T t, U u, V v); +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/aggregation/AggregateOperator.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/aggregation/AggregateOperator.java new file mode 100644 index 0000000000000..b84421411f04c --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/aggregation/AggregateOperator.java @@ -0,0 +1,32 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.query.aggregation; + +public enum AggregateOperator { + Average, + Count, + Max, + Min, + Sum +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/aggregation/Aggregator.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/aggregation/Aggregator.java new file mode 100644 index 0000000000000..82a6301bc8d3c --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/aggregation/Aggregator.java @@ -0,0 +1,30 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.query.aggregation; + +public interface Aggregator { + void aggregate(Object item); + + Object getResult(); +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/aggregation/AverageAggregator.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/aggregation/AverageAggregator.java new file mode 100644 index 0000000000000..d64ba353914c5 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/aggregation/AverageAggregator.java @@ -0,0 +1,80 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.query.aggregation; + +import com.azure.data.cosmos.internal.Undefined; +import com.azure.data.cosmos.internal.Utils; + +import java.io.IOException; + +public class AverageAggregator implements Aggregator { + private AverageInfo averageInfo; + + public AverageAggregator() { + this.averageInfo = new AverageInfo(); + } + + @Override + public void aggregate(Object item) { + AverageInfo averageInfo; + try { + averageInfo = Utils.getSimpleObjectMapper().readValue(item.toString(), AverageInfo.class); + } catch (IOException e) { + throw new IllegalStateException("Failed to deserialize aggregate result"); + } + this.averageInfo.add(averageInfo); + } + + @Override + public Object getResult() { + return this.averageInfo.getAverage(); + } + + private static class AverageInfo { + public Double sum; + public long count; + + public void add(AverageInfo other) { + if (other == null) { + throw new IllegalArgumentException("other"); + } + if (other.sum == null) { + return; + } + if (this.sum == null) { + this.sum = 0.0; + } + + this.sum += other.sum; + this.count += other.count; + } + + Object getAverage() { + if (this.sum == null || this.count <= 0) { + return Undefined.Value(); + } + return this.sum / this.count; + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/aggregation/CountAggregator.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/aggregation/CountAggregator.java new file mode 100644 index 0000000000000..bb18db582ed57 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/aggregation/CountAggregator.java @@ -0,0 +1,39 @@ + +/** + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.query.aggregation; + +public class CountAggregator implements Aggregator { + private long value; + + @Override + public void aggregate(Object item) { + value += Long.parseLong(item.toString()); + } + + @Override + public Object getResult() { + return value; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/aggregation/MaxAggregator.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/aggregation/MaxAggregator.java new file mode 100644 index 0000000000000..ea59e3b4538e6 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/aggregation/MaxAggregator.java @@ -0,0 +1,50 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.query.aggregation; + +import com.azure.data.cosmos.internal.Undefined; +import com.azure.data.cosmos.internal.query.ItemComparator; + +public class MaxAggregator implements Aggregator { + private Object value; + + public MaxAggregator() { + this.value = Undefined.Value(); + } + + @Override + public void aggregate(Object item) { + if (Undefined.Value().equals(this.value)) { + this.value = item; + } else if (ItemComparator.getInstance().compare(item, this.value) > 0) { + this.value = item; + } + + } + + @Override + public Object getResult() { + return this.value; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/aggregation/MinAggregator.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/aggregation/MinAggregator.java new file mode 100644 index 0000000000000..981e55a957b89 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/aggregation/MinAggregator.java @@ -0,0 +1,49 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.query.aggregation; + +import com.azure.data.cosmos.internal.Undefined; +import com.azure.data.cosmos.internal.query.ItemComparator; + +public class MinAggregator implements Aggregator { + private Object value; + + public MinAggregator() { + this.value = Undefined.Value(); + } + + @Override + public void aggregate(Object item) { + if (Undefined.Value().equals(this.value)) { + this.value = item; + } else if (ItemComparator.getInstance().compare(item, this.value) < 0) { + this.value = item; + } + } + + @Override + public Object getResult() { + return this.value; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/aggregation/SumAggregator.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/aggregation/SumAggregator.java new file mode 100644 index 0000000000000..0d238dc9a0c00 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/aggregation/SumAggregator.java @@ -0,0 +1,50 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.query.aggregation; + +import com.azure.data.cosmos.internal.Undefined; + +public class SumAggregator implements Aggregator { + private Double sum; + + @Override + public void aggregate(Object item) { + if (Undefined.Value().equals(item)) { + return; + } + + if (this.sum == null) { + this.sum = 0.0; + } + this.sum += ((Number) item).doubleValue(); + } + + @Override + public Object getResult() { + if (this.sum == null) { + return Undefined.Value(); + } + return this.sum; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/metrics/ClientSideMetrics.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/metrics/ClientSideMetrics.java new file mode 100644 index 0000000000000..f29d1c001a0ba --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/metrics/ClientSideMetrics.java @@ -0,0 +1,143 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.query.metrics; + +import org.apache.commons.lang3.tuple.ImmutablePair; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; + +/** + * Client side QueryMetrics + */ +public class ClientSideMetrics { + + public static final ClientSideMetrics ZERO = new ClientSideMetrics( + 0, /* retries*/ + 0, /* requestCharge */ + new ArrayList<>(), /* fetchExecutionRanges */ + new ArrayList<>()); /* partitionSchedulingTimeSpans */ + + private final long retries; + private final double requestCharge; + private final List fetchExecutionRanges; + private final List> partitionSchedulingTimeSpans; + + /** + * Constructor + * + * @param retries The number of retries required to execute the query. + * @param requestCharge The request charge incurred from executing the query. + * @param executionRanges The fetch execution ranges from executing the query. + * @param schedulingTimeSpans The partition scheduling timespans from the query. 
+ */ + public ClientSideMetrics(int retries, double requestCharge, List executionRanges, + List> schedulingTimeSpans) { + if (executionRanges == null || executionRanges.contains(null)) { + throw new NullPointerException("executionRanges"); + } + if (schedulingTimeSpans == null || schedulingTimeSpans.contains(null)) { + throw new NullPointerException("schedulingTimeSpans"); + } + if (retries < 0) { + throw new IllegalArgumentException("retries must not be negative"); + } + if (requestCharge < 0) { + throw new IllegalArgumentException("requestCharge must not be negative"); + } + + this.retries = retries; + this.requestCharge = requestCharge; + this.fetchExecutionRanges = executionRanges; + this.partitionSchedulingTimeSpans = schedulingTimeSpans; + } + + /** + * Gets the number of retries in the Azure Cosmos DB database service. + * + * @return the retries + */ + public long getRetries() { + return retries; + } + + /** + * Gets the request charge for this continuation of the query. + * + * @return the requestCharge + */ + public double getRequestCharge() { + return requestCharge; + } + + /** + * Creates a single ClientSideMetrics instance by aggregating a collection of ClientSideMetrics. + * + * @param clientSideMetricsCollection the collection of ClientSideMetrics to aggregate + * @return a ClientSideMetrics instance whose values are summed over the whole collection + */ + public static ClientSideMetrics createFromCollection(Collection clientSideMetricsCollection) { + if (clientSideMetricsCollection == null) { + throw new NullPointerException("clientSideMetricsCollection"); + } + + int retries = 0; + double requestCharge = 0; + List fetchExecutionRanges = new ArrayList<>(); + List> partitionSchedulingTimeSpans = new ArrayList<>(); + + for (ClientSideMetrics clientSideQueryMetrics : clientSideMetricsCollection) { + retries += clientSideQueryMetrics.retries; + requestCharge += clientSideQueryMetrics.requestCharge; + fetchExecutionRanges.addAll(clientSideQueryMetrics.fetchExecutionRanges); + partitionSchedulingTimeSpans.addAll(clientSideQueryMetrics.partitionSchedulingTimeSpans); + } + + return new ClientSideMetrics(retries, requestCharge, fetchExecutionRanges, partitionSchedulingTimeSpans); + } + + static double getOrDefault(HashMap metrics, String key) { + Double doubleReference = metrics.get(key); + return doubleReference == null ? 0 : doubleReference; + } + + /** + * Gets the Fetch Execution Ranges for this continuation of the query. + * + * @return the Fetch Execution Ranges for this continuation of the query + */ + public List getFetchExecutionRanges() { + return fetchExecutionRanges; + } + + /** + * Gets the Partition Scheduling TimeSpans for this query.
+ * + * @return the List of Partition Scheduling TimeSpans for this query + */ + public List> getPartitionSchedulingTimeSpans() { + return partitionSchedulingTimeSpans; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/metrics/FetchExecutionRange.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/metrics/FetchExecutionRange.java new file mode 100644 index 0000000000000..b0cfc0a2d1bac --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/metrics/FetchExecutionRange.java @@ -0,0 +1,110 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.query.metrics; + +import java.time.Instant; + +/** + * Stores information about a fetch execution. + */ +public class FetchExecutionRange { + private final Instant startTime; + private final Instant endTime; + private final String partitionId; + private final long numberOfDocuments; + private final long retryCount; + private final String activityId; + + /** + * Constructor + * + * @param activityId The activityId of the fetch + * @param startTime The start time of the fetch + * @param endTime The end time of the fetch + * @param partitionId The id of the partition key range that the fetch was issued against + * @param numberOfDocuments The number of documents that were fetched in the particular execution range + * @param retryCount The number of times we retried for this fetch execution range + */ + FetchExecutionRange(String activityId, Instant startTime, Instant endTime, String partitionId, long numberOfDocuments, long retryCount) { + this.activityId = activityId; + this.startTime = startTime; + this.endTime = endTime; + this.partitionId = partitionId; + this.numberOfDocuments = numberOfDocuments; + this.retryCount = retryCount; + } + + /** + * Gets the start time of the fetch. + * + * @return the start time of the fetch. + */ + public Instant getStartTime() { + return startTime; + } + + /** + * Gets the end time of the fetch. + * + * @return the end time of the fetch. + */ + public Instant getEndTime() { + return endTime; + } + + /** + * Gets the partition id that was fetched from. + * + * @return the partition id that was fetched from. + */ + public String getPartitionId() { + return partitionId; + } + + /** + * Gets the number of documents that were fetched in the particular execution range.
+ * + * @return the number of documents that were fetched in the particular execution range. + */ + public long getNumberOfDocuments() { + return numberOfDocuments; + } + + /** + * Gets the number of times we retried for this fetch execution range. + * + * @return the number of times we retried for this fetch execution range. + */ + public long getRetryCount() { + return retryCount; + } + + /** + * Gets the activityId of the fetch. + * + * @return the activityId of the fetch. + */ + public String getActivityId() { + return activityId; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/metrics/FetchExecutionRangeAccumulator.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/metrics/FetchExecutionRangeAccumulator.java new file mode 100644 index 0000000000000..3f86b0d01aef4 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/metrics/FetchExecutionRangeAccumulator.java @@ -0,0 +1,98 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.query.metrics; + +import org.apache.commons.lang3.time.StopWatch; + +import java.time.Duration; +import java.time.Instant; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.TimeUnit; + +/** + * Accumulator that acts as a builder of FetchExecutionRanges. + */ +public class FetchExecutionRangeAccumulator { + private final String partitionKeyRangeId; + private final Instant constructionTime; + private final StopWatch stopwatch; + private List fetchExecutionRanges; + private Instant startTime; + private Instant endTime; + private boolean isFetching; + + public FetchExecutionRangeAccumulator(String partitionKeyRangeId) { + this.partitionKeyRangeId = partitionKeyRangeId; + this.constructionTime = Instant.now(); + // This stopwatch is always running and is only used to calculate deltas that are synchronized with the construction time. + this.stopwatch = new StopWatch(); + stopwatch.start(); + this.fetchExecutionRanges = new ArrayList(); + } + + /** + * Gets the FetchExecutionRanges and resets the accumulator. + * + * @return the list of FetchExecutionRanges accumulated since the last call. + */ + public List getExecutionRanges() { + List returnValue = this.fetchExecutionRanges; + this.fetchExecutionRanges = new ArrayList<>(); + return returnValue; + } + + /** + * Updates the most recent start time internally.
+ */ + public void beginFetchRange() { + if (!this.isFetching) { + // Calculating the start time as the construction time and the stopwatch as a delta. + this.startTime = this.constructionTime.plus(Duration.ofMillis(this.stopwatch.getTime(TimeUnit.MILLISECONDS))); + this.isFetching = true; + } + } + + /** + * Updates the most recent end time internally and constructs a new FetchExecutionRange + * + * @param numberOfDocuments The number of documents that were fetched for this range. + * @param retryCount The number of times we retried for this fetch execution range. + */ + public void endFetchRange(String activityId, long numberOfDocuments, long retryCount) { + if (this.isFetching) { + // Calculating the end time as the construction time and the stopwatch as a delta. + this.endTime = this.constructionTime.plus(Duration.ofMillis(this.stopwatch.getTime(TimeUnit.MILLISECONDS))); + FetchExecutionRange fetchExecutionRange = new FetchExecutionRange( + activityId, + this.startTime, + this.endTime, + this.partitionKeyRangeId, + numberOfDocuments, + retryCount); + this.fetchExecutionRanges.add(fetchExecutionRange); + this.isFetching = false; + } + } + +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/metrics/QueryMetricsTextWriter.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/metrics/QueryMetricsTextWriter.java new file mode 100644 index 0000000000000..171edf56b41d9 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/metrics/QueryMetricsTextWriter.java @@ -0,0 +1,600 @@ +package com.azure.data.cosmos.internal.query.metrics; + +import org.apache.commons.lang3.StringUtils; + +import java.time.Duration; +import java.time.Instant; +import java.time.ZoneOffset; +import java.time.format.DateTimeFormatter; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.UUID; + +public class QueryMetricsTextWriter extends QueryMetricsWriter { + + private final StringBuilder stringBuilder; + + // QueryMetrics + private static final String ActivityIds = "Activity Ids"; + private static final String RetrievedDocumentCount = "Retrieved Document Count"; + private static final String RetrievedDocumentSize = "Retrieved Document Size"; + private static final String OutputDocumentCount = "Output Document Count"; + private static final String OutputDocumentSize = "Output Document Size"; + private static final String IndexUtilizationText = "Index Utilization"; + private static final String TotalQueryExecutionTime = "Total Query Execution Time"; + + // QueryPreparationTimes + private static final String QueryPreparationTimes = "Query Preparation Times"; + private static final String QueryCompileTime = "Query Compilation Time"; + private static final String LogicalPlanBuildTime = "Logical Plan Build Time"; + private static final String PhysicalPlanBuildTime = "Physical Plan Build Time"; + private static final String QueryOptimizationTime = "Query Optimization Time"; + + // QueryTimes + private static final String QueryEngineTimes = "Query Engine Times"; + private static final String IndexLookupTime = "Index Lookup Time"; + private static final String DocumentLoadTime = "Document Load Time"; + private static final String DocumentWriteTime = "Document Write Time"; + + // RuntimeExecutionTimes + private static final String RuntimeExecutionTimes = "Runtime Execution Times"; + private static final String TotalExecutionTime = "Query Engine Execution Time"; + private static final 
String SystemFunctionExecuteTime = "System Function Execution Time"; + private static final String UserDefinedFunctionExecutionTime = "User-defined Function Execution Time"; + + // ClientSideQueryMetrics + private static final String ClientSideQueryMetrics = "Client Side Metrics"; + private static final String Retries = "Retry Count"; + private static final String RequestCharge = "Request Charge"; + private static final String FetchExecutionRanges = "Partition Execution Timeline"; + private static final String SchedulingMetrics = "Scheduling Metrics"; + + // Constants for Partition Execution Timeline Table + private static final String StartTimeHeader = "Start Time (UTC)"; + private static final String EndTimeHeader = "End Time (UTC)"; + private static final String DurationHeader = "Duration (ms)"; + private static final String PartitionKeyRangeIdHeader = "Partition Id"; + private static final String NumberOfDocumentsHeader = "NUMBER of Documents"; + private static final String RetryCountHeader = "Retry Count"; + private static final String ActivityIdHeader = "Activity Id"; + + // Constants for Scheduling Metrics Table + private static final String PartitionIdHeader = "Partition Id"; + private static final String ResponseTimeHeader = "Response Time (ms)"; + private static final String RunTimeHeader = "Run Time (ms)"; + private static final String WaitTimeHeader = "Wait Time (ms)"; + private static final String TurnaroundTimeHeader = "Turnaround Time (ms)"; + private static final String NumberOfPreemptionHeader = "NUMBER of Preemptions"; + + // Static for Partition Execution Timeline Table + // private static int MaxDateTimeStringLength = LocalDateTime.MAX.toString().length(); + private static final int MaxDateTimeStringLength = 16; + private static final int StartTimeHeaderLength = Math.max(MaxDateTimeStringLength, StartTimeHeader.length()); + private static final int EndTimeHeaderLength = Math.max(MaxDateTimeStringLength, EndTimeHeader.length()); + private static final int DurationHeaderLength = DurationHeader.length(); + private static final int PartitionKeyRangeIdHeaderLength = PartitionKeyRangeIdHeader.length(); + private static final int NumberOfDocumentsHeaderLength = NumberOfDocumentsHeader.length(); + private static final int RetryCountHeaderLength = RetryCountHeader.length(); + private static final int ActivityIdHeaderLength = UUID.randomUUID().toString().length(); + + private static TextTable.Column[] PartitionExecutionTimelineColumns = new TextTable.Column[] + { + new TextTable.Column(PartitionKeyRangeIdHeader, PartitionKeyRangeIdHeaderLength), + new TextTable.Column(ActivityIdHeader, ActivityIdHeaderLength), + new TextTable.Column(StartTimeHeader, StartTimeHeaderLength), + new TextTable.Column(EndTimeHeader, EndTimeHeaderLength), + new TextTable.Column(DurationHeader, DurationHeaderLength), + new TextTable.Column(NumberOfDocumentsHeader, NumberOfDocumentsHeaderLength), + new TextTable.Column(RetryCountHeader, RetryCountHeaderLength), + }; + + private static TextTable PartitionExecutionTimelineTable = new TextTable(Arrays.asList(PartitionExecutionTimelineColumns)); + + // Static for Scheduling Metrics Table + //private static readonly int MaxTimeSpanStringLength = Math.Max(TimeSpan.MaxValue.TotalMilliseconds.ToString + // ("G17").Length, TurnaroundTimeHeader.Length); + private static final int PartitionIdHeaderLength = PartitionIdHeader.length(); + private static final int ResponseTimeHeaderLength = ResponseTimeHeader.length(); + private static final int RunTimeHeaderLength = 
RunTimeHeader.length(); + private static final int WaitTimeHeaderLength = WaitTimeHeader.length(); + private static final int TurnaroundTimeHeaderLength = TurnaroundTimeHeader.length(); + private static final int NumberOfPreemptionHeaderLength = NumberOfPreemptionHeader.length(); + + private static TextTable.Column[] SchedulingMetricsColumns = new TextTable.Column[] + { + new TextTable.Column(PartitionIdHeader, PartitionIdHeaderLength), + new TextTable.Column(ResponseTimeHeader, ResponseTimeHeaderLength), + new TextTable.Column(RunTimeHeader, RunTimeHeaderLength), + new TextTable.Column(WaitTimeHeader, WaitTimeHeaderLength), + new TextTable.Column(TurnaroundTimeHeader, TurnaroundTimeHeaderLength), + new TextTable.Column(NumberOfPreemptionHeader, NumberOfPreemptionHeaderLength), + }; + + private static TextTable SchedulingMetricsTable = new TextTable(Arrays.asList(SchedulingMetricsColumns)); + + // FetchExecutionRange state + private String lastFetchPartitionId; + private String lastActivityId; + private Instant lastStartTime; + private Instant lastEndTime; + private long lastFetchDocumentCount; + private long lastFetchRetryCount; + + // PartitionSchedulingTimeSpan state + private String lastSchedulingPartitionId; + private long lastResponseTime; + private long lastRunTime; + private long lastWaitTime; + private long lastTurnaroundTime; + private long lastNumberOfPreemptions; + + static DateTimeFormatter formatter = + DateTimeFormatter.ofPattern("HH:mm:ss:SSSS").withZone(ZoneOffset.UTC); + + public QueryMetricsTextWriter(StringBuilder stringBuilder) { + assert stringBuilder != null; + this.stringBuilder = stringBuilder; + } + + @Override + protected void writeBeforeQueryMetrics() { + // Do Nothing + } + + @Override + protected void writeRetrievedDocumentCount(long retrievedDocumentCount) { + QueryMetricsTextWriter.appendCountToStringBuilder(stringBuilder, + QueryMetricsTextWriter.RetrievedDocumentCount, retrievedDocumentCount, 0); + } + + @Override + protected void writeRetrievedDocumentSize(long retrievedDocumentSize) { + QueryMetricsTextWriter.appendBytesToStringBuilder(stringBuilder, QueryMetricsTextWriter.RetrievedDocumentSize + , retrievedDocumentSize, 0); + } + + @Override + protected void writeOutputDocumentCount(long outputDocumentCount) { + QueryMetricsTextWriter.appendCountToStringBuilder(stringBuilder, QueryMetricsTextWriter.OutputDocumentCount, + outputDocumentCount, 0); + } + + @Override + protected void writeOutputDocumentSize(long outputDocumentSize) { + QueryMetricsTextWriter.appendBytesToStringBuilder(stringBuilder, QueryMetricsTextWriter.OutputDocumentSize, + outputDocumentSize, 0); + } + + @Override + protected void writeIndexHitRatio(double indexHitRatio) { + QueryMetricsTextWriter.appendPercentageToStringBuilder(stringBuilder, QueryMetricsTextWriter.IndexUtilizationText + , indexHitRatio, 0); + } + + @Override + protected void writeTotalQueryExecutionTime(Duration totalQueryExecutionTime) { + QueryMetricsTextWriter.appendNanosecondsToStringBuilder(stringBuilder, + QueryMetricsTextWriter.TotalQueryExecutionTime, durationToMilliseconds(totalQueryExecutionTime), 0); + } + + @Override + protected void writeBeforeQueryPreparationTimes() { + QueryMetricsTextWriter.appendHeaderToStringBuilder(stringBuilder, + QueryMetricsTextWriter.QueryPreparationTimes, 1); + } + + @Override + protected void writeQueryCompilationTime(Duration queryCompilationTime) { + QueryMetricsTextWriter.appendMillisecondsToStringBuilder(stringBuilder, + QueryMetricsTextWriter.QueryCompileTime, 
durationToMilliseconds(queryCompilationTime), 2); + } + + @Override + protected void writeLogicalPlanBuildTime(Duration logicalPlanBuildTime) { + QueryMetricsTextWriter.appendMillisecondsToStringBuilder(stringBuilder, + QueryMetricsTextWriter.LogicalPlanBuildTime, durationToMilliseconds(logicalPlanBuildTime), 2); + } + + @Override + protected void writePhysicalPlanBuildTime(Duration physicalPlanBuildTime) { + QueryMetricsTextWriter.appendMillisecondsToStringBuilder(stringBuilder, + QueryMetricsTextWriter.PhysicalPlanBuildTime, durationToMilliseconds(physicalPlanBuildTime), 2); + } + + @Override + protected void writeQueryOptimizationTime(Duration queryOptimizationTime) { + QueryMetricsTextWriter.appendMillisecondsToStringBuilder(stringBuilder, + QueryMetricsTextWriter.QueryOptimizationTime, durationToMilliseconds(queryOptimizationTime), 2); + } + + @Override + protected void writeAfterQueryPreparationTimes() { + // Do Nothing + } + + @Override + protected void writeIndexLookupTime(Duration indexLookupTime) { + QueryMetricsTextWriter.appendMillisecondsToStringBuilder(stringBuilder, + QueryMetricsTextWriter.IndexLookupTime, durationToMilliseconds(indexLookupTime), 1); + } + + @Override + protected void writeDocumentLoadTime(Duration documentLoadTime) { + QueryMetricsTextWriter.appendMillisecondsToStringBuilder(stringBuilder, + QueryMetricsTextWriter.DocumentLoadTime, durationToMilliseconds(documentLoadTime), 1); + } + + @Override + protected void writeVMExecutionTime(Duration vMExecutionTime) { + // Do Nothing + } + + @Override + protected void writeBeforeRuntimeExecutionTimes() { + QueryMetricsTextWriter.appendHeaderToStringBuilder(stringBuilder, + QueryMetricsTextWriter.RuntimeExecutionTimes, 1); + } + + @Override + protected void writeQueryEngineExecutionTime(Duration queryEngineExecutionTime) { + QueryMetricsTextWriter.appendMillisecondsToStringBuilder(stringBuilder, + QueryMetricsTextWriter.QueryEngineTimes, durationToMilliseconds(queryEngineExecutionTime), 2); + } + + @Override + protected void writeSystemFunctionExecutionTime(Duration systemFunctionExecutionTime) { + QueryMetricsTextWriter.appendMillisecondsToStringBuilder(stringBuilder, + QueryMetricsTextWriter.SystemFunctionExecuteTime, durationToMilliseconds(systemFunctionExecutionTime) + , 2); + } + + @Override + protected void writeUserDefinedFunctionExecutionTime(Duration userDefinedFunctionExecutionTime) { + QueryMetricsTextWriter.appendMillisecondsToStringBuilder(stringBuilder, + QueryMetricsTextWriter.UserDefinedFunctionExecutionTime, + durationToMilliseconds(userDefinedFunctionExecutionTime), 2); + } + + @Override + protected void writeAfterRuntimeExecutionTimes() { + // Do Nothing + } + + @Override + protected void writeDocumentWriteTime(Duration documentWriteTime) { + QueryMetricsTextWriter.appendMillisecondsToStringBuilder(stringBuilder, + QueryMetricsTextWriter.DocumentWriteTime, durationToMilliseconds(documentWriteTime), 1); + } + + @Override + protected void writeBeforeClientSideMetrics() { + QueryMetricsTextWriter.appendHeaderToStringBuilder(stringBuilder, + QueryMetricsTextWriter.ClientSideQueryMetrics, 0); + } + + @Override + protected void writeRetries(long retries) { + QueryMetricsTextWriter.appendCountToStringBuilder(stringBuilder, QueryMetricsTextWriter.Retries, retries, 1); + } + + @Override + protected void writeRequestCharge(double requestCharge) { + QueryMetricsTextWriter.appendRUToStringBuilder(stringBuilder, QueryMetricsTextWriter.RequestCharge, + requestCharge, 1); + } + + @Override + protected void 
writeBeforePartitionExecutionTimeline() { + QueryMetricsTextWriter.appendNewlineToStringBuilder(stringBuilder); + + // Building the table for fetch execution ranges + QueryMetricsTextWriter.appendHeaderToStringBuilder(stringBuilder, QueryMetricsTextWriter.FetchExecutionRanges + , 1); + QueryMetricsTextWriter.appendHeaderToStringBuilder(stringBuilder, PartitionExecutionTimelineTable.getTopLine(), 1); + QueryMetricsTextWriter.appendHeaderToStringBuilder(stringBuilder, PartitionExecutionTimelineTable.getHeader(), 1); + QueryMetricsTextWriter.appendHeaderToStringBuilder(stringBuilder, PartitionExecutionTimelineTable.getMiddleLine(), 1); + } + + @Override + protected void writeBeforeFetchExecutionRange() { + // Do Nothing + } + + @Override + protected void writeFetchPartitionKeyRangeId(String partitionId) { + this.lastFetchPartitionId = partitionId; + } + + @Override + protected void writeActivityId(String activityId) { + this.lastActivityId = activityId; + } + + @Override + protected void writeStartTime(Instant startTime) { + this.lastStartTime = startTime; + } + + @Override + protected void writeEndTime(Instant endTime) { + this.lastEndTime = endTime; + } + + @Override + protected void writeFetchDocumentCount(long numberOfDocuments) { + this.lastFetchDocumentCount = numberOfDocuments; + } + + @Override + protected void writeFetchRetryCount(long retryCount) { + this.lastFetchRetryCount = retryCount; + } + + @Override + protected void writeAfterFetchExecutionRange() { + QueryMetricsTextWriter.appendHeaderToStringBuilder( + stringBuilder, + PartitionExecutionTimelineTable.getRow(Arrays.asList( + this.lastFetchPartitionId, + this.lastActivityId, + formatter.format(this.lastStartTime), + formatter.format(this.lastEndTime), + nanosToMilliSeconds(this.lastEndTime.minusNanos(lastStartTime.getNano()).getNano()), + this.lastFetchDocumentCount, + this.lastFetchRetryCount)), + 1); + } + + @Override + protected void writeAfterPartitionExecutionTimeline() { + QueryMetricsTextWriter.appendHeaderToStringBuilder(stringBuilder, PartitionExecutionTimelineTable.getBottomLine(), + 1); + } + + @Override + protected void writeBeforeSchedulingMetrics() { + QueryMetricsTextWriter.appendNewlineToStringBuilder(stringBuilder); + + // Building the table for scheduling metrics + QueryMetricsTextWriter.appendHeaderToStringBuilder(stringBuilder, QueryMetricsTextWriter.SchedulingMetrics, 1); + QueryMetricsTextWriter.appendHeaderToStringBuilder(stringBuilder, SchedulingMetricsTable.getTopLine(), 1); + QueryMetricsTextWriter.appendHeaderToStringBuilder(stringBuilder, SchedulingMetricsTable.getHeader(), 1); + QueryMetricsTextWriter.appendHeaderToStringBuilder(stringBuilder, SchedulingMetricsTable.getMiddleLine(), 1); + } + + @Override + protected void writeBeforePartitionSchedulingDuration() { + // Do Nothing + } + + @Override + protected void writePartitionSchedulingDurationId(String partitionId) { + this.lastSchedulingPartitionId = partitionId; + } + + @Override + protected void writeResponseTime(long responseTime) { + this.lastResponseTime = responseTime; + } + + @Override + protected void writeRunTime(long runTime) { + this.lastRunTime = runTime; + } + + @Override + protected void writeWaitTime(long waitTime) { + this.lastWaitTime = waitTime; + } + + @Override + protected void writeTurnaroundTime(long turnaroundTime) { + this.lastTurnaroundTime = turnaroundTime; + } + + @Override + protected void writeNumberOfPreemptions(long numPreemptions) { + this.lastNumberOfPreemptions = numPreemptions; + } + + @Override + protected 
void writeAfterPartitionSchedulingDuration() { + QueryMetricsTextWriter.appendHeaderToStringBuilder( + stringBuilder, + SchedulingMetricsTable.getRow(Arrays.asList( + this.lastSchedulingPartitionId, + this.lastResponseTime, + this.lastRunTime, + this.lastWaitTime, + this.lastTurnaroundTime, + this.lastNumberOfPreemptions)), + 1); + } + + @Override + protected void writeAfterSchedulingMetrics() { + QueryMetricsTextWriter.appendHeaderToStringBuilder(stringBuilder, SchedulingMetricsTable.getBottomLine(), 1); + } + + @Override + protected void writeAfterClientSideMetrics() { + // Do Nothing + } + + @Override + protected void writeAfterQueryMetrics() { + // Do Nothing + } + + // Util functions + private static final int NANOS_TO_MILLIS = 1000000; + + static HashMap parseDelimitedString(String delimitedString) { + if (delimitedString == null) { + throw new NullPointerException("delimitedString"); + } + + HashMap metrics = new HashMap<>(); + + final int key = 0; + final int value = 1; + String[] headerAttributes = StringUtils.split(delimitedString, ";"); + + for (String attribute : headerAttributes) { + String[] attributeKeyValue = StringUtils.split(attribute, "="); + + if (attributeKeyValue.length != 2) { + throw new NullPointerException("recieved a malformed delimited STRING"); + } + + String attributeKey = attributeKeyValue[key]; + double attributeValue = Double.parseDouble(attributeKeyValue[value]); + metrics.put(attributeKey, attributeValue); + } + + return metrics; + } + + static Duration durationFromMetrics(HashMap metrics, String key) { + // Just attempt to get the metrics + Double durationInMilliseconds = metrics.get(key); + if (durationInMilliseconds == null) { + return Duration.ZERO; + } + + long seconds = (long) (durationInMilliseconds / 1e3); + long nanoseconds = (long) ((durationInMilliseconds - (seconds * 1e3)) * 1e6); + + return Duration.ofSeconds(seconds, nanoseconds); + } + + static double durationToMilliseconds(Duration duration) { + double seconds = duration.getSeconds(); + double nano = duration.getNano(); + + return (seconds * 1e3) + (nano / 1e6); + } + + static Duration getDurationFromMetrics(HashMap metrics, String key) { + double timeSpanInMilliseconds; + Duration timeSpanFromMetrics; + timeSpanInMilliseconds = metrics.get(key); + timeSpanFromMetrics = doubleMillisecondsToDuration(timeSpanInMilliseconds); + return timeSpanFromMetrics; + } + + private static Duration doubleMillisecondsToDuration(double timeSpanInMilliseconds) { + long timeInNanoSeconds = (long) (timeSpanInMilliseconds * NANOS_TO_MILLIS); + return Duration.ofNanos(timeInNanoSeconds); + } + + private static void appendToStringBuilder(StringBuilder stringBuilder, String property, String value, + String units, int indentLevel) { + final String Indent = " "; + final String FormatString = "%-40s : %15s %-12s %s"; + + stringBuilder.append(String.format( + Locale.ROOT, + FormatString, + StringUtils.repeat(Indent, indentLevel) + property, + value, + units, + System.lineSeparator())); + } + + static void appendBytesToStringBuilder(StringBuilder stringBuilder, String property, long bytes, int indentLevel) { + final String BytesFormatString = "%d"; + final String BytesUnitString = "bytes"; + + appendToStringBuilder( + stringBuilder, + property, + String.format(BytesFormatString, bytes), + BytesUnitString, + indentLevel); + } + + static void appendMillisecondsToStringBuilder(StringBuilder stringBuilder, String property, double milliseconds, + int indentLevel) { + final String MillisecondsFormatString = "%f"; + final 
String MillisecondsUnitString = "milliseconds"; + + appendToStringBuilder(stringBuilder, property, String.format(MillisecondsFormatString, + milliseconds), MillisecondsUnitString, indentLevel); + } + + static void appendNanosecondsToStringBuilder(StringBuilder stringBuilder, String property, double nanoSeconds, + int indentLevel) { + final String MillisecondsFormatString = "%.2f"; + final String MillisecondsUnitString = "milliseconds"; + appendToStringBuilder(stringBuilder, property, String.format(MillisecondsFormatString, + nanosToMilliSeconds(nanoSeconds)), MillisecondsUnitString, indentLevel); + } + + static double nanosToMilliSeconds(double nanos) { + return nanos / NANOS_TO_MILLIS; + } + + static void appendHeaderToStringBuilder(StringBuilder stringBuilder, String headerTitle, int indentLevel) { + final String Indent = " "; + final String FormatString = "%s %s"; + stringBuilder.append(String.format( + Locale.ROOT, + FormatString, + String.join(StringUtils.repeat(Indent, indentLevel)) + headerTitle, + System.lineSeparator())); + } + + static void appendRUToStringBuilder(StringBuilder stringBuilder, String property, double requestCharge, + int indentLevel) { + final String RequestChargeFormatString = "%s"; + final String RequestChargeUnitString = "RUs"; + + appendToStringBuilder( + stringBuilder, + property, + String.format(Locale.ROOT, RequestChargeFormatString, requestCharge), + RequestChargeUnitString, + indentLevel); + } + + static void appendActivityIdsToStringBuilder(StringBuilder stringBuilder, String activityIdsLabel, + List activityIds, int indentLevel) { + final String Indent = " "; + stringBuilder.append(activityIdsLabel); + stringBuilder.append(System.lineSeparator()); + for (String activityId : activityIds) { + stringBuilder.append(Indent); + stringBuilder.append(activityId); + stringBuilder.append(System.lineSeparator()); + } + } + + static void appendPercentageToStringBuilder(StringBuilder stringBuilder, String property, double percentage, + int indentLevel) { + final String PercentageFormatString = "%.2f"; + final String PercentageUnitString = "%"; + + appendToStringBuilder(stringBuilder, property, String.format(PercentageFormatString, + percentage * 100), PercentageUnitString, indentLevel); + } + + static void appendCountToStringBuilder(StringBuilder stringBuilder, String property, long count, int indentLevel) { + final String CountFormatString = "%s"; + final String CountUnitString = ""; + + appendToStringBuilder( + stringBuilder, + property, + String.format(CountFormatString, count), + CountUnitString, + indentLevel); + } + + static void appendNewlineToStringBuilder(StringBuilder stringBuilder) { + appendHeaderToStringBuilder(stringBuilder, StringUtils.EMPTY, 0); + } + +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/metrics/QueryMetricsWriter.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/metrics/QueryMetricsWriter.java new file mode 100644 index 0000000000000..d61094020e326 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/metrics/QueryMetricsWriter.java @@ -0,0 +1,225 @@ +package com.azure.data.cosmos.internal.query.metrics; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.internal.QueryMetrics; +import com.azure.data.cosmos.internal.QueryPreparationTimes; +import com.azure.data.cosmos.internal.RuntimeExecutionTimes; +import org.apache.commons.lang3.tuple.ImmutablePair; + +import java.time.Duration; +import java.time.Instant; +import 
java.util.List; + +abstract class QueryMetricsWriter { + + public void writeQueryMetrics(QueryMetrics queryMetrics) { + this.writeBeforeQueryMetrics(); + + // Top Level Properties + this.writeRetrievedDocumentCount(queryMetrics.getRetrievedDocumentCount()); + this.writeRetrievedDocumentSize(queryMetrics.getRetrievedDocumentSize()); + this.writeOutputDocumentCount(queryMetrics.getOutputDocumentCount()); + this.writeOutputDocumentSize(queryMetrics.getOutputDocumentSize()); + this.writeIndexHitRatio(queryMetrics.getIndexHitRatio()); + this.writeTotalQueryExecutionTime(queryMetrics.getTotalQueryExecutionTime()); + + // QueryPreparationTimes + this.writeQueryPreparationTimes(queryMetrics.getQueryPreparationTimes()); + + this.writeIndexLookupTime(queryMetrics.getIndexLookupTime()); + this.writeDocumentLoadTime(queryMetrics.getDocumentLoadTime()); + this.writeVMExecutionTime(queryMetrics.getVMExecutionTime()); + + // RuntimesExecutionTimes + this.writeRuntimesExecutionTimes(queryMetrics.getRuntimeExecutionTimes()); + + this.writeDocumentWriteTime(queryMetrics.getDocumentWriteTime()); + + // ClientSideMetrics + this.writeClientSideMetrics(BridgeInternal.getClientSideMetrics(queryMetrics)); + + this.writeAfterQueryMetrics(); + } + + protected abstract void writeBeforeQueryMetrics(); + + protected abstract void writeRetrievedDocumentCount(long retrievedDocumentCount); + + protected abstract void writeRetrievedDocumentSize(long retrievedDocumentSize); + + protected abstract void writeOutputDocumentCount(long outputDocumentCount); + + protected abstract void writeOutputDocumentSize(long outputDocumentSize); + + protected abstract void writeIndexHitRatio(double indexHitRatio); + + protected abstract void writeTotalQueryExecutionTime(Duration totalQueryExecutionTime); + + //QueryPreparationTimes + private void writeQueryPreparationTimes(QueryPreparationTimes queryPreparationTimes) { + this.writeBeforeQueryPreparationTimes(); + + this.writeQueryCompilationTime(queryPreparationTimes.getQueryCompilationTime()); + this.writeLogicalPlanBuildTime(queryPreparationTimes.getLogicalPlanBuildTime()); + this.writePhysicalPlanBuildTime(queryPreparationTimes.getPhysicalPlanBuildTime()); + this.writeQueryOptimizationTime(queryPreparationTimes.getQueryOptimizationTime()); + + this.writeAfterQueryPreparationTimes(); + } + + protected abstract void writeBeforeQueryPreparationTimes(); + + protected abstract void writeQueryCompilationTime(Duration queryCompilationTime); + + protected abstract void writeLogicalPlanBuildTime(Duration logicalPlanBuildTime); + + protected abstract void writePhysicalPlanBuildTime(Duration physicalPlanBuildTime); + + protected abstract void writeQueryOptimizationTime(Duration queryOptimizationTime); + + protected abstract void writeAfterQueryPreparationTimes(); + + protected abstract void writeIndexLookupTime(Duration indexLookupTime); + + protected abstract void writeDocumentLoadTime(Duration documentLoadTime); + + protected abstract void writeVMExecutionTime(Duration vMExecutionTime); + + // RuntimeExecutionTimes + private void writeRuntimesExecutionTimes(RuntimeExecutionTimes runtimeExecutionTimes) { + this.writeBeforeRuntimeExecutionTimes(); + + this.writeQueryEngineExecutionTime(runtimeExecutionTimes.getQueryEngineExecutionTime()); + this.writeSystemFunctionExecutionTime(runtimeExecutionTimes.getSystemFunctionExecutionTime()); + this.writeUserDefinedFunctionExecutionTime(runtimeExecutionTimes.getUserDefinedFunctionExecutionTime()); + + this.writeAfterRuntimeExecutionTimes(); + } + + + 
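+ // NOTE: QueryMetricsWriter is a template-method base class: the public writeQueryMetrics(...) above fixes the
+ // traversal order (top-level counters, preparation times, engine times, runtime execution times, client-side
+ // metrics) and concrete writers such as QueryMetricsTextWriter only implement the write* hooks declared below.
+ // A minimal, illustrative usage sketch from inside this package (the populated `queryMetrics` variable is an
+ // assumption, not part of this diff):
+ //
+ //     StringBuilder output = new StringBuilder();
+ //     QueryMetricsWriter writer = new QueryMetricsTextWriter(output);
+ //     writer.writeQueryMetrics(queryMetrics);
+ //     String report = output.toString();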
protected abstract void writeBeforeRuntimeExecutionTimes(); + + protected abstract void writeQueryEngineExecutionTime(Duration queryEngineExecutionTime); + + protected abstract void writeSystemFunctionExecutionTime(Duration systemFunctionExecutionTime); + + protected abstract void writeUserDefinedFunctionExecutionTime(Duration userDefinedFunctionExecutionTime); + + protected abstract void writeAfterRuntimeExecutionTimes(); + + protected abstract void writeDocumentWriteTime(Duration documentWriteTime); + + // ClientSideMetrics + private void writeClientSideMetrics(ClientSideMetrics clientSideMetrics) { + this.writeBeforeClientSideMetrics(); + + this.writeRetries(clientSideMetrics.getRetries()); + this.writeRequestCharge(clientSideMetrics.getRequestCharge()); + this.writePartitionExecutionTimeline(clientSideMetrics); + this.writeSchedulingMetrics(clientSideMetrics); + + this.writeAfterClientSideMetrics(); + } + + protected abstract void writeBeforeClientSideMetrics(); + + protected abstract void writeRetries(long retries); + + protected abstract void writeRequestCharge(double requestCharge); + + private void writePartitionExecutionTimeline(ClientSideMetrics clientSideMetrics) { + this.writeBeforePartitionExecutionTimeline(); + List fetchExecutionRanges = clientSideMetrics.getFetchExecutionRanges(); + fetchExecutionRanges.sort((f1, f2) -> f2.getStartTime().compareTo(f1.getStartTime())); + for (FetchExecutionRange fetchExecutionRange : clientSideMetrics.getFetchExecutionRanges()) { + this.writeFetchExecutionRange(fetchExecutionRange); + } + this.writeAfterPartitionExecutionTimeline(); + } + + protected abstract void writeBeforePartitionExecutionTimeline(); + + private void writeFetchExecutionRange(FetchExecutionRange fetchExecutionRange) { + this.writeBeforeFetchExecutionRange(); + + this.writeFetchPartitionKeyRangeId(fetchExecutionRange.getPartitionId()); + this.writeActivityId(fetchExecutionRange.getActivityId()); + this.writeStartTime(fetchExecutionRange.getStartTime()); + this.writeEndTime(fetchExecutionRange.getEndTime()); + this.writeFetchDocumentCount(fetchExecutionRange.getNumberOfDocuments()); + this.writeFetchRetryCount(fetchExecutionRange.getRetryCount()); + + this.writeAfterFetchExecutionRange(); + } + + protected abstract void writeBeforeFetchExecutionRange(); + + protected abstract void writeFetchPartitionKeyRangeId(String partitionId); + + protected abstract void writeActivityId(String activityId); + + protected abstract void writeStartTime(Instant startTime); + + protected abstract void writeEndTime(Instant endTime); + + protected abstract void writeFetchDocumentCount(long numberOfDocuments); + + protected abstract void writeFetchRetryCount(long retryCount); + + protected abstract void writeAfterFetchExecutionRange(); + + protected abstract void writeAfterPartitionExecutionTimeline(); + + private void writeSchedulingMetrics(ClientSideMetrics clientSideMetrics) { + this.writeBeforeSchedulingMetrics(); + List> partitionSchedulingTimeSpans = clientSideMetrics.getPartitionSchedulingTimeSpans(); + partitionSchedulingTimeSpans.sort((o1, o2) -> (int) (o2.right.getResponseTime() - o1.right.getResponseTime())); + for (ImmutablePair partitionSchedulingDuration : + partitionSchedulingTimeSpans) { + String partitionId = partitionSchedulingDuration.getLeft(); + SchedulingTimeSpan schedulingDuration = partitionSchedulingDuration.getRight(); + + this.writePartitionSchedulingDuration(partitionId, schedulingDuration); + } + + this.writeAfterSchedulingMetrics(); + } + + protected abstract void 
writeBeforeSchedulingMetrics(); + + private void writePartitionSchedulingDuration(String partitionId, SchedulingTimeSpan schedulingDuration) { + this.writeBeforePartitionSchedulingDuration(); + + this.writePartitionSchedulingDurationId(partitionId); + this.writeResponseTime(schedulingDuration.getResponseTime()); + this.writeRunTime(schedulingDuration.getRunTime()); + this.writeWaitTime(schedulingDuration.getWaitTime()); + this.writeTurnaroundTime(schedulingDuration.getTurnaroundTime()); + this.writeNumberOfPreemptions(schedulingDuration.getNumPreemptions()); + + this.writeAfterPartitionSchedulingDuration(); + } + + protected abstract void writeBeforePartitionSchedulingDuration(); + + protected abstract void writePartitionSchedulingDurationId(String partitionId); + + protected abstract void writeResponseTime(long responseTime); + + protected abstract void writeRunTime(long runTime); + + protected abstract void writeWaitTime(long waitTime); + + protected abstract void writeTurnaroundTime(long turnaroundTime); + + protected abstract void writeNumberOfPreemptions(long numPreemptions); + + protected abstract void writeAfterPartitionSchedulingDuration(); + + protected abstract void writeAfterSchedulingMetrics(); + + protected abstract void writeAfterClientSideMetrics(); + + protected abstract void writeAfterQueryMetrics(); + +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/metrics/SchedulingStopwatch.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/metrics/SchedulingStopwatch.java new file mode 100644 index 0000000000000..64fbf2c103c99 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/metrics/SchedulingStopwatch.java @@ -0,0 +1,90 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos.internal.query.metrics; + +import org.apache.commons.lang3.time.StopWatch; + +public class SchedulingStopwatch { + private StopWatch turnaroundTimeStopwatch; + private StopWatch responseTimeStopwatch; + private StopWatch runTimeStopwatch; + private long numPreemptions; + private boolean responded; + + public SchedulingStopwatch() { + this.turnaroundTimeStopwatch = new StopWatch(); + this.responseTimeStopwatch = new StopWatch(); + this.runTimeStopwatch = new StopWatch(); + } + + public SchedulingTimeSpan getElapsedTime() { + return new SchedulingTimeSpan(this.turnaroundTimeStopwatch.getTime(), this.responseTimeStopwatch.getTime(), + this.runTimeStopwatch.getTime(), + this.turnaroundTimeStopwatch.getTime() - this.runTimeStopwatch.getTime(), this.numPreemptions); + } + + /** + * Lets the SchedulingStopwatch know that the process is in a state where it is ready to be worked on, + * which in turn starts the stopwatches for response time and turnaround time. + */ + public void ready() { + startStopWatch(this.turnaroundTimeStopwatch); + startStopWatch(this.responseTimeStopwatch); + } + + public void start() { + if (!this.runTimeStopwatch.isStarted()) { + if (!this.responded) { + // This is the first time the process got a response, so the response time stopwatch needs to stop. + this.responseTimeStopwatch.stop(); + this.responded = true; + } + this.runTimeStopwatch.reset(); + startStopWatch(this.runTimeStopwatch); + } + } + + public void stop() { + if (this.runTimeStopwatch.isStarted()) { + stopStopWatch(this.runTimeStopwatch); + this.numPreemptions++; + } + } + + public void terminate() { + stopStopWatch(this.turnaroundTimeStopwatch); + stopStopWatch(this.responseTimeStopwatch); + } + + private void startStopWatch(StopWatch stopwatch) { + synchronized (stopwatch) { + stopwatch.start(); + } + } + + private void stopStopWatch(StopWatch stopwatch) { + synchronized (stopwatch) { + stopwatch.stop(); + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/metrics/SchedulingTimeSpan.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/metrics/SchedulingTimeSpan.java new file mode 100644 index 0000000000000..0600e78daf637 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/metrics/SchedulingTimeSpan.java @@ -0,0 +1,79 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE.
+ */ +package com.azure.data.cosmos.internal.query.metrics; + +public class SchedulingTimeSpan { + + /** + * The total time taken from when the process arrived to when it ended. + */ + private final long turnaroundTime; + + /** + * The total latency (time) taken from when the process arrived to when the CPU actually started working on it. + */ + private final long responseTime; + + /** + * The total time the process spent in the running state. + */ + private final long runTime; + + /** + * The total time that the process spent in the ready or waiting state. + */ + private final long waitTime; + + /** + * Number of times the process was preempted. + */ + private final long numPreemptions; + + public SchedulingTimeSpan(long turnaroundTime, long responseTime, long runTime, long waitTime, long numPreemptions) { + this.turnaroundTime = turnaroundTime; + this.responseTime = responseTime; + this.runTime = runTime; + this.waitTime = waitTime; + this.numPreemptions = numPreemptions; + } + + public long getTurnaroundTime() { + return turnaroundTime; + } + + public long getResponseTime() { + return responseTime; + } + + public long getRunTime() { + return runTime; + } + + public long getWaitTime() { + return waitTime; + } + + public long getNumPreemptions() { + return numPreemptions; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/metrics/TextTable.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/metrics/TextTable.java new file mode 100644 index 0000000000000..367384ad62bb8 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/metrics/TextTable.java @@ -0,0 +1,131 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE.
+ */ +package com.azure.data.cosmos.internal.query.metrics; + +import org.apache.commons.lang3.StringUtils; + +import java.util.ArrayList; +import java.util.List; + +class TextTable { + private static final char CellLeftTop = '┌'; + private static final char CellRightTop = '┐'; + private static final char CellLeftBottom = '└'; + private static final char CellRightBottom = '┘'; + private static final char CellHorizontalJointTop = '┬'; + private static final char CellHorizontalJointBottom = '┴'; + private static final char CellVerticalJointLeft = '├'; + private static final char CellTJoint = '┼'; + private static final char CellVerticalJointRight = '┤'; + private static final char CellHorizontalLine = '-'; + private static final char CellVerticalLine = '│'; + + private List<Column> columns; + + private String header; + private String topLine; + private String middleLine; + private String bottomLine; + + private String rowFormatString; + + /** + * Initializes a new instance of the TextTable class. + * + * @param columns The columns of the table + */ + public TextTable(List<Column> columns) { + this.columns = new ArrayList<>(columns); + + // Building the table header + String headerFormatString = TextTable.buildLineFormatString(columns); + this.header = String.format(headerFormatString, columns.stream().map(textTableColumn -> textTableColumn.columnName).toArray()); + + // building the different lines + this.topLine = TextTable.buildLine(CellLeftTop, CellRightTop, CellHorizontalJointTop, columns); + this.middleLine = TextTable.buildLine(CellVerticalJointLeft, CellVerticalJointRight, CellTJoint, columns); + this.bottomLine = TextTable.buildLine(CellLeftBottom, CellRightBottom, CellHorizontalJointBottom, columns); + + // building the row format string + this.rowFormatString = TextTable.buildLineFormatString(columns); + } + + public String getRow(List<Object> cells) { + if (cells.size() != this.columns.size()) { + throw new IllegalArgumentException("Cells in a row need to have exactly 1 element per column"); + } + return String.format(this.rowFormatString, cells.toArray()); + } + + private static String buildLine(char firstChar, char lastChar, char separator, List<Column> columns) { + StringBuilder lineStringBuilder = new StringBuilder(); + lineStringBuilder.append(firstChar); + for (Column column : columns.subList(0, columns.size() - 1)) { + lineStringBuilder.append(StringUtils.repeat(CellHorizontalLine, column.columnWidth)); + lineStringBuilder.append(separator); + } + + lineStringBuilder.append(StringUtils.repeat(CellHorizontalLine, columns.get(columns.size() - 1).columnWidth)); + lineStringBuilder.append(lastChar); + + return lineStringBuilder.toString(); + } + + private static String buildLineFormatString(List<Column> columns) { + StringBuilder lineFormatStringBuilder = new StringBuilder(); + lineFormatStringBuilder.append(CellVerticalLine); + for (Column column : columns) { + lineFormatStringBuilder.append("%" + column.columnWidth + "s"); + lineFormatStringBuilder.append(CellVerticalLine); + } + + return lineFormatStringBuilder.toString(); + } + + + static class Column { + String columnName; + int columnWidth; + + public Column(String columnName, int columnWidth) { + this.columnName = columnName; + this.columnWidth = columnWidth; + } + } + + public String getHeader() { + return header; + } + + public String getTopLine() { + return topLine; + } + + public String getMiddleLine() { + return middleLine; + } + + public String getBottomLine() { + return bottomLine; + } +} diff --git
a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/orderbyquery/OrderByRowResult.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/orderbyquery/OrderByRowResult.java new file mode 100644 index 0000000000000..2427b16d25355 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/orderbyquery/OrderByRowResult.java @@ -0,0 +1,69 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.query.orderbyquery; + +import com.azure.data.cosmos.internal.Document; +import com.azure.data.cosmos.internal.PartitionKeyRange; +import com.azure.data.cosmos.internal.query.QueryItem; + +import java.util.List; + +/** + * Represents the result of a query in the Azure Cosmos DB database service. + */ +public final class OrderByRowResult extends Document { + private final Class klass; + private volatile List orderByItems; + private volatile T payload; + private final PartitionKeyRange targetRange; + private final String backendContinuationToken; + + public OrderByRowResult( + Class klass, + String jsonString, + PartitionKeyRange targetRange, + String backendContinuationToken) { + super(jsonString); + this.klass = klass; + this.targetRange = targetRange; + this.backendContinuationToken = backendContinuationToken; + } + + public List getOrderByItems() { + return this.orderByItems != null ? this.orderByItems + : (this.orderByItems = super.getList("orderByItems", QueryItem.class)); + } + + public T getPayload() { + return this.payload != null ? 
this.payload : (this.payload = super.getObject("payload", klass)); + } + + public PartitionKeyRange getSourcePartitionKeyRange() { + return this.targetRange; + } + + public String getSourceBackendContinuationToken() { + return this.backendContinuationToken; + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/orderbyquery/OrderbyRowComparer.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/orderbyquery/OrderbyRowComparer.java new file mode 100644 index 0000000000000..c205d04529834 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/query/orderbyquery/OrderbyRowComparer.java @@ -0,0 +1,116 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.query.orderbyquery; + +import com.azure.data.cosmos.internal.query.ItemComparator; +import com.azure.data.cosmos.internal.query.ItemType; +import com.azure.data.cosmos.internal.query.ItemTypeHelper; +import com.azure.data.cosmos.internal.query.QueryItem; +import com.azure.data.cosmos.internal.query.SortOrder; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Comparator; +import java.util.List; + +public final class OrderbyRowComparer implements Comparator> { + private static final Logger logger = LoggerFactory.getLogger(OrderbyRowComparer.class); + + private final List sortOrders; + private volatile List itemTypes; + + public OrderbyRowComparer(Collection sortOrders) { + this.sortOrders = new ArrayList<>(sortOrders); + } + + @Override + public int compare(OrderByRowResult r1, OrderByRowResult r2) { + try { + // comparing document (row) vs document (row) + List result1 = r1.getOrderByItems(); + List result2 = r2.getOrderByItems(); + + if (result1.size() != result2.size()) { + throw new IllegalStateException("OrderByItems cannot have different sizes."); + } + + if (result1.size() != this.sortOrders.size()) { + throw new IllegalStateException("OrderByItems cannot have a different size than sort orders."); + } + + if (this.itemTypes == null) { + synchronized (this) { + if (this.itemTypes == null) { + this.itemTypes = new ArrayList(result1.size()); + for (QueryItem item : result1) { + this.itemTypes.add(ItemTypeHelper.getOrderByItemType(item.getItem())); + } + } + } + } + + this.checkOrderByItemType(result1); + this.checkOrderByItemType(result2); + + for (int i = 0; i < result1.size(); ++i) { + int cmp = ItemComparator.getInstance().compare(result1.get(i).getItem(), result2.get(i).getItem()); + if (cmp != 0) { + switch (this.sortOrders.get(i)) { + case Ascending: + return cmp; + case Descending: + return -cmp; + } + } + } + + return r1.getSourcePartitionKeyRange().getMinInclusive().compareTo(r2.getSourcePartitionKeyRange().getMinInclusive()); + } catch (Exception e) { + // Due to a bug in rxjava-extras <= 0.8.0.15 dependency, + // if OrderbyRowComparer throws an unexpected exception, + // then the observable returned by Transformers.orderedMergeWith(.) will never emit a terminal event. 
+ // rxjava-extras lib provided a quick fix on the bugreport: + // https://github.com/davidmoten/rxjava-extras/issues/30 (0.8.0.16) + // we are also capturing the exception stacktrace here + logger.error("Orderby Row comparision failed {}, {}", r1.toJson(), r2.toJson(), e); + throw e; + } + } + + private void checkOrderByItemType(List orderByItems) { + for (int i = 0; i < this.itemTypes.size(); ++i) { + ItemType type = ItemTypeHelper.getOrderByItemType(orderByItems.get(i).getItem()); + if (type != this.itemTypes.get(i)) { + throw new UnsupportedOperationException( + String.format("Expected %s, but got %s.", this.itemTypes.get(i).toString(), type.toString())); + } + } + } + + public List getSortOrders() { + return this.sortOrders; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/BoolPartitionKeyComponent.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/BoolPartitionKeyComponent.java new file mode 100644 index 0000000000000..053dea7c7eee2 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/BoolPartitionKeyComponent.java @@ -0,0 +1,98 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.routing; + +import com.azure.data.cosmos.internal.Utils; +import com.fasterxml.jackson.core.JsonGenerator; + +import java.io.IOException; +import java.io.OutputStream; + +class BoolPartitionKeyComponent implements IPartitionKeyComponent { + + private final boolean value; + + public BoolPartitionKeyComponent(boolean value) { + this.value = value; + } + + @Override + public int CompareTo(IPartitionKeyComponent other) { + BoolPartitionKeyComponent otherBool = Utils.as(other, BoolPartitionKeyComponent.class); + if (otherBool == null) { + throw new IllegalArgumentException("other"); + } + + return (int) Math.signum((this.value ? 1 : 0) - (otherBool.value ? 1 : 0)); + } + + @Override + public int GetTypeOrdinal() { + return this.value ? PartitionKeyComponentType.TRUE.type : PartitionKeyComponentType.FALSE.type; + } + + @Override + public void JsonEncode(JsonGenerator writer) { + try { + writer.writeBoolean(this.value); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + + @Override + public void WriteForHashing(OutputStream outputStream) { + try { + outputStream.write((byte) (this.value ? 
PartitionKeyComponentType.TRUE.type + : PartitionKeyComponentType.FALSE.type)); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + + @Override + public void WriteForHashingV2(OutputStream outputStream) { + try { + outputStream.write((byte) (this.value ? PartitionKeyComponentType.TRUE.type + : PartitionKeyComponentType.FALSE.type)); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + + @Override + public void WriteForBinaryEncoding(OutputStream outputStream) { + try { + outputStream.write((byte) (this.value ? PartitionKeyComponentType.TRUE.type + : PartitionKeyComponentType.FALSE.type)); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + + @Override + public IPartitionKeyComponent Truncate() { + return this; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/CaseInsensitiveHashMap.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/CaseInsensitiveHashMap.java new file mode 100644 index 0000000000000..dc000cd04c1ad --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/CaseInsensitiveHashMap.java @@ -0,0 +1,80 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.routing; + +import java.util.HashMap; +import java.util.Map; +import java.util.function.BiFunction; +import java.util.function.Function; + + +// TODO: commons-collections lib has CaseInsensitiveHashMap we should switch to that. +// https://commons.apache.org/proper/commons-collections/javadocs/api-3.2.2/org/apache/commons/collections/map/CaseInsensitiveMap.html +public class CaseInsensitiveHashMap extends HashMap { + + private static String safeToLower(String key) { + return key != null ? 
key.toLowerCase() : null; + } + + @Override + public V get(Object key) { + return super.get(safeToLower((String) key)); + } + + + @Override + public void putAll(Map m) { + super.putAll(m); + } + + @Override + public V put(String key, V value) { + return super.put(safeToLower(key), value); + } + + @Override + public V putIfAbsent(String key, V value) { + return super.putIfAbsent(safeToLower(key), value); + } + + @Override + public V compute(String key, BiFunction remappingFunction) { + return super.compute(safeToLower(key), remappingFunction); + } + + @Override + public V computeIfAbsent(String key, Function mappingFunction) { + return super.computeIfAbsent(safeToLower(key), mappingFunction); + } + + @Override + public V computeIfPresent(String key, BiFunction remappingFunction) { + return super.computeIfPresent(safeToLower(key), remappingFunction); + } + + @Override + public boolean containsKey(Object key) { + return super.containsKey(safeToLower((String) key)); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/CollectionRoutingMap.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/CollectionRoutingMap.java new file mode 100644 index 0000000000000..a180c1955aeaa --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/CollectionRoutingMap.java @@ -0,0 +1,55 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.routing; + +import com.azure.data.cosmos.internal.PartitionKeyRange; +import org.apache.commons.lang3.tuple.ImmutablePair; + +import java.util.Collection; +import java.util.List; + +/** + * Used internally in request routing in the Azure Cosmos DB database service. 
+ */ +public interface CollectionRoutingMap { + List getOrderedPartitionKeyRanges(); + + PartitionKeyRange getRangeByEffectivePartitionKey(String effectivePartitionKeyValue); + + PartitionKeyRange getRangeByPartitionKeyRangeId(String partitionKeyRangeId); + + List getOverlappingRanges(Range range); + + List getOverlappingRanges(Collection> providedPartitionKeyRanges); + + PartitionKeyRange tryGetRangeByPartitionKeyRangeId(String partitionKeyRangeId); + + IServerIdentity tryGetInfoByPartitionKeyRangeId(String partitionKeyRangeId); + + boolean IsGone(String partitionKeyRangeId); + + String getCollectionUniqueId(); + + CollectionRoutingMap tryCombine(List> ranges); +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/IPartitionKeyComponent.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/IPartitionKeyComponent.java new file mode 100644 index 0000000000000..e65fade4fe831 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/IPartitionKeyComponent.java @@ -0,0 +1,44 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.routing; + +import com.fasterxml.jackson.core.JsonGenerator; + +import java.io.OutputStream; + +interface IPartitionKeyComponent { + int CompareTo(IPartitionKeyComponent other); + + int GetTypeOrdinal(); + + void JsonEncode(JsonGenerator writer); + + void WriteForHashing(OutputStream outputStream); + + void WriteForHashingV2(OutputStream binaryWriter); + + void WriteForBinaryEncoding(OutputStream binaryWriter); + + IPartitionKeyComponent Truncate(); +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/IServerIdentity.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/IServerIdentity.java new file mode 100644 index 0000000000000..2ebb41e349f17 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/IServerIdentity.java @@ -0,0 +1,27 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.routing; + +public interface IServerIdentity { +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/InMemoryCollectionRoutingMap.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/InMemoryCollectionRoutingMap.java new file mode 100644 index 0000000000000..89d14550f1394 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/InMemoryCollectionRoutingMap.java @@ -0,0 +1,257 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.routing; + +import com.azure.data.cosmos.internal.PartitionKeyRange; +import org.apache.commons.collections4.CollectionUtils; +import org.apache.commons.lang3.tuple.ImmutablePair; +import org.apache.commons.lang3.tuple.Pair; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; +import java.util.stream.Collectors; + +/** + * Used internally to cache partition key ranges of a collection in the Azure Cosmos DB database service. + */ +public class InMemoryCollectionRoutingMap implements CollectionRoutingMap { + private final Map> rangeById; + private final List orderedPartitionKeyRanges; + private final List> orderedRanges; + + private final Set goneRanges; + + private String collectionUniqueId; + + private InMemoryCollectionRoutingMap(Map> rangeById, + List orderedPartitionKeyRanges, + String collectionUniqueId) { + this.rangeById = rangeById; + this.orderedPartitionKeyRanges = orderedPartitionKeyRanges; + this.orderedRanges = orderedPartitionKeyRanges.stream().map( + range -> + new Range<>( + range.getMinInclusive(), + range.getMaxExclusive(), + true, + false)).collect(Collectors.toList()); + + this.collectionUniqueId = collectionUniqueId; + this.goneRanges = new HashSet<>(orderedPartitionKeyRanges.stream().flatMap(r -> CollectionUtils.emptyIfNull(r.getParents()).stream()).collect(Collectors.toSet())); + + } + + public static InMemoryCollectionRoutingMap tryCreateCompleteRoutingMap( + Iterable> ranges, String collectionUniqueId) { + + Map> rangeById = + new HashMap<>(); + + for (ImmutablePair range: ranges) { + rangeById.put(range.left.id(), range); + } + + List> sortedRanges = new ArrayList<>(rangeById.values()); + Collections.sort(sortedRanges, new MinPartitionKeyPairComparator()); + List orderedRanges = sortedRanges.stream().map(range -> range.left).collect(Collectors.toList()); + + if (!isCompleteSetOfRanges(orderedRanges)) { + return null; + } + + return new InMemoryCollectionRoutingMap(rangeById, orderedRanges, collectionUniqueId); + } + + private static boolean isCompleteSetOfRanges(List orderedRanges) { + boolean isComplete = false; + if (orderedRanges.size() > 0) { + PartitionKeyRange firstRange = orderedRanges.get(0); + PartitionKeyRange lastRange = orderedRanges.get(orderedRanges.size() - 1); + isComplete = firstRange.getMinInclusive() + .compareTo(PartitionKeyRange.MINIMUM_INCLUSIVE_EFFECTIVE_PARTITION_KEY) == 0; + isComplete &= lastRange.getMaxExclusive() + .compareTo(PartitionKeyRange.MAXIMUM_EXCLUSIVE_EFFECTIVE_PARTITION_KEY) == 0; + + for (int i = 1; i < orderedRanges.size(); i++) { + PartitionKeyRange previousRange = orderedRanges.get(i - 1); + PartitionKeyRange currentRange = orderedRanges.get(i); + isComplete &= previousRange.getMaxExclusive().compareTo(currentRange.getMinInclusive()) == 0; + + if (!isComplete) { + if (previousRange.getMaxExclusive().compareTo(currentRange.getMinInclusive()) > 0) { + throw new IllegalStateException("Ranges overlap"); + } + + break; + } + } + } + + return isComplete; + } + + public String 
getCollectionUniqueId() { + return collectionUniqueId; + } + + @Override + public List getOrderedPartitionKeyRanges() { + return this.orderedPartitionKeyRanges; + } + + @Override + public PartitionKeyRange getRangeByEffectivePartitionKey(String effectivePartitionKeyValue) { + if (PartitionKeyRange.MINIMUM_INCLUSIVE_EFFECTIVE_PARTITION_KEY.compareTo(effectivePartitionKeyValue) == 0) { + return this.orderedPartitionKeyRanges.get(0); + } + + if (PartitionKeyRange.MAXIMUM_EXCLUSIVE_EFFECTIVE_PARTITION_KEY.compareTo(effectivePartitionKeyValue) == 0) { + return null; + } + + int index = Collections.binarySearch(this.orderedRanges, Range.getPointRange(effectivePartitionKeyValue), + new Range.MinComparator()); + + if (index < 0) { + index = Math.max(0, -index - 2); + } + + return this.orderedPartitionKeyRanges.get(index); + } + + @Override + public PartitionKeyRange getRangeByPartitionKeyRangeId(String partitionKeyRangeId) { + ImmutablePair pair = this.rangeById.get(partitionKeyRangeId); + return pair == null ? null : pair.left; + } + + + @Override + public List getOverlappingRanges(Range range) { + return this.getOverlappingRanges(Collections.singletonList(range)); + } + + @Override + public List getOverlappingRanges(Collection> providedPartitionKeyRanges) { + if (providedPartitionKeyRanges == null) { + throw new IllegalArgumentException("providedPartitionKeyRanges"); + } + + Map partitionRanges = new TreeMap(); + + for (Range range : providedPartitionKeyRanges) { + int minIndex = Collections.binarySearch(this.orderedRanges, range, new Range.MinComparator()); + if (minIndex < 0) { + minIndex = Math.max(minIndex, -minIndex - 2); + } + + int maxIndex = Collections.binarySearch(this.orderedRanges, range, new Range.MaxComparator()); + if (maxIndex < 0) { + maxIndex = Math.min(this.orderedRanges.size() - 1, -maxIndex - 1); + } + + for (int i = minIndex; i <= maxIndex; ++i) { + if (Range.checkOverlapping(this.orderedRanges.get(i), range)) { + PartitionKeyRange partitionKeyRange = this.orderedPartitionKeyRanges.get(i); + partitionRanges.put(partitionKeyRange.getMinInclusive(), partitionKeyRange); + } + } + } + + return new ArrayList<>(partitionRanges.values()); + } + + + @Override + public PartitionKeyRange tryGetRangeByPartitionKeyRangeId(String partitionKeyRangeId) + { + Pair addresses; + addresses = this.rangeById.get(partitionKeyRangeId); + if (addresses != null) { + return addresses.getLeft(); + } + + return null; + } + + @Override + public IServerIdentity tryGetInfoByPartitionKeyRangeId(String partitionKeyRangeId) + { + Pair addresses; + addresses = this.rangeById.get(partitionKeyRangeId); + if (addresses != null) { + return addresses.getRight(); + } + + return null; + } + + @Override + public boolean IsGone(String partitionKeyRangeId) { + return this.goneRanges.contains(partitionKeyRangeId); + } + + private static class MinPartitionKeyPairComparator + implements Comparator> { + public int compare(ImmutablePair pair1, + ImmutablePair pair2) { + return pair1.left.getMinInclusive().compareTo(pair2.left.getMinInclusive()); + } + } + + + public CollectionRoutingMap tryCombine( + List> ranges) { + Set newGoneRanges = new HashSet<>(ranges.stream().flatMap(tuple -> CollectionUtils.emptyIfNull(tuple.getLeft().getParents()).stream()).collect(Collectors.toSet())); + newGoneRanges.addAll(this.goneRanges); + + Map> newRangeById = + this.rangeById.values().stream().filter(tuple -> !newGoneRanges.contains(tuple.left.id())).collect(Collectors. 
+ toMap(tuple -> tuple.left.id(), tuple -> tuple)); + + for (ImmutablePair tuple : ranges.stream().filter(tuple -> !newGoneRanges.contains(tuple.getLeft().id())).collect(Collectors.toList())) { + newRangeById.put(tuple.getLeft().id(), tuple); + } + + List> sortedRanges = newRangeById.values().stream().collect(Collectors.toList()); + + Collections.sort(sortedRanges, new MinPartitionKeyPairComparator()); + + List newOrderedRanges = sortedRanges.stream().map(range -> range.left).collect(Collectors.toList()); + + if (!isCompleteSetOfRanges(newOrderedRanges)) { + return null; + } + + return new InMemoryCollectionRoutingMap(newRangeById, newOrderedRanges, this.getCollectionUniqueId()); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/InfinityPartitionKeyComponent.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/InfinityPartitionKeyComponent.java new file mode 100644 index 0000000000000..18d02d76c1f9c --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/InfinityPartitionKeyComponent.java @@ -0,0 +1,76 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
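The range lookups in InMemoryCollectionRoutingMap rely on Collections.binarySearch returning -(insertionPoint) - 1 when the probe is not an exact boundary; Math.max(0, -index - 2) then selects the range whose minInclusive is the largest boundary not exceeding the probe (getOverlappingRanges applies the same trick with the Min and Max comparators). A self-contained sketch of that index arithmetic, with plain strings standing in for the Range objects and boundaries invented for the example:

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;

    final class BinarySearchIndexSketch {
        public static void main(String[] args) {
            // minInclusive boundaries of four ordered ranges: ["", "AA"), ["AA", "BB"), ["BB", "CC"), ["CC", "FF")
            List<String> mins = Arrays.asList("", "AA", "BB", "CC");

            String probe = "AB";                              // falls inside ["AA", "BB")
            int index = Collections.binarySearch(mins, probe);
            if (index < 0) {
                index = Math.max(0, -index - 2);              // -index - 1 is the insertion point; the owner starts one slot earlier
            }
            System.out.println(mins.get(index));              // prints "AA"
        }
    }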
+ */ + +package com.azure.data.cosmos.internal.routing; + +import com.azure.data.cosmos.internal.Utils; +import com.fasterxml.jackson.core.JsonGenerator; + +import java.io.IOException; +import java.io.OutputStream; + +class InfinityPartitionKeyComponent implements IPartitionKeyComponent { + @Override + public int CompareTo(IPartitionKeyComponent other) { + InfinityPartitionKeyComponent otherInfinity = Utils.as(other, InfinityPartitionKeyComponent.class); + if (otherInfinity == null) { + throw new IllegalArgumentException("other"); + } + + return 0; + } + + @Override + public int GetTypeOrdinal() { + return PartitionKeyComponentType.INFINITY.type; + } + + @Override + public void JsonEncode(JsonGenerator writer) { + throw new UnsupportedOperationException(); + } + + @Override + public void WriteForHashing(OutputStream outputStream) { + throw new IllegalStateException(); + } + + @Override + public void WriteForHashingV2(OutputStream outputStream) { + throw new IllegalStateException(); + } + + @Override + public void WriteForBinaryEncoding(OutputStream outputStream) { + try { + outputStream.write((byte) PartitionKeyComponentType.INFINITY.type); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + + @Override + public IPartitionKeyComponent Truncate() { + return this; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/Int128.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/Int128.java new file mode 100644 index 0000000000000..07cd07b6d877c --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/Int128.java @@ -0,0 +1,101 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
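InfinityPartitionKeyComponent, like the Max/Min components later in this patch, contributes a single type-ordinal byte when written for binary encoding; that per-type byte is what keeps components of different types ordered consistently. A package-local sketch of the call (the class is package-private, so this would have to live in com.azure.data.cosmos.internal.routing; the exact byte value comes from PartitionKeyComponentType, which is defined elsewhere):

    package com.azure.data.cosmos.internal.routing;

    import java.io.ByteArrayOutputStream;

    final class BinaryEncodingSketch {
        public static void main(String[] args) {
            IPartitionKeyComponent component = new InfinityPartitionKeyComponent();

            ByteArrayOutputStream buffer = new ByteArrayOutputStream();
            component.WriteForBinaryEncoding(buffer);        // appends the single INFINITY type byte

            System.out.println(buffer.toByteArray().length); // 1
        }
    }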
+ */ + +package com.azure.data.cosmos.internal.routing; + + +import java.math.BigInteger; + +public class Int128 { + + private final BigInteger value; + + private static final BigInteger MaxBigIntValue = + new BigInteger(new byte[] { + (byte) 0x80, (byte) 0x00, (byte) 0x00, (byte) 0x00, (byte) 0x00, (byte) 0x00, (byte) 0x00, (byte) 0x00, + (byte) 0x00, (byte) 0x00, (byte) 0x00, (byte) 0x00, (byte) 0x00, (byte) 0x00, (byte) 0x00, (byte) 0x00 + }); + + public static final Int128 MaxValue = new Int128( + new BigInteger(new byte[] { + (byte) 0x7F, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, + (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, + }) + ); + + private Int128(BigInteger value) { + this.value = value.remainder(MaxBigIntValue); + } + + public Int128(int n) { + this(BigInteger.valueOf(n)); + } + + public Int128(byte[] data) { + if (data.length != 16) { + throw new IllegalArgumentException("data"); + } + + this.value = new BigInteger(data); + + if (this.value.compareTo(MaxValue.value) > 0) { + throw new IllegalArgumentException(); + } + } + + public static Int128 multiply(Int128 left, Int128 right) { + return new Int128(left.value.multiply(right.value)); + } + + public static Int128 add(Int128 left, Int128 right) { + return new Int128(left.value.add(right.value)); + } + + public static Int128 subtract(Int128 left, Int128 right) { + return new Int128(left.value.subtract(right.value)); + } + + public static Int128 div (Int128 left, Int128 right) { + return new Int128(left.value.divide(right.value)); + } + + public static boolean gt(Int128 left, Int128 right) { + return left.value.compareTo(right.value) > 0; + } + + public static boolean lt(Int128 left, Int128 right) { + return left.value.compareTo(right.value) < 0; + } + + public byte[] bytes() { + byte[] bytes = this.value.toByteArray(); + if (bytes.length < 16) { + byte[] paddedBytes = new byte[16]; + System.arraycopy(bytes, 0, paddedBytes, 0, bytes.length); + return paddedBytes; + } + + return bytes; + } + +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/LocationCache.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/LocationCache.java new file mode 100644 index 0000000000000..78c2cf6d7ea72 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/LocationCache.java @@ -0,0 +1,582 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
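Int128 is a thin wrapper over BigInteger (values are reduced modulo 2^127 in the constructor) used by the routing code where 128-bit arithmetic is needed, and it exposes its value as a 16-byte array. A short usage sketch with arbitrary values:

    import com.azure.data.cosmos.internal.routing.Int128;

    final class Int128Sketch {
        public static void main(String[] args) {
            Int128 a = new Int128(7);
            Int128 b = new Int128(5);

            System.out.println(Int128.gt(Int128.multiply(a, b), Int128.add(a, b))); // true: 35 > 12
            System.out.println(Int128.lt(Int128.subtract(b, a), b));                // true: -2 < 5
            System.out.println(Int128.add(a, b).bytes().length);                    // always a 16-byte array
        }
    }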
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.routing; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.internal.DatabaseAccount; +import com.azure.data.cosmos.internal.DatabaseAccountLocation; +import com.azure.data.cosmos.internal.Configs; +import com.azure.data.cosmos.internal.ResourceType; +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import com.azure.data.cosmos.internal.Strings; +import com.azure.data.cosmos.internal.Utils; +import org.apache.commons.collections4.list.UnmodifiableList; +import org.apache.commons.collections4.map.CaseInsensitiveMap; +import org.apache.commons.collections4.map.UnmodifiableMap; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.net.URL; +import java.time.Duration; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.function.BiFunction; +import java.util.stream.Collectors; + +/** + * Implements the abstraction to resolve target location for geo-replicated DatabaseAccount + * with multiple writable and readable locations. + */ +public class LocationCache { + private final static Logger logger = LoggerFactory.getLogger(LocationCache.class); + + private final boolean enableEndpointDiscovery; + private final URL defaultEndpoint; + private final boolean useMultipleWriteLocations; + private final Object lockObject; + private final Duration unavailableLocationsExpirationTime; + private final ConcurrentHashMap locationUnavailabilityInfoByEndpoint; + + private DatabaseAccountLocationsInfo locationInfo; + + private Instant lastCacheUpdateTimestamp; + private boolean enableMultipleWriteLocations; + + public LocationCache( + List preferredLocations, + URL defaultEndpoint, + boolean enableEndpointDiscovery, + boolean useMultipleWriteLocations, + Configs configs) { + this.locationInfo = new DatabaseAccountLocationsInfo(preferredLocations, defaultEndpoint); + this.defaultEndpoint = defaultEndpoint; + this.enableEndpointDiscovery = enableEndpointDiscovery; + this.useMultipleWriteLocations = useMultipleWriteLocations; + + this.lockObject = new Object(); + + + this.locationUnavailabilityInfoByEndpoint = new ConcurrentHashMap<>(); + + this.lastCacheUpdateTimestamp = Instant.MIN; + this.enableMultipleWriteLocations = false; + this.unavailableLocationsExpirationTime = Duration.ofSeconds(configs.getUnavailableLocationsExpirationTimeInSeconds()); + } + + /** + * Gets list of read endpoints ordered by + * + * 1. Preferred location + * 2. Endpoint availability + * @return + */ + public UnmodifiableList getReadEndpoints() { + if (this.locationUnavailabilityInfoByEndpoint.size() > 0 + && unavailableLocationsExpirationTimePassed()) { + this.updateLocationCache(); + } + + return this.locationInfo.readEndpoints; + } + + /** + * Gets list of write endpoints ordered by + * 1. Preferred location + * 2. 
Endpoint availability + * @return + */ + public UnmodifiableList getWriteEndpoints() { + if (this.locationUnavailabilityInfoByEndpoint.size() > 0 + && unavailableLocationsExpirationTimePassed()) { + this.updateLocationCache(); + } + + return this.locationInfo.writeEndpoints; + } + + /** + * Marks the current location unavailable for read + */ + public void markEndpointUnavailableForRead(URL endpoint) { + this.markEndpointUnavailable(endpoint, OperationType.Read); + } + + /** + * Marks the current location unavailable for write + */ + public void markEndpointUnavailableForWrite(URL endpoint) { + this.markEndpointUnavailable(endpoint, OperationType.Write); + } + + /** + * Invoked when {@link DatabaseAccount} is read + * @param databaseAccount READ DatabaseAccount + */ + public void onDatabaseAccountRead(DatabaseAccount databaseAccount) { + this.updateLocationCache( + databaseAccount.getWritableLocations(), + databaseAccount.getReadableLocations(), + null, + BridgeInternal.isEnableMultipleWriteLocations(databaseAccount)); + } + + void onLocationPreferenceChanged(UnmodifiableList preferredLocations) { + this.updateLocationCache( + null, null , preferredLocations, null); + } + + /** + * Resolves request to service endpoint. + * 1. If this is a write request + * (a) If UseMultipleWriteLocations = true + * (i) For document writes, resolve to most preferred and available write endpoint. + * Once the endpoint is marked unavailable, it is moved to the end of available write endpoint. Current request will + * be retried on next preferred available write endpoint. + * (ii) For all other resources, always resolve to first/second (regardless of preferred locations) + * write endpoint in {@link DatabaseAccount#getWritableLocations()}. + * Endpoint of first write location in {@link DatabaseAccount#getWritableLocations()} is the only endpoint that supports + * write operation on all resource types (except during that region's failover). + * Only during manual failover, client would retry write on second write location in {@link DatabaseAccount#getWritableLocations()}. + * (b) Else resolve the request to first write endpoint in {@link DatabaseAccount#getWritableLocations()} OR + * second write endpoint in {@link DatabaseAccount#getWritableLocations()} in case of manual failover of that location. + * 2. Else resolve the request to most preferred available read endpoint (automatic failover for read requests) + * @param request Request for which endpoint is to be resolved + * @return Resolved endpoint + */ + public URL resolveServiceEndpoint(RxDocumentServiceRequest request) { + if(request.requestContext != null && request.requestContext.locationEndpointToRoute != null) { + return request.requestContext.locationEndpointToRoute; + } + + int locationIndex = Utils.getValueOrDefault(request.requestContext.locationIndexToRoute, 0); + + boolean usePreferredLocations = request.requestContext.usePreferredLocations != null ? 
request.requestContext.usePreferredLocations : true; + if(!usePreferredLocations || (request.getOperationType().isWriteOperation() && !this.canUseMultipleWriteLocations(request))) { + // For non-document resource types in case of client can use multiple write locations + // or when client cannot use multiple write locations, flip-flop between the + // first and the second writable region in DatabaseAccount (for manual failover) + DatabaseAccountLocationsInfo currentLocationInfo = this.locationInfo; + + if(this.enableEndpointDiscovery && currentLocationInfo.availableWriteLocations.size() > 0) { + locationIndex = Math.min(locationIndex%2, currentLocationInfo.availableWriteLocations.size()-1); + String writeLocation = currentLocationInfo.availableWriteLocations.get(locationIndex); + return currentLocationInfo.availableWriteEndpointByLocation.get(writeLocation); + } else { + return this.defaultEndpoint; + } + } else { + UnmodifiableList endpoints = request.getOperationType().isWriteOperation()? this.getWriteEndpoints() : this.getReadEndpoints(); + return endpoints.get(locationIndex % endpoints.size()); + } + } + + public boolean shouldRefreshEndpoints(Utils.ValueHolder canRefreshInBackground) { + canRefreshInBackground.v = true; + DatabaseAccountLocationsInfo currentLocationInfo = this.locationInfo; + String mostPreferredLocation = Utils.firstOrDefault(currentLocationInfo.preferredLocations); + + // we should schedule refresh in background if we are unable to target the user's most preferredLocation. + if (this.enableEndpointDiscovery) { + + boolean shouldRefresh = this.useMultipleWriteLocations && !this.enableMultipleWriteLocations; + if (!Strings.isNullOrEmpty(mostPreferredLocation)) { + Utils.ValueHolder mostPreferredReadEndpointHolder = new Utils.ValueHolder<>(); + List readLocationEndpoints = currentLocationInfo.readEndpoints; + logger.debug("getReadEndpoints [{}]", readLocationEndpoints); + + if (Utils.tryGetValue(currentLocationInfo.availableReadEndpointByLocation, mostPreferredLocation, mostPreferredReadEndpointHolder)) { + logger.debug("most preferred is [{}], most preferred available is [{}]", + mostPreferredLocation, mostPreferredReadEndpointHolder.v); + if (!areEqual(mostPreferredReadEndpointHolder.v, readLocationEndpoints.get(0))) { + // For reads, we can always refresh in background as we can alternate to + // other available read endpoints + logger.debug("shouldRefreshEndpoints = true, most preferred location [{}]" + + " is not available for read.", mostPreferredLocation); + return true; + } + + logger.debug("most preferred is [{}], and most preferred available [{}] are the same", + mostPreferredLocation, mostPreferredReadEndpointHolder.v); + } + else { + logger.debug("shouldRefreshEndpoints = true, most preferred location [{}] " + + "is not in available read locations.", mostPreferredLocation); + return true; + } + } + + Utils.ValueHolder mostPreferredWriteEndpointHolder = new Utils.ValueHolder<>(); + List writeLocationEndpoints = currentLocationInfo.writeEndpoints; + logger.debug("getWriteEndpoints [{}]", writeLocationEndpoints); + + if (!this.canUseMultipleWriteLocations()) { + if (this.isEndpointUnavailable(writeLocationEndpoints.get(0), OperationType.Write)) { + // Since most preferred write endpoint is unavailable, we can only refresh in background if + // we have an alternate write endpoint + canRefreshInBackground.v = writeLocationEndpoints.size() > 1; + logger.debug("shouldRefreshEndpoints = true, most preferred location " + + "[{}] endpoint [{}] is not available for 
write. canRefreshInBackground = [{}]", + mostPreferredLocation, + writeLocationEndpoints.get(0), + canRefreshInBackground.v); + + return true; + } else { + logger.debug("shouldRefreshEndpoints: false, [{}] is available for Write", writeLocationEndpoints.get(0)); + return shouldRefresh; + } + } else if (!Strings.isNullOrEmpty(mostPreferredLocation)) { + if (Utils.tryGetValue(currentLocationInfo.availableWriteEndpointByLocation, mostPreferredLocation, mostPreferredWriteEndpointHolder)) { + shouldRefresh = ! areEqual(mostPreferredWriteEndpointHolder.v,writeLocationEndpoints.get(0)); + + if (shouldRefresh) { + logger.debug("shouldRefreshEndpoints: true, write endpoint [{}] is not the same as most preferred [{}]", + writeLocationEndpoints.get(0), mostPreferredWriteEndpointHolder.v); + } else { + logger.debug("shouldRefreshEndpoints: false, write endpoint [{}] is the same as most preferred [{}]", + writeLocationEndpoints.get(0), mostPreferredWriteEndpointHolder.v); + } + + return shouldRefresh; + } else { + logger.debug("shouldRefreshEndpoints = true, most preferred location [{}] is not in available write locations", + mostPreferredLocation); + return true; + } + } else { + logger.debug("shouldRefreshEndpoints: false, mostPreferredLocation [{}] is empty", mostPreferredLocation); + return shouldRefresh; + } + } else { + logger.debug("shouldRefreshEndpoints: false, endpoint discovery not enabled"); + return false; + } + } + private boolean areEqual(URL url1, URL url2) { + return url1.equals(url2); + } + + private void clearStaleEndpointUnavailabilityInfo() { + if (!this.locationUnavailabilityInfoByEndpoint.isEmpty()) { + List unavailableEndpoints = new ArrayList<>(this.locationUnavailabilityInfoByEndpoint.keySet()); + + for (URL unavailableEndpoint: unavailableEndpoints) { + Utils.ValueHolder unavailabilityInfoHolder = new Utils.ValueHolder<>(); + Utils.ValueHolder removedHolder = new Utils.ValueHolder<>(); + + if (Utils.tryGetValue(this.locationUnavailabilityInfoByEndpoint, unavailableEndpoint, unavailabilityInfoHolder) + && + durationPassed(Instant.now(), unavailabilityInfoHolder.v.LastUnavailabilityCheckTimeStamp, + this.unavailableLocationsExpirationTime) + + && Utils.tryRemove(this.locationUnavailabilityInfoByEndpoint, unavailableEndpoint, removedHolder)) { + logger.debug( + "Removed endpoint [{}] unavailable for operations [{}] from unavailableEndpoints", + unavailableEndpoint, + unavailabilityInfoHolder.v.UnavailableOperations); + } + } + } + } + + private boolean isEndpointUnavailable(URL endpoint, OperationType expectedAvailableOperations) { + Utils.ValueHolder unavailabilityInfoHolder = new Utils.ValueHolder<>(); + + if (expectedAvailableOperations == OperationType.None + || !Utils.tryGetValue(this.locationUnavailabilityInfoByEndpoint, endpoint, unavailabilityInfoHolder) + || !unavailabilityInfoHolder.v.UnavailableOperations.supports(expectedAvailableOperations)) { + return false; + } else { + if (durationPassed(Instant.now(), unavailabilityInfoHolder.v.LastUnavailabilityCheckTimeStamp, this.unavailableLocationsExpirationTime)) { + return false; + } else { + logger.debug( + "Endpoint [{}] unavailable for operations [{}] present in unavailableEndpoints", + endpoint, + unavailabilityInfoHolder.v.UnavailableOperations); + // Unexpired entry present. 
Endpoint is unavailable + return true; + } + } + } + + private void markEndpointUnavailable( + URL unavailableEndpoint, + OperationType unavailableOperationType) { + Instant currentTime = Instant.now(); + LocationUnavailabilityInfo updatedInfo = this.locationUnavailabilityInfoByEndpoint.compute( + unavailableEndpoint, + new BiFunction() { + @Override + public LocationUnavailabilityInfo apply(URL url, LocationUnavailabilityInfo info) { + + if (info == null) { + // not already present, add + return new LocationUnavailabilityInfo(currentTime, unavailableOperationType); + } else { + // already present, update + info.LastUnavailabilityCheckTimeStamp = currentTime; + info.UnavailableOperations = OperationType.combine(info.UnavailableOperations, unavailableOperationType); + return info; + } + + } + }); + + this.updateLocationCache(); + + logger.debug( + "Endpoint [{}] unavailable for [{}] added/updated to unavailableEndpoints with timestamp [{}]", + unavailableEndpoint, + unavailableOperationType, + updatedInfo.LastUnavailabilityCheckTimeStamp); + } + + private void updateLocationCache(){ + updateLocationCache(null, null, null, null); + } + + private void updateLocationCache( + Iterable writeLocations, + Iterable readLocations, + UnmodifiableList preferenceList, + Boolean enableMultipleWriteLocations) { + synchronized (this.lockObject) { + DatabaseAccountLocationsInfo nextLocationInfo = new DatabaseAccountLocationsInfo(this.locationInfo); + logger.debug("updating location cache ..., current readLocations [{}], current writeLocations [{}]", + nextLocationInfo.readEndpoints, nextLocationInfo.writeEndpoints); + + if (preferenceList != null) { + nextLocationInfo.preferredLocations = preferenceList; + } + + if (enableMultipleWriteLocations != null) { + this.enableMultipleWriteLocations = enableMultipleWriteLocations; + } + + this.clearStaleEndpointUnavailabilityInfo(); + + if (readLocations != null) { + Utils.ValueHolder> out = Utils.ValueHolder.initialize(nextLocationInfo.availableReadLocations); + nextLocationInfo.availableReadEndpointByLocation = this.getEndpointByLocation(readLocations, out); + nextLocationInfo.availableReadLocations = out.v; + } + + if (writeLocations != null) { + Utils.ValueHolder> out = Utils.ValueHolder.initialize(nextLocationInfo.availableWriteLocations); + nextLocationInfo.availableWriteEndpointByLocation = this.getEndpointByLocation(writeLocations, out); + nextLocationInfo.availableWriteLocations = out.v; + } + + nextLocationInfo.writeEndpoints = this.getPreferredAvailableEndpoints(nextLocationInfo.availableWriteEndpointByLocation, nextLocationInfo.availableWriteLocations, OperationType.Write, this.defaultEndpoint); + nextLocationInfo.readEndpoints = this.getPreferredAvailableEndpoints(nextLocationInfo.availableReadEndpointByLocation, nextLocationInfo.availableReadLocations, OperationType.Read, nextLocationInfo.writeEndpoints.get(0)); + this.lastCacheUpdateTimestamp = Instant.now(); + + logger.debug("updating location cache finished, new readLocations [{}], new writeLocations [{}]", + nextLocationInfo.readEndpoints, nextLocationInfo.writeEndpoints); + this.locationInfo = nextLocationInfo; + } + } + + private UnmodifiableList getPreferredAvailableEndpoints(UnmodifiableMap endpointsByLocation, + UnmodifiableList orderedLocations, + OperationType expectedAvailableOperation, + URL fallbackEndpoint) { + List endpoints = new ArrayList<>(); + DatabaseAccountLocationsInfo currentLocationInfo = this.locationInfo; + // if enableEndpointDiscovery is false, we always use the 
defaultEndpoint that user passed in during documentClient init + if (this.enableEndpointDiscovery) { + if (this.canUseMultipleWriteLocations() || expectedAvailableOperation.supports(OperationType.Read)) { + List unavailableEndpoints = new ArrayList<>(); + + // When client can not use multiple write locations, preferred locations list should only be used + // determining read endpoints order. + // If client can use multiple write locations, preferred locations list should be used for determining + // both read and write endpoints order. + + for (String location: currentLocationInfo.preferredLocations) { + Utils.ValueHolder endpoint = new Utils.ValueHolder<>(); + if (Utils.tryGetValue(endpointsByLocation, location, endpoint)) { + if (this.isEndpointUnavailable(endpoint.v, expectedAvailableOperation)) { + unavailableEndpoints.add(endpoint.v); + } else { + endpoints.add(endpoint.v); + } + } + } + + if (endpoints.isEmpty()) { + endpoints.add(fallbackEndpoint); + } + + endpoints.addAll(unavailableEndpoints); + } else { + for (String location : orderedLocations) { + + Utils.ValueHolder endpoint = Utils.ValueHolder.initialize(null); + if (!Strings.isNullOrEmpty(location) && // location is empty during manual failover + Utils.tryGetValue(endpointsByLocation, location, endpoint)) { + endpoints.add(endpoint.v); + } + } + } + } + + if (endpoints.isEmpty()) { + endpoints.add(fallbackEndpoint); + } + + return new UnmodifiableList(endpoints); + } + + private UnmodifiableMap getEndpointByLocation(Iterable locations, + Utils.ValueHolder> orderedLocations) { + Map endpointsByLocation = new CaseInsensitiveMap<>(); + List parsedLocations = new ArrayList<>(); + + for (DatabaseAccountLocation location: locations) { + if (!Strings.isNullOrEmpty(location.getName())) { + try { + URL endpoint = new URL(location.getEndpoint().toLowerCase()); + endpointsByLocation.put(location.getName().toLowerCase(), endpoint); + parsedLocations.add(location.getName()); + + } catch (Exception e) { + logger.warn("GetAvailableEndpointsByLocation() - skipping add for location = [{}] as it is location name is either empty or endpoint is malformed [{}]", + location.getName(), + location.getEndpoint()); + } + } + } + + orderedLocations.v = new UnmodifiableList(parsedLocations); + return (UnmodifiableMap) UnmodifiableMap.unmodifiableMap(endpointsByLocation); + } + + private boolean canUseMultipleWriteLocations() { + return this.useMultipleWriteLocations && this.enableMultipleWriteLocations; + } + + public boolean canUseMultipleWriteLocations(RxDocumentServiceRequest request) { + return this.canUseMultipleWriteLocations() && + (request.getResourceType() == ResourceType.Document || + (request.getResourceType() == ResourceType.StoredProcedure && request.getOperationType() == + com.azure.data.cosmos.internal.OperationType.ExecuteJavaScript)); + } + + + private class LocationUnavailabilityInfo { + LocationUnavailabilityInfo(Instant instant, OperationType type) { + this.LastUnavailabilityCheckTimeStamp = instant; + this.UnavailableOperations = type; + } + + public Instant LastUnavailabilityCheckTimeStamp; + public OperationType UnavailableOperations; + } + + private enum OperationType { + None(0x0), + Read(0x1), + Write(0x2), + ReadAndWrite(0x3); + + private final int flag; + + public boolean hasReadFlag() { + return (flag & Read.flag) != 0; + } + + public boolean hasWriteFlag() { + return (flag & Write.flag) != 0; + } + + public static OperationType combine(OperationType t1, OperationType t2) { + switch (t1.flag | t2.flag) { + case 0x0: + 
return None; + case 0x1: + return Read; + case 0x2: + return Write; + default: + return ReadAndWrite; + } + } + + public boolean supports(OperationType type) { + return (flag & type.flag) != 0; + } + + OperationType(int flag) { + this.flag = flag; + } + } + + private boolean durationPassed(Instant end, Instant start, Duration duration) { + return end.minus(duration).isAfter(start); + } + + private boolean unavailableLocationsExpirationTimePassed() { + return durationPassed(Instant.now(), this.lastCacheUpdateTimestamp, this.unavailableLocationsExpirationTime); + } + + class DatabaseAccountLocationsInfo { + private UnmodifiableList preferredLocations; + // lower-case region + private UnmodifiableList availableWriteLocations; + // lower-case region + private UnmodifiableList availableReadLocations; + private UnmodifiableMap availableWriteEndpointByLocation; + private UnmodifiableMap availableReadEndpointByLocation; + + private UnmodifiableList writeEndpoints; + private UnmodifiableList readEndpoints; + + public DatabaseAccountLocationsInfo(List preferredLocations, + URL defaultEndpoint) { + this.preferredLocations = new UnmodifiableList<>(preferredLocations.stream().map(loc -> loc.toLowerCase()).collect(Collectors.toList())); + this.availableWriteEndpointByLocation = (UnmodifiableMap) UnmodifiableMap.unmodifiableMap(new CaseInsensitiveMap<>()); + this.availableReadEndpointByLocation = (UnmodifiableMap) UnmodifiableMap.unmodifiableMap(new CaseInsensitiveMap<>()); + this.availableReadLocations = new UnmodifiableList<>(Collections.emptyList()); + this.availableWriteLocations = new UnmodifiableList<>(Collections.emptyList()); + this.readEndpoints = new UnmodifiableList<>(Collections.singletonList(defaultEndpoint)); + this.writeEndpoints = new UnmodifiableList<>(Collections.singletonList(defaultEndpoint)); + } + + public DatabaseAccountLocationsInfo(DatabaseAccountLocationsInfo other) { + this.preferredLocations = other.preferredLocations; + this.availableWriteLocations = other.availableWriteLocations; + this.availableReadLocations = other.availableReadLocations; + this.availableWriteEndpointByLocation = other.availableWriteEndpointByLocation; + this.availableReadEndpointByLocation = other.availableReadEndpointByLocation; + this.writeEndpoints = other.writeEndpoints; + this.readEndpoints = other.readEndpoints; + } + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/LocationHelper.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/LocationHelper.java new file mode 100644 index 0000000000000..857da425275f5 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/LocationHelper.java @@ -0,0 +1,68 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
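LocationCache records per-endpoint unavailability with the private OperationType bit flags above (Read = 0x1, Write = 0x2), so an endpoint first marked unavailable for reads and later for writes ends up as ReadAndWrite, and supports() is a simple mask test. Because the enum is private, the sketch below restates that flag arithmetic with plain ints (standalone illustration, not SDK API):

    final class OperationFlagsSketch {
        static final int NONE = 0x0, READ = 0x1, WRITE = 0x2;

        static int combine(int a, int b) { return a | b; }                       // mirrors OperationType.combine
        static boolean supports(int flags, int op) { return (flags & op) != 0; } // mirrors OperationType.supports

        public static void main(String[] args) {
            int flags = combine(READ, WRITE);                // 0x3, i.e. ReadAndWrite
            System.out.println(supports(flags, READ));       // true
            System.out.println(supports(NONE, WRITE));       // false: nothing marked unavailable
        }
    }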
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.routing; + +import org.apache.commons.lang3.StringUtils; + +import java.net.URL; + +public class LocationHelper { + /** + * For example, for https://contoso.documents.azure.com:443/ and "West US", this will return https://contoso-westus.documents.azure.com:443/ + * NOTE: This ONLY called by client first boot when the input endpoint is not available. + * + * @param serviceEndpoint + * @param location + * @return + */ + public static URL getLocationEndpoint(URL serviceEndpoint, String location) { + + // Split the host into 2 parts seperated by '.' + // For example, "contoso.documents.azure.com" is separated into "contoso" and "documents.azure.com" + // If the host doesn't contains '.', this will return the host as is, as the only element + String[] hostParts = StringUtils.split(serviceEndpoint.getHost(), ".", 2); + + String host; + if (hostParts.length != 0) { + // hostParts[0] will be the global account name + hostParts[0] = hostParts[0] + "-" + dataCenterToUriPostfix(location); + + // if hostParts has only one element, '.' is not included in the returned string + host = String.join(".", hostParts); + } else { + host = serviceEndpoint.getHost(); + } + + try { + return new URL(serviceEndpoint.getProtocol(), host, serviceEndpoint.getPort(), serviceEndpoint.getFile()); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + private static String dataCenterToUriPostfix(String dataCenter) { + return dataCenter.replace(" ", ""); + } +} + diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/MaxNumberPartitionKeyComponent.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/MaxNumberPartitionKeyComponent.java new file mode 100644 index 0000000000000..cc21423e379fb --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/MaxNumberPartitionKeyComponent.java @@ -0,0 +1,78 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
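As the getLocationEndpoint javadoc above notes, LocationHelper rewrites only the first host label, appending the region name with spaces removed while leaving protocol, port, and path untouched. A quick usage sketch mirroring the documented example (fictitious account name; DNS treats the host case-insensitively even though the code preserves the region's casing):

    import com.azure.data.cosmos.internal.routing.LocationHelper;

    import java.net.URL;

    final class LocationHelperSketch {
        public static void main(String[] args) throws Exception {
            URL global = new URL("https://contoso.documents.azure.com:443/");
            URL regional = LocationHelper.getLocationEndpoint(global, "West US");
            System.out.println(regional); // https://contoso-WestUS.documents.azure.com:443/ (only the space is stripped)
        }
    }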
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.routing; + +import com.azure.data.cosmos.internal.Utils; +import com.fasterxml.jackson.core.JsonGenerator; + +import java.io.IOException; +import java.io.OutputStream; + +class MaxNumberPartitionKeyComponent implements IPartitionKeyComponent { + public static final MaxNumberPartitionKeyComponent VALUE = new MaxNumberPartitionKeyComponent(); + + @Override + public int CompareTo(IPartitionKeyComponent other) { + MaxNumberPartitionKeyComponent otherMaxNumber = Utils.as(other, MaxNumberPartitionKeyComponent.class); + if (otherMaxNumber == null) { + throw new IllegalArgumentException("other"); + } + + return 0; + } + + @Override + public int GetTypeOrdinal() { + return PartitionKeyComponentType.MAXNUMBER.ordinal(); + } + + @Override + public void JsonEncode(JsonGenerator writer) { + PartitionKeyInternal.PartitionKeyInternalJsonSerializer.jsonEncode(this, writer); + } + + @Override + public void WriteForHashing(OutputStream outputStream) { + throw new UnsupportedOperationException(); + } + + @Override + public void WriteForHashingV2(OutputStream outputStream) { + throw new UnsupportedOperationException(); + } + + @Override + public void WriteForBinaryEncoding(OutputStream outputStream) { + try { + outputStream.write((byte) PartitionKeyComponentType.MAXNUMBER.type); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + + @Override + public IPartitionKeyComponent Truncate() { + return this; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/MaxStringPartitionKeyComponent.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/MaxStringPartitionKeyComponent.java new file mode 100644 index 0000000000000..f825135217885 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/MaxStringPartitionKeyComponent.java @@ -0,0 +1,80 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.routing; + +import com.azure.data.cosmos.internal.Utils; +import com.fasterxml.jackson.core.JsonGenerator; + +import java.io.IOException; +import java.io.OutputStream; + +class MaxStringPartitionKeyComponent implements IPartitionKeyComponent { + public static final MaxStringPartitionKeyComponent VALUE = new MaxStringPartitionKeyComponent(); + + @Override + public int CompareTo(IPartitionKeyComponent other) { + MaxStringPartitionKeyComponent otherMaxString = Utils.as(other, MaxStringPartitionKeyComponent.class); + if (otherMaxString == null) { + throw new IllegalArgumentException("other"); + } + + return 0; + } + + @Override + public int GetTypeOrdinal() { + return PartitionKeyComponentType.MAXSTRING.ordinal(); + } + + @Override + public void JsonEncode(JsonGenerator writer) { + PartitionKeyInternal.PartitionKeyInternalJsonSerializer.jsonEncode(this, writer); + } + + @Override + public void WriteForHashing(OutputStream outputStream) { + throw new UnsupportedOperationException(); + } + + @Override + public void WriteForHashingV2(OutputStream outputStream) { + throw new UnsupportedOperationException(); + } + + + @Override + public void WriteForBinaryEncoding(OutputStream outputStream) { + try { + outputStream.write((byte) PartitionKeyComponentType.MAXSTRING.type); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + + @Override + public IPartitionKeyComponent Truncate() { + return this; + } + +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/MinNumberPartitionKeyComponent.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/MinNumberPartitionKeyComponent.java new file mode 100644 index 0000000000000..56c6063dfe240 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/MinNumberPartitionKeyComponent.java @@ -0,0 +1,78 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.routing; + +import com.azure.data.cosmos.internal.Utils; +import com.fasterxml.jackson.core.JsonGenerator; + +import java.io.IOException; +import java.io.OutputStream; + +class MinNumberPartitionKeyComponent implements IPartitionKeyComponent { + public static final MinNumberPartitionKeyComponent VALUE = new MinNumberPartitionKeyComponent(); + + @Override + public int CompareTo(IPartitionKeyComponent other) { + MinNumberPartitionKeyComponent otherMinNumber = Utils.as(other, MinNumberPartitionKeyComponent.class); + if (otherMinNumber == null) { + throw new IllegalArgumentException("other"); + } + + return 0; + } + + @Override + public int GetTypeOrdinal() { + return PartitionKeyComponentType.MINNUMBER.ordinal(); + } + + @Override + public void JsonEncode(JsonGenerator writer) { + PartitionKeyInternal.PartitionKeyInternalJsonSerializer.jsonEncode(this, writer); + } + + @Override + public void WriteForHashing(OutputStream outputStream) { + throw new UnsupportedOperationException(); + } + + @Override + public void WriteForHashingV2(OutputStream outputStream) { + throw new UnsupportedOperationException(); + } + + @Override + public void WriteForBinaryEncoding(OutputStream outputStream) { + try { + outputStream.write((byte) PartitionKeyComponentType.MINNUMBER.type); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + + @Override + public IPartitionKeyComponent Truncate() { + return this; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/MinStringPartitionKeyComponent.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/MinStringPartitionKeyComponent.java new file mode 100644 index 0000000000000..21ca27251866f --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/MinStringPartitionKeyComponent.java @@ -0,0 +1,79 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.routing; + +import com.azure.data.cosmos.internal.Utils; +import com.fasterxml.jackson.core.JsonGenerator; + +import java.io.IOException; +import java.io.OutputStream; + +class MinStringPartitionKeyComponent implements IPartitionKeyComponent { + public static final MinStringPartitionKeyComponent VALUE = new MinStringPartitionKeyComponent(); + + @Override + public int CompareTo(IPartitionKeyComponent other) { + MinStringPartitionKeyComponent otherMinString = Utils.as(other, MinStringPartitionKeyComponent.class); + if (otherMinString == null) { + throw new IllegalArgumentException("other"); + } + + return 0; + } + + @Override + public int GetTypeOrdinal() { + return PartitionKeyComponentType.MINSTRING.ordinal(); + } + + @Override + public void JsonEncode(JsonGenerator writer) { + PartitionKeyInternal.PartitionKeyInternalJsonSerializer.jsonEncode(this, writer); + } + + @Override + public void WriteForHashing(OutputStream outputStream) { + throw new UnsupportedOperationException(); + } + + @Override + public void WriteForHashingV2(OutputStream outputStream) { + throw new UnsupportedOperationException(); + } + + + @Override + public void WriteForBinaryEncoding(OutputStream outputStream) { + try { + outputStream.write((byte) PartitionKeyComponentType.MINSTRING.type); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + + @Override + public IPartitionKeyComponent Truncate() { + return this; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/MurmurHash3_128.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/MurmurHash3_128.java new file mode 100644 index 0000000000000..c7214375ff803 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/MurmurHash3_128.java @@ -0,0 +1,156 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.routing; + +public class MurmurHash3_128 { + + public static UInt128 hash128(byte[] bytes) { + return hash128(bytes, bytes.length, new UInt128(0, 0)); + } + + public static UInt128 hash128(byte[] bytes, int length, UInt128 seed) { + final long c1 = 0x87c37b91114253d5L; + final long c2 = 0x4cf5ad432745937fL; + + long h1 = seed.high; + long h2 = seed.low; + + // body + int position; + for (position = 0; position < length - 15; position += 16) { + long k1 = getLittleEndianLong(bytes, position); + long k2 = getLittleEndianLong(bytes, position + 8); + + // k1, h1 + k1 *= c1; + k1 = rotateLeft64(k1, 31); + k1 *= c2; + + h1 ^= k1; + h1 = rotateLeft64(h1, 27); + h1 += h2; + h1 = h1 * 5 + 0x52dce729; + + // k2, h2 + k2 *= c2; + k2 = rotateLeft64(k2, 33); + k2 *= c1; + + h2 ^= k2; + h2 = rotateLeft64(h2, 31); + h2 += h1; + h2 = h2 * 5 + 0x38495ab5; + } + + + { + // tail + long k1 = 0; + long k2 = 0; + + int n = length & 15; + if (n >= 15) k2 ^= (bytes[position + 14] & 0xffL) << 48; + if (n >= 14) k2 ^= (bytes[position + 13] & 0xffL) << 40; + if (n >= 13) k2 ^= (bytes[position + 12] & 0xffL) << 32; + if (n >= 12) k2 ^= (bytes[position + 11] & 0xffL) << 24; + if (n >= 11) k2 ^= (bytes[position + 10] & 0xffL) << 16; + if (n >= 10) k2 ^= (bytes[position + 9] & 0xffL) << 8; + if (n >= 9) k2 ^= (bytes[position + 8] & 0xffL) << 0; + + k2 *= c2; + k2 = rotateLeft64(k2, 33); + k2 *= c1; + h2 ^= k2; + + if (n >= 8) k1 ^= (bytes[position + 7] & 0xffL) << 56; + if (n >= 7) k1 ^= (bytes[position + 6] & 0xffL) << 48; + if (n >= 6) k1 ^= (bytes[position + 5] & 0xffL) << 40; + if (n >= 5) k1 ^= (bytes[position + 4] & 0xffL) << 32; + if (n >= 4) k1 ^= (bytes[position + 3] & 0xffL) << 24; + if (n >= 3) k1 ^= (bytes[position + 2] & 0xffL) << 16; + if (n >= 2) k1 ^= (bytes[position + 1] & 0xffL) << 8; + if (n >= 1) k1 ^= (bytes[position + 0] & 0xffL) << 0; + + k1 *= c1; + k1 = rotateLeft64(k1, 31); + k1 *= c2; + h1 ^= k1; + } + + // finalization + h1 ^= length; + h2 ^= length; + + h1 += h2; + h2 += h1; + + // h1 + h1 ^= h1 >>> 33; + h1 *= 0xff51afd7ed558ccdL; + h1 ^= h1 >>> 33; + h1 *= 0xc4ceb9fe1a85ec53L; + h1 ^= h1 >>> 33; + + // h2 + h2 ^= h2 >>> 33; + h2 *= 0xff51afd7ed558ccdL; + h2 ^= h2 >>> 33; + h2 *= 0xc4ceb9fe1a85ec53L; + h2 ^= h2 >>> 33; + + h1 += h2; + h2 += h1; + + h1 = Long.reverseBytes(h1); + h2 = Long.reverseBytes(h2); + + return new UInt128(h1, h2); + } + + + private static int rotateLeft32(int n, int numBits) { + assert numBits < 32; + return Integer.rotateLeft(n, numBits); + } + + private static long rotateLeft64(long n, int numBits) { + assert numBits < 64; + return Long.rotateLeft(n, numBits); + } + + private static final long getLittleEndianLong(byte[] bytes, int offset) { + return ((long) bytes[offset + 7] << 56) // no mask needed + | ((bytes[offset + 6] & 0xffL) << 48) + | ((bytes[offset + 5] & 0xffL) << 40) + | ((bytes[offset + 4] & 0xffL) << 32) + | ((bytes[offset + 3] & 0xffL) << 24) + | ((bytes[offset + 2] & 0xffL) << 16) + | ((bytes[offset + 1] & 0xffL) << 8) + | ((bytes[offset] & 0xffL)); + } + + private static int intAsLittleIndian(byte[] bytes, int i) { + return (bytes[i] & 0xff) | ((bytes[i + 1] & 0xff) << 8) | ((bytes[i + 2] & 0xff) << 16) | (bytes[i + 3] << 24); + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/MurmurHash3_32.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/MurmurHash3_32.java new file mode 100644 index 0000000000000..b5c723f177afb --- /dev/null +++ 
b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/MurmurHash3_32.java @@ -0,0 +1,96 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.routing; + +/* + * The MurmurHash3 algorithm was created by Austin Appleby and placed in the public domain. + * This java port was authored by Yonik Seeley and also placed into the public domain. + * The author hereby disclaims copyright to this source code. + *
<p>
+ * This produces exactly the same hash values as the final C++ + * version of MurmurHash3 and is thus suitable for producing the same hash values across + * platforms. + *
<p>
+ * The 32 bit x86 version of this hash should be the fastest variant for relatively short keys like ids. + * See http://github.com/yonik/java_util for future updates to this file. + */ +public class MurmurHash3_32 { + /** + * Returns the MurmurHash3_x86_32 hash. + * + * @param data a byte array containing the data to be hashed + * @param len an integer indicating the length of data + * @param seed an integer to be used as hash seed + * @return the hash value + */ + public static int hash(byte[] data, int len, int seed) { + final int c1 = 0xcc9e2d51; + final int c2 = 0x1b873593; + + int h1 = seed; + int roundedEnd = (len & 0xfffffffc); // round down to 4 byte block + + for (int i = 0; i < roundedEnd; i += 4) { + // little endian load order + int k1 = (data[i] & 0xff) | ((data[i + 1] & 0xff) << 8) | ((data[i + 2] & 0xff) << 16) | (data[i + 3] << 24); + k1 *= c1; + k1 = (k1 << 15) | (k1 >>> 17); // ROTL32(k1,15); + k1 *= c2; + + h1 ^= k1; + h1 = (h1 << 13) | (h1 >>> 19); // ROTL32(h1,13); + h1 = h1 * 5 + 0xe6546b64; + } + + // tail + int k1 = 0; + + switch (len & 0x03) { + case 3: + k1 = (data[roundedEnd + 2] & 0xff) << 16; + // fallthrough + case 2: + k1 |= (data[roundedEnd + 1] & 0xff) << 8; + // fallthrough + case 1: + k1 |= (data[roundedEnd] & 0xff); + k1 *= c1; + k1 = (k1 << 15) | (k1 >>> 17); // ROTL32(k1,15); + k1 *= c2; + h1 ^= k1; + } + + // finalization + h1 ^= len; + + // fmix(h1); + h1 ^= h1 >>> 16; + h1 *= 0x85ebca6b; + h1 ^= h1 >>> 13; + h1 *= 0xc2b2ae35; + h1 ^= h1 >>> 16; + + return h1; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/NullPartitionKeyComponent.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/NullPartitionKeyComponent.java new file mode 100644 index 0000000000000..5f5a4b9848b86 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/NullPartitionKeyComponent.java @@ -0,0 +1,91 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.routing; + +import com.azure.data.cosmos.internal.Utils; +import com.fasterxml.jackson.core.JsonGenerator; + +import java.io.IOException; +import java.io.OutputStream; + +class NullPartitionKeyComponent implements IPartitionKeyComponent { + + public static final NullPartitionKeyComponent VALUE = new NullPartitionKeyComponent(); + + @Override + public int CompareTo(IPartitionKeyComponent other) { + NullPartitionKeyComponent otherMinString = Utils.as(other, NullPartitionKeyComponent.class); + if (otherMinString == null) { + throw new IllegalArgumentException("other"); + } + + return 0; + } + + @Override + public int GetTypeOrdinal() { + return PartitionKeyComponentType.NULL.type; + } + + @Override + public void JsonEncode(JsonGenerator writer) { + try { + writer.writeObject(null); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + + @Override + public void WriteForHashing(OutputStream outputStream) { + try { + outputStream.write((byte) PartitionKeyComponentType.NULL.type); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + + @Override + public void WriteForHashingV2(OutputStream outputStream) { + try { + outputStream.write((byte) PartitionKeyComponentType.NULL.type); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + + @Override + public void WriteForBinaryEncoding(OutputStream outputStream) { + try { + outputStream.write((byte) PartitionKeyComponentType.NULL.type); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + + @Override + public IPartitionKeyComponent Truncate() { + return this; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/NumberPartitionKeyComponent.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/NumberPartitionKeyComponent.java new file mode 100644 index 0000000000000..4fa9d70aa7d1c --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/NumberPartitionKeyComponent.java @@ -0,0 +1,141 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.routing; + +import com.azure.data.cosmos.internal.Utils; +import com.fasterxml.jackson.core.JsonGenerator; + +import java.io.IOException; +import java.io.OutputStream; + +/** + * Used internally to represent a number component in the partition key of the Azure Cosmos DB database service. + */ +public class NumberPartitionKeyComponent implements IPartitionKeyComponent { + + public static final NumberPartitionKeyComponent Zero = new NumberPartitionKeyComponent(0); + private final double value; + + public NumberPartitionKeyComponent(double value) { + this.value = value; + } + + private static byte[] doubleToByteArray(double d) { + byte[] output = new byte[8]; + long lng = Double.doubleToLongBits(d); + for (int i = 0; i < 8; i++) { + output[i] = (byte) ((lng >> (i * 8)) & 0xff); + } + return output; + } + + private static long EncodeDoubleAsUInt64(double value) { + long rawLongBits = Double.doubleToRawLongBits(value); + long mask = 0x8000000000000000L; + return Long.compareUnsigned(rawLongBits, mask) < 0 + ? rawLongBits ^ mask + : (~rawLongBits) + 1; + } + + @Override + public int CompareTo(IPartitionKeyComponent other) { + NumberPartitionKeyComponent otherBool = Utils.as(other, NumberPartitionKeyComponent.class); + if (otherBool == null) { + throw new IllegalArgumentException("other"); + } + + return Double.compare(this.value, ((NumberPartitionKeyComponent) other).value); + } + + @Override + public int GetTypeOrdinal() { + return PartitionKeyComponentType.NUMBER.type; + } + + @Override + public void JsonEncode(JsonGenerator writer) { + try { + writer.writeNumber(String.valueOf(value)); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + + @Override + public void WriteForHashing(OutputStream outputStream) { + try { + outputStream.write((byte) PartitionKeyComponentType.NUMBER.type); + outputStream.write(doubleToByteArray(this.value)); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + + @Override + public void WriteForHashingV2(OutputStream outputStream) { + try { + outputStream.write((byte) PartitionKeyComponentType.NUMBER.type); + outputStream.write(doubleToByteArray(this.value)); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + + @Override + public void WriteForBinaryEncoding(OutputStream outputStream) { + try { + outputStream.write((byte) PartitionKeyComponentType.NUMBER.type); + + long payload = NumberPartitionKeyComponent.EncodeDoubleAsUInt64(this.value); + + // Encode first chunk with 8-bits of payload + outputStream.write((byte) (payload >> (64 - 8))); + payload <<= 8; + + // Encode remaining chunks with 7 bits of payload followed by single "1" bit each. + byte byteToWrite = 0; + boolean firstIteration = true; + do { + if (!firstIteration) { + outputStream.write(byteToWrite); + } else { + firstIteration = false; + } + + byteToWrite = (byte) ((payload >> (64 - 8)) | 0x01); + payload <<= 7; + } while (payload != 0); + + // Except for last chunk that ends with "0" bit. 
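+ // byteToWrite still carries the last 7-bit chunk with its low continuation bit set to 1;
+ // masking with 0xFE clears that bit so the final chunk ends in the "0" bit described above.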
+ outputStream.write((byte) (byteToWrite & 0xFE)); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + + @Override + public IPartitionKeyComponent Truncate() { + return this; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/PartitionKeyAndResourceTokenPair.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/PartitionKeyAndResourceTokenPair.java new file mode 100644 index 0000000000000..66183124b6a95 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/PartitionKeyAndResourceTokenPair.java @@ -0,0 +1,77 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos.internal.routing; + +/** + * This is the pair for Partition key and its corresponding Resource Token , + * this is the value in resource token map which is getting filled during the + * construction of AsyncDocumentClient + */ +public class PartitionKeyAndResourceTokenPair { + + private PartitionKeyInternal partitionKey; + private String resourceToken; + + public PartitionKeyAndResourceTokenPair(PartitionKeyInternal partitionKey, String resourceToken) { + this.partitionKey = partitionKey; + this.resourceToken = resourceToken; + } + + /** + * Get the Partition Key + * + * @return Partition Key + */ + public PartitionKeyInternal getPartitionKey() { + return partitionKey; + } + + /** + * Sets the PartitionKey + * + * @param partitionKey + * The Partition key + */ + public void setPartitionKey(PartitionKeyInternal partitionKey) { + this.partitionKey = partitionKey; + } + + /** + * Gets the Resource Token + * + * @return Resource Token + */ + public String getResourceToken() { + return resourceToken; + } + + /** + * Sets the Resource Token + * + * @param resourceToken + * The Resource Token + */ + public void setResourceToken(String resourceToken) { + this.resourceToken = resourceToken; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/PartitionKeyComponentType.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/PartitionKeyComponentType.java new file mode 100644 index 0000000000000..24d8af6948aaf --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/PartitionKeyComponentType.java @@ -0,0 +1,43 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.routing; + +enum PartitionKeyComponentType { + UNDEFINED(0x0), + NULL(0x1), + FALSE(0x2), + TRUE(0x3), + MINNUMBER(0x4), + NUMBER(0x5), + MAXNUMBER(0x6), + MINSTRING(0x7), + STRING(0x8), + MAXSTRING(0x9), + INFINITY(0xFF); + + public final int type; + PartitionKeyComponentType(int type) { + this.type = type; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/PartitionKeyInternal.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/PartitionKeyInternal.java new file mode 100644 index 0000000000000..fc9c6e63c4e35 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/PartitionKeyInternal.java @@ -0,0 +1,351 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.routing; + +import com.azure.data.cosmos.internal.Undefined; +import com.azure.data.cosmos.internal.RMResources; +import com.azure.data.cosmos.internal.Strings; +import com.azure.data.cosmos.internal.Utils; +import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.core.ObjectCodec; +import com.fasterxml.jackson.databind.DeserializationContext; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.SerializerProvider; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.fasterxml.jackson.databind.annotation.JsonSerialize; +import com.fasterxml.jackson.databind.deser.std.StdDeserializer; +import com.fasterxml.jackson.databind.node.NullNode; +import com.fasterxml.jackson.databind.node.ObjectNode; +import com.fasterxml.jackson.databind.ser.std.StdSerializer; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; + +import static com.azure.data.cosmos.internal.Utils.as; + +/** + * Used internally to encapsulate internal information of a partition key in the Azure Cosmos DB database service. 
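+ * <p>
+ * A key value is held as an ordered list of IPartitionKeyComponent entries; comparison is by component
+ * type ordinal first and then by component value, and the whole key serializes to and from a JSON array
+ * (or the string "Infinity" for the exclusive maximum).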
+ */ +@JsonSerialize(using = PartitionKeyInternal.PartitionKeyInternalJsonSerializer.class) +@JsonDeserialize(using = PartitionKeyInternal.PartitionKeyInternalJsonDeserializer.class) +public class PartitionKeyInternal implements Comparable { + + private static final String TYPE = "type"; + private static final String MIN_NUMBER = "MinNumber"; + private static final String MAX_NUMBER = "MaxNumber"; + private static final String MIN_STRING = "MinString"; + private static final String MAX_STRING = "MaxString"; + private static final String INFINITY = "Infinity"; + + public static final PartitionKeyInternal NonePartitionKey = + new PartitionKeyInternal(); + + public static final PartitionKeyInternal EmptyPartitionKey = + new PartitionKeyInternal(new ArrayList<>()); + + @SuppressWarnings("serial") + public static final PartitionKeyInternal InfinityPartitionKey = + new PartitionKeyInternal(new ArrayList() {{ + add(new InfinityPartitionKeyComponent()); + }}); + + @SuppressWarnings("serial") + public static final PartitionKeyInternal UndefinedPartitionKey = + new PartitionKeyInternal(new ArrayList() {{ + add(new UndefinedPartitionKeyComponent()); + }}); + + public static final PartitionKeyInternal InclusiveMinimum = PartitionKeyInternal.EmptyPartitionKey; + public static final PartitionKeyInternal ExclusiveMaximum = PartitionKeyInternal.InfinityPartitionKey; + public static final PartitionKeyInternal Empty = PartitionKeyInternal.EmptyPartitionKey; + public static final PartitionKeyInternal None = PartitionKeyInternal.NonePartitionKey; + + final List components; + + public PartitionKeyInternal(List values) { + if (values == null) { + throw new IllegalArgumentException("values"); + } + + this.components = values; + } + + public PartitionKeyInternal() { + this.components = null; + } + + public static PartitionKeyInternal fromJsonString(String partitionKey) { + if (Strings.isNullOrEmpty(partitionKey)) { + throw new IllegalArgumentException(String.format(RMResources.UnableToDeserializePartitionKeyValue, partitionKey)); + } + + try { + return Utils.getSimpleObjectMapper().readValue(partitionKey, PartitionKeyInternal.class); + } catch (IOException e) { + throw new IllegalArgumentException(e); + } + } + + public static PartitionKeyInternal fromObjectArray(Object[] values, boolean strict) { + if (values == null) { + throw new IllegalArgumentException("values"); + } + + return PartitionKeyInternal.fromObjectArray(Arrays.asList(values), strict); + } + + public static PartitionKeyInternal fromObjectArray(List values, boolean strict) { + if (values == null) { + throw new IllegalArgumentException("values"); + } + + List components = new ArrayList<>(); + for (Object value : values) { + if (value == NullNode.instance || value == null) { + components.add(NullPartitionKeyComponent.VALUE); + } else if (value instanceof Undefined) { + components.add(UndefinedPartitionKeyComponent.VALUE); + } else if (value instanceof Boolean) { + components.add(new BoolPartitionKeyComponent((boolean) value)); + } else if (value instanceof String) { + components.add(new StringPartitionKeyComponent((String) value)); + } else if (isNumeric(value)) { + components.add(new NumberPartitionKeyComponent(((Number) value).doubleValue())); + } else if (value instanceof ObjectNode && ((ObjectNode) value).get(TYPE) != null) { + switch (((ObjectNode) value).get(TYPE).asText()) { + case MIN_NUMBER: + components.add(MinNumberPartitionKeyComponent.VALUE); + break; + case MAX_NUMBER: + components.add(MaxNumberPartitionKeyComponent.VALUE); + break; + 
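+ // MIN_STRING and MAX_STRING mirror the MIN_NUMBER / MAX_NUMBER sentinels above for string-typed keys.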
case MIN_STRING: + components.add(MinStringPartitionKeyComponent.VALUE); + break; + case MAX_STRING: + components.add(MaxStringPartitionKeyComponent.VALUE); + break; + } + } else { + if (strict) { + throw new IllegalArgumentException("Unable to construct PartitionKeyInternal from objects array"); + } else { + components.add(UndefinedPartitionKeyComponent.VALUE); + } + } + } + + return new PartitionKeyInternal(components); + } + + private static boolean isNumeric(Object value) { + return value instanceof Number; + } + + private static PartitionKeyInternal getExclusiveMaximum() { + return PartitionKeyInternal.InfinityPartitionKey; + } + + public static PartitionKeyInternal getEmpty() { + return PartitionKeyInternal.EmptyPartitionKey; + } + + @Override + public boolean equals(Object obj) { + PartitionKeyInternal pki = as(obj, PartitionKeyInternal.class); + if (pki == null) { + return false; + } + + if (pki == this) { + return true; + } + + return this.compareTo(pki) == 0; + } + + public int compareTo(PartitionKeyInternal other) { + if (other == null) { + throw new IllegalArgumentException("other"); + } else if (other.components == null || this.components == null) { + int otherComponentsCount = other.components == null ? 0 : other.components.size(); + int thisComponentsCount = this.components == null ? 0 : this.components.size(); + return (int) Math.signum(thisComponentsCount - otherComponentsCount); + } + + for (int i = 0; i < Math.min(this.components.size(), other.components.size()); i++) { + int leftOrdinal = this.components.get(i).GetTypeOrdinal(); + int rightOrdinal = other.components.get(i).GetTypeOrdinal(); + if (leftOrdinal != rightOrdinal) { + return (int) Math.signum(leftOrdinal - rightOrdinal); + } + + int result = this.components.get(i).CompareTo(other.components.get(i)); + if (result != 0) { + return (int) Math.signum(result); + } + } + + return (int) Math.signum(this.components.size() - other.components.size()); + } + + public String toJson() { + try { + return Utils.getSimpleObjectMapper().writeValueAsString(this); + } catch (IOException e) { + throw new IllegalArgumentException("Unable serialize the partition key internal into the JSON string", e); + } + } + + public boolean contains(PartitionKeyInternal nestedPartitionKey) { + if (this.components.size() > nestedPartitionKey.components.size()) { + return false; + } + + for (int i = 0; i < this.components.size(); i++) { + if (this.components.get(i).CompareTo(nestedPartitionKey.components.get(i)) != 0) { + return false; + } + } + + return true; + } + + public List getComponents() { + return components; + } + + @SuppressWarnings("serial") + static final class PartitionKeyInternalJsonSerializer extends StdSerializer { + + protected PartitionKeyInternalJsonSerializer() { this(null); } + + protected PartitionKeyInternalJsonSerializer(Class t) { + super(t); + } + + @Override + public void serialize(PartitionKeyInternal partitionKey, JsonGenerator writer, SerializerProvider serializerProvider) { + try { + if (partitionKey.equals(PartitionKeyInternal.getExclusiveMaximum())) { + writer.writeString(INFINITY); + return; + } + + writer.writeStartArray(); + for (IPartitionKeyComponent componentValue : partitionKey.getComponents()) { + componentValue.JsonEncode(writer); + } + writer.writeEndArray(); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + + static void jsonEncode(MinNumberPartitionKeyComponent component, JsonGenerator writer) { + jsonEncodeLimit(writer, MIN_NUMBER); + } + + static void 
jsonEncode(MaxNumberPartitionKeyComponent component, JsonGenerator writer) { + jsonEncodeLimit(writer, MAX_NUMBER); + } + + static void jsonEncode(MinStringPartitionKeyComponent component, JsonGenerator writer) { + jsonEncodeLimit(writer, MIN_STRING); + } + + static void jsonEncode(MaxStringPartitionKeyComponent component, JsonGenerator writer) { + jsonEncodeLimit(writer, MAX_STRING); + } + + private static void jsonEncodeLimit(JsonGenerator writer, String value) { + try { + writer.writeStartObject(); + writer.writeFieldName(TYPE); + writer.writeString(value); + writer.writeEndObject(); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + } + + @SuppressWarnings("serial") + static final class PartitionKeyInternalJsonDeserializer extends StdDeserializer { + + protected PartitionKeyInternalJsonDeserializer() { this(null); } + + protected PartitionKeyInternalJsonDeserializer(Class vc) { + super(vc); + } + + @Override + public PartitionKeyInternal deserialize(JsonParser jsonParser, DeserializationContext deserializationContext) { + + ObjectCodec objectCodec = jsonParser.getCodec(); + JsonNode root; + try { + root = objectCodec.readTree(jsonParser); + } catch (IOException e) { + throw new IllegalArgumentException(e); + } + + if (root.isTextual() && root.asText().equals(INFINITY)) { + return PartitionKeyInternal.getExclusiveMaximum(); + } + + List objects = new ArrayList<>(); + if (root.isArray()) { + Iterator iterator = root.iterator(); + while (iterator.hasNext()) { + JsonNode node = iterator.next(); + if (node.isNull()) { + objects.add(null); + } else if (node.isNumber()) { + objects.add(node.asDouble()); + } else if (node.isBoolean()) { + objects.add(node.asBoolean()); + } else if (node.isTextual()) { + objects.add(node.asText()); + } else if (node.isArray() && node.size() == 0 + || node.isObject() + && (node.fields() == null || !node.fields().hasNext())) { + objects.add(Undefined.Value()); + } else { + objects.add(node); + } + } + return PartitionKeyInternal.fromObjectArray(objects, true); + } + + throw new IllegalStateException(String.format( + "Unable to deserialize PartitionKeyInternal '%s'", + root.toString())); + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/PartitionKeyInternalHelper.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/PartitionKeyInternalHelper.java new file mode 100644 index 0000000000000..a322e3bc213e5 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/PartitionKeyInternalHelper.java @@ -0,0 +1,196 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.routing; + +import com.azure.data.cosmos.CommonsBridgeInternal; +import com.azure.data.cosmos.PartitionKeyDefinition; +import com.azure.data.cosmos.PartitionKind; +import com.azure.data.cosmos.internal.Bytes; +import com.azure.data.cosmos.internal.RMResources; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.List; + +public class PartitionKeyInternalHelper { + + public static final String MinimumInclusiveEffectivePartitionKey = PartitionKeyInternalHelper.toHexEncodedBinaryString(PartitionKeyInternal.EmptyPartitionKey.components); + public static final String MaximumExclusiveEffectivePartitionKey = PartitionKeyInternalHelper.toHexEncodedBinaryString(PartitionKeyInternal.InfinityPartitionKey.components); + + static final int MaxPartitionKeyBinarySize = + (1 /*type marker */ + + 9 /* hash value*/ + + 1 /* type marker*/ + StringPartitionKeyComponent.MAX_STRING_BYTES_TO_APPEND + + 1 /*trailing zero*/ + ) * 3; + private static final Int128 MaxHashV2Value = new Int128(new byte[] { + (byte) 0x3F, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, + (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF}); + + static byte[] uIntToBytes(UInt128 unit) { + ByteBuffer buffer = ByteBuffer.allocate(Long.BYTES * 2); + buffer.putLong(unit.low); + buffer.putLong(unit.high); + return buffer.array(); + } + + static long asUnsignedLong(int x) { + return x & 0x00000000ffffffffL; + } + + static byte[] longToBytes(long x) { + ByteBuffer buffer = ByteBuffer.allocate(Long.BYTES); + buffer.putLong(x); + return buffer.array(); + } + + static String toHexEncodedBinaryString(IPartitionKeyComponent... components) { + ByteArrayOutputStream stream = new ByteArrayOutputStream(MaxPartitionKeyBinarySize); + for (IPartitionKeyComponent component: components) { + component.WriteForBinaryEncoding(stream); + } + + return HexConvert.bytesToHex(stream.toByteArray()); + } + + static String toHexEncodedBinaryString(List components) { + ByteArrayOutputStream stream = new ByteArrayOutputStream(MaxPartitionKeyBinarySize); + for (IPartitionKeyComponent component: components) { + component.WriteForBinaryEncoding(stream); + } + + return HexConvert.bytesToHex(stream.toByteArray()); + } + + static public String getEffectivePartitionKeyForHashPartitioningV2(PartitionKeyInternal partitionKeyInternal) { + try(ByteArrayOutputStream byteArrayBuffer = new ByteArrayOutputStream()) { + for (int i = 0; i < partitionKeyInternal.components.size(); i++) { + partitionKeyInternal.components.get(i).WriteForHashingV2(byteArrayBuffer); + } + + byte[] bytes = byteArrayBuffer.toByteArray(); + UInt128 hashAsUnit128 = MurmurHash3_128.hash128(bytes); + + byte[] hash = uIntToBytes(hashAsUnit128); + Bytes.reverse(hash); + + // Reset 2 most significant bits, as max exclusive value is 'FF'. + // Plus one more just in case. 
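+ // Clearing the top two bits caps the first byte at 0x3F, i.e. at MaxHashV2Value, so the hex string
+ // returned here always sorts below the exclusive maximum effective partition key.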
+ hash[0] &= 0x3F; + + return HexConvert.bytesToHex(hash); + } catch (IOException e) { + throw new IllegalArgumentException(e); + } + } + + static String getEffectivePartitionKeyForHashPartitioning(PartitionKeyInternal partitionKeyInternal) { + IPartitionKeyComponent[] truncatedComponents = new IPartitionKeyComponent[partitionKeyInternal.components.size()]; + + for (int i = 0; i < truncatedComponents.length; i++) { + truncatedComponents[i] = partitionKeyInternal.components.get(i).Truncate(); + } + + double hash; + try(ByteArrayOutputStream byteArrayBuffer = new ByteArrayOutputStream()) { + for (int i = 0; i < truncatedComponents.length; i++) { + truncatedComponents[i].WriteForHashing(byteArrayBuffer); + } + + byte[] bytes = byteArrayBuffer.toByteArray(); + int hashAsInt = MurmurHash3_32.hash(bytes, bytes.length, 0); + hash = (double) asUnsignedLong(hashAsInt); + } catch (IOException e) { + throw new IllegalArgumentException(e); + } + + IPartitionKeyComponent[] partitionKeyComponents = new IPartitionKeyComponent[partitionKeyInternal.components.size() + 1]; + partitionKeyComponents[0] = new NumberPartitionKeyComponent(hash); + for (int i = 0; i < truncatedComponents.length; i++) { + partitionKeyComponents[i + 1] = truncatedComponents[i]; + } + + return toHexEncodedBinaryString(partitionKeyComponents); + } + + public static String getEffectivePartitionKeyString(PartitionKeyInternal partitionKeyInternal, PartitionKeyDefinition partitionKeyDefinition) { + return getEffectivePartitionKeyString(partitionKeyInternal, partitionKeyDefinition, true); + } + + public static String getEffectivePartitionKeyString(PartitionKeyInternal partitionKeyInternal, PartitionKeyDefinition partitionKeyDefinition, boolean strict) { + if (partitionKeyInternal.components == null) { + throw new IllegalArgumentException(RMResources.TooFewPartitionKeyComponents); + } + + if (partitionKeyInternal.equals(PartitionKeyInternal.EmptyPartitionKey)) { + return MinimumInclusiveEffectivePartitionKey; + } + + if (partitionKeyInternal.equals(PartitionKeyInternal.InfinityPartitionKey)) { + return MaximumExclusiveEffectivePartitionKey; + } + + if (partitionKeyInternal.components.size() < partitionKeyDefinition.paths().size()) { + throw new IllegalArgumentException(RMResources.TooFewPartitionKeyComponents); + } + + if (partitionKeyInternal.components.size() > partitionKeyDefinition.paths().size() && strict) { + throw new IllegalArgumentException(RMResources.TooManyPartitionKeyComponents); + } + + PartitionKind kind = partitionKeyDefinition.kind(); + if (kind == null) { + kind = PartitionKind.HASH; + } + + switch (kind) { + case HASH: + if (CommonsBridgeInternal.isV2(partitionKeyDefinition)) { + // V2 + return getEffectivePartitionKeyForHashPartitioningV2(partitionKeyInternal); + } else { + // V1 + return getEffectivePartitionKeyForHashPartitioning(partitionKeyInternal); + } + + default: + return toHexEncodedBinaryString(partitionKeyInternal.components); + } + } + + static class HexConvert { + final protected static char[] hexArray = "0123456789ABCDEF".toCharArray(); + + public static String bytesToHex(byte[] bytes) { + char[] hexChars = new char[bytes.length * 2]; + for (int j = 0; j < bytes.length; j++) { + int v = bytes[j] & 0xFF; + hexChars[j * 2] = hexArray[v >>> 4]; + hexChars[j * 2 + 1] = hexArray[v & 0x0F]; + } + return new String(hexChars); + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/PartitionKeyRangeIdentity.java 
b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/PartitionKeyRangeIdentity.java new file mode 100644 index 0000000000000..148c4401c58d9 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/PartitionKeyRangeIdentity.java @@ -0,0 +1,111 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.routing; + +import org.apache.commons.lang3.StringUtils; + +/** + * Used internally to represents the identity of a partition key range in the Azure Cosmos DB database service. + */ +public final class PartitionKeyRangeIdentity { + private String collectionRid; + private String partitionKeyRangeId; + + public PartitionKeyRangeIdentity(String collectionRid, String partitionKeyRangeId) { + if (collectionRid == null) { + throw new IllegalArgumentException("collectionRid"); + } + + if (partitionKeyRangeId == null) { + throw new IllegalArgumentException("partitionKeyRangeId"); + } + + this.collectionRid = collectionRid; + this.partitionKeyRangeId = partitionKeyRangeId; + } + + /** + * This should only be used for user provided partitionKeyRangeId, because in this case + * he knows what he is doing. If collection was deleted/created with same name - it is his responsibility. + *
<p>
+ * If our code infers partitionKeyRangeId automatically and uses collection information from collection cache, + * we need to ensure that request will reach correct collection. In this case constructor which takes collectionRid MUST + * be used. + * + * @param partitionKeyRangeId a string represents the partition key range Id + */ + public PartitionKeyRangeIdentity(String partitionKeyRangeId) { + if (partitionKeyRangeId == null) { + throw new IllegalArgumentException("partitionKeyRangeId"); + } + + this.partitionKeyRangeId = partitionKeyRangeId; + } + + public static PartitionKeyRangeIdentity fromHeader(String header) { + String[] parts = StringUtils.split(header,","); + if (parts.length == 2) { + return new PartitionKeyRangeIdentity(parts[0], parts[1]); + } else if (parts.length == 1) { + return new PartitionKeyRangeIdentity(parts[0]); + } else { + throw new IllegalStateException("x-ms-documentdb-partitionkeyrangeid header contains invalid value '" + header + "'"); + } + } + + public String toHeader() { + if (this.collectionRid != null) { + return String.format("%s,%s", this.collectionRid, this.partitionKeyRangeId); + } + + return String.format("%s", this.partitionKeyRangeId); + } + + @Override + public boolean equals(Object other) { + if (null == other) { + return false; + } + if (this == other) { + return true; + } + return other instanceof PartitionKeyRangeIdentity + && ((PartitionKeyRangeIdentity) other).collectionRid.equals(this.collectionRid) + && ((PartitionKeyRangeIdentity) other).partitionKeyRangeId.equals(this.partitionKeyRangeId); + } + + @Override + public int hashCode() { + return ((this.collectionRid != null ? this.collectionRid.hashCode() : 0) * 397) + ^ (this.partitionKeyRangeId != null ? this.partitionKeyRangeId.hashCode() : 0); + } + + public String getCollectionRid() { + return collectionRid; + } + + public String getPartitionKeyRangeId() { + return partitionKeyRangeId; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/Range.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/Range.java new file mode 100644 index 0000000000000..6e074fc337874 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/Range.java @@ -0,0 +1,193 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.routing; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.JsonSerializable; +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonProperty; + +import java.util.Comparator; + +@JsonIgnoreProperties({ "empty", "singleValue", "hashMap" }) +public final class Range> extends JsonSerializable { + private static final String MIN_PROPERTY = "min"; + private static final String MAX_PROPERTY = "max"; + private static final String IS_MIN_INCLUSIVE_PROPERTY = "isMinInclusive"; + private static final String IS_MAX_INCLUSIVE_PROPERTY = "isMaxInclusive"; + + private T minValue; + private T maxValue; + + public Range() { + super(); + } + + public Range(String jsonString) { + super(jsonString); + } + + public Range(T min, T max, boolean isMinInclusive, boolean isMaxInclusive) { + this.setMin(min); + this.setMax(max); + this.setMinInclusive(isMinInclusive); + this.setMaxInclusive(isMaxInclusive); + } + + public static > Range getPointRange(T value) { + return new Range(value, value, true, true); + } + + public static > Range getEmptyRange(T value) { + return new Range(value, value, true, false); + } + + public static > boolean checkOverlapping(Range range1, Range range2) { + if (range1 == null || range2 == null || range1.isEmpty() || range2.isEmpty()) { + return false; + } + + int cmp1 = range1.getMin().compareTo(range2.getMax()); + int cmp2 = range2.getMin().compareTo(range1.getMax()); + + if (cmp1 <= 0 && cmp2 <= 0) { + return !((cmp1 == 0 && !(range1.isMinInclusive() && range2.isMaxInclusive())) + || (cmp2 == 0 && !(range2.isMinInclusive() && range1.isMaxInclusive()))); + } + + return false; + } + + @SuppressWarnings("unchecked") + public T getMin() { + if (this.minValue == null) { + this.minValue = (T) super.get(Range.MIN_PROPERTY); + } + + return this.minValue; + } + + public void setMin(T min) { + this.minValue = min; + BridgeInternal.setProperty(this, Range.MIN_PROPERTY, min); + } + + @SuppressWarnings("unchecked") + public T getMax() { + if (this.maxValue == null) { + this.maxValue = (T) super.get(Range.MAX_PROPERTY); + } + + return this.maxValue; + } + + public void setMax(T max) { + this.maxValue = max; + BridgeInternal.setProperty(this, Range.MAX_PROPERTY, max); + } + + @JsonProperty("isMinInclusive") + public boolean isMinInclusive() { + return super.getBoolean(Range.IS_MIN_INCLUSIVE_PROPERTY); + } + + public void setMinInclusive(boolean isMinInclusive) { + BridgeInternal.setProperty(this, Range.IS_MIN_INCLUSIVE_PROPERTY, isMinInclusive); + } + + @JsonProperty("isMaxInclusive") + public boolean isMaxInclusive() { + return super.getBoolean(Range.IS_MAX_INCLUSIVE_PROPERTY); + } + + public void setMaxInclusive(boolean isMaxInclusive) { + BridgeInternal.setProperty(this, Range.IS_MAX_INCLUSIVE_PROPERTY, isMaxInclusive); + } + + public boolean isSingleValue() { + return this.isMinInclusive() && this.isMaxInclusive() && this.getMin().compareTo(this.getMax()) == 0; + } + + public boolean isEmpty() { + return this.getMin().compareTo(this.getMax()) == 0 && !(this.isMinInclusive() && this.isMaxInclusive()); + } + + public boolean contains(T value) { + int minToValueRelation = this.getMin().compareTo(value); + int maxToValueRelation = this.getMax().compareTo(value); + + return ((this.isMinInclusive() && minToValueRelation <= 0) + || (!this.isMinInclusive() && minToValueRelation < 0)) + && ((this.isMaxInclusive() && maxToValueRelation >= 0) + || (!this.isMaxInclusive() && 
maxToValueRelation > 0)); + } + + @Override + public boolean equals(Object obj) { + if (!(obj instanceof Range)) + return false; + if (obj == this) + return true; + @SuppressWarnings("unchecked") + Range otherRange = (Range) obj; + + return this.getMin().compareTo(otherRange.getMin()) == 0 && this.getMax().compareTo(otherRange.getMax()) == 0 + && this.isMinInclusive() == otherRange.isMinInclusive() + && this.isMaxInclusive() == otherRange.isMaxInclusive(); + } + + @Override + public int hashCode() { + int hash = 0; + hash = (hash * 397) ^ this.getMin().hashCode(); + hash = (hash * 397) ^ this.getMax().hashCode(); + hash = (hash * 397) ^ Boolean.compare(this.isMinInclusive(), false); + hash = (hash * 397) ^ Boolean.compare(this.isMaxInclusive(), false); + return hash; + } + + public static class MinComparator> implements Comparator> { + @Override + public int compare(Range range1, Range range2) { + int result = range1.getMin().compareTo(range2.getMin()); + if (result != 0 || range1.isMinInclusive() == range2.isMinInclusive()) { + return result; + } + + return range1.isMinInclusive() ? -1 : 1; + } + } + + public static class MaxComparator> implements Comparator> { + @Override + public int compare(Range range1, Range range2) { + int result = range1.getMax().compareTo(range2.getMax()); + if (result != 0 || range1.isMaxInclusive() == range2.isMaxInclusive()) { + return result; + } + + return range1.isMaxInclusive() ? 1 : -1; + } + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/RoutingMapProvider.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/RoutingMapProvider.java new file mode 100644 index 0000000000000..1b71b61269153 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/RoutingMapProvider.java @@ -0,0 +1,40 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.routing; + +import com.azure.data.cosmos.internal.PartitionKeyRange; + +import java.util.Collection; + +/** + * Used internally in request routing in the Azure Cosmos DB database service. 
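+ * <p>
+ * Implementations expose a collection's partition key ranges: the ranges overlapping a given range,
+ * the range owning an effective partition key, and a range looked up by its id; the forceRefresh flag
+ * lets callers bypass any routing data an implementation may have cached.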
+ */ +public interface RoutingMapProvider { + Collection getOverlappingRanges(String collectionSelfLink, Range range, boolean forceRefresh); + + PartitionKeyRange tryGetRangeByEffectivePartitionKey(String collectionSelfLink, String effectivePartitionKey); + + PartitionKeyRange getPartitionKeyRangeById(String collectionSelfLink, String partitionKeyRangeId, boolean forceRefresh); + +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/RoutingMapProviderHelper.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/RoutingMapProviderHelper.java new file mode 100644 index 0000000000000..512e12641f636 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/RoutingMapProviderHelper.java @@ -0,0 +1,97 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.routing; + +import com.azure.data.cosmos.internal.PartitionKeyRange; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; + +/** + * Provide utility functionality to route request in direct connectivity mode in the Azure Cosmos DB database service. + */ +public final class RoutingMapProviderHelper { + private static final Range.MaxComparator MAX_COMPARATOR = new Range.MaxComparator(); + + private static String max(String left, String right) { + return left.compareTo(right) < 0 ? 
right : left; + } + + private static > boolean IsSortedAndNonOverlapping(List> list) { + for (int i = 1; i < list.size(); i++) { + Range previousRange = list.get(i - 1); + Range currentRange = list.get(i); + + int compareResult = previousRange.getMax().compareTo(currentRange.getMin()); + if (compareResult > 0) { + return false; + } else if (compareResult == 0 && previousRange.isMaxInclusive() && currentRange.isMinInclusive()) { + return false; + } + } + + return true; + } + + public static Collection getOverlappingRanges(RoutingMapProvider routingMapProvider, + String collectionSelfLink, List> sortedRanges) { + if (!IsSortedAndNonOverlapping(sortedRanges)) { + throw new IllegalArgumentException("sortedRanges"); + } + + List targetRanges = new ArrayList(); + int currentProvidedRange = 0; + while (currentProvidedRange < sortedRanges.size()) { + if (sortedRanges.get(currentProvidedRange).isEmpty()) { + currentProvidedRange++; + continue; + } + + Range queryRange; + if (!targetRanges.isEmpty()) { + String left = max(targetRanges.get(targetRanges.size() - 1).getMaxExclusive(), + sortedRanges.get(currentProvidedRange).getMin()); + + boolean leftInclusive = left.compareTo(sortedRanges.get(currentProvidedRange).getMin()) == 0 + ? sortedRanges.get(currentProvidedRange).isMinInclusive() : false; + + queryRange = new Range(left, sortedRanges.get(currentProvidedRange).getMax(), leftInclusive, + sortedRanges.get(currentProvidedRange).isMaxInclusive()); + } else { + queryRange = sortedRanges.get(currentProvidedRange); + } + + targetRanges.addAll(routingMapProvider.getOverlappingRanges(collectionSelfLink, queryRange, false)); + + Range lastKnownTargetRange = targetRanges.get(targetRanges.size() - 1).toRange(); + while (currentProvidedRange < sortedRanges.size() + && MAX_COMPARATOR.compare(sortedRanges.get(currentProvidedRange), lastKnownTargetRange) <= 0) { + currentProvidedRange++; + } + } + + return targetRanges; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/StringPartitionKeyComponent.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/StringPartitionKeyComponent.java new file mode 100644 index 0000000000000..66b79466f46f3 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/StringPartitionKeyComponent.java @@ -0,0 +1,132 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.routing; + +import com.azure.data.cosmos.internal.Utils; +import com.fasterxml.jackson.core.JsonGenerator; + +import java.io.IOException; +import java.io.OutputStream; +import java.io.UnsupportedEncodingException; + +class StringPartitionKeyComponent implements IPartitionKeyComponent { + + public static final int MAX_STRING_CHARS = 100; + public static final int MAX_STRING_BYTES_TO_APPEND = 100; + private final String value; + private final byte[] utf8Value; + + public StringPartitionKeyComponent(String value) { + if (value == null) { + throw new IllegalArgumentException("value"); + } + + this.value = value; + try { + this.utf8Value = com.azure.data.cosmos.internal.Utils.getUTF8Bytes(value); + } catch (UnsupportedEncodingException e) { + throw new IllegalArgumentException(e); + } + } + + @Override + public int CompareTo(IPartitionKeyComponent other) { + StringPartitionKeyComponent otherString = Utils.as(other, StringPartitionKeyComponent.class) ; + if (otherString == null) { + throw new IllegalArgumentException("other"); + } + + return this.value.compareTo(otherString.value); + } + + @Override + public int GetTypeOrdinal() { + return PartitionKeyComponentType.STRING.type; + } + + @Override + public int hashCode() { + // hashCode for hashmap dictionary, etc + return value.hashCode(); + } + + public IPartitionKeyComponent Truncate() { + if (this.value.length() > MAX_STRING_CHARS) { + return new StringPartitionKeyComponent(this.value.substring(0, MAX_STRING_CHARS)); + } + + return this; + } + + @Override + public void JsonEncode(JsonGenerator writer) { + try { + writer.writeString(this.value); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + + @Override + public void WriteForHashing(OutputStream outputStream) { + try { + outputStream.write((byte) PartitionKeyComponentType.STRING.type); + outputStream.write(utf8Value); + outputStream.write((byte) 0); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + + @Override + public void WriteForHashingV2(OutputStream outputStream) { + try { + outputStream.write((byte) PartitionKeyComponentType.STRING.type); + outputStream.write(utf8Value); + outputStream.write((byte) 0xFF); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + + @Override + public void WriteForBinaryEncoding(OutputStream outputStream) { + try { + outputStream.write((byte) PartitionKeyComponentType.STRING.type); + boolean shortString = this.utf8Value.length <= MAX_STRING_BYTES_TO_APPEND; + + for (int index = 0; index < (shortString ? 
this.utf8Value.length : MAX_STRING_BYTES_TO_APPEND + 1); index++) { + byte charByte = this.utf8Value[index]; + if (charByte < 0xFF) charByte++; + outputStream.write(charByte); + } + + if (shortString) { + outputStream.write((byte) 0x00); + } + } catch (IOException e) { + throw new IllegalStateException(e); + } + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/UInt128.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/UInt128.java new file mode 100644 index 0000000000000..f34a5ba77b712 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/UInt128.java @@ -0,0 +1,34 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.routing; + +class UInt128 { + long low; + long high; + + UInt128(long x, long y) { + this.low = x; + this.high = y; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/UndefinedPartitionKeyComponent.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/UndefinedPartitionKeyComponent.java new file mode 100644 index 0000000000000..f67302bf73fb2 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/internal/routing/UndefinedPartitionKeyComponent.java @@ -0,0 +1,92 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.routing; + +import com.azure.data.cosmos.internal.Utils; +import com.fasterxml.jackson.core.JsonGenerator; + +import java.io.IOException; +import java.io.OutputStream; + +class UndefinedPartitionKeyComponent implements IPartitionKeyComponent { + + public static final UndefinedPartitionKeyComponent VALUE = new UndefinedPartitionKeyComponent(); + + @Override + public int CompareTo(IPartitionKeyComponent other) { + UndefinedPartitionKeyComponent otherUndefined = Utils.as(other, UndefinedPartitionKeyComponent.class); + if (otherUndefined == null) { + throw new IllegalArgumentException("other"); + } + + return 0; + } + + @Override + public int GetTypeOrdinal() { + return PartitionKeyComponentType.UNDEFINED.type; + } + + @Override + public void JsonEncode(JsonGenerator writer) { + try { + writer.writeStartObject(); + writer.writeEndObject(); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + + @Override + public void WriteForHashing(OutputStream outputStream) { + try { + outputStream.write((byte) PartitionKeyComponentType.UNDEFINED.type); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + + @Override + public void WriteForHashingV2(OutputStream outputStream) { + try { + outputStream.write((byte) PartitionKeyComponentType.UNDEFINED.type); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + + @Override + public void WriteForBinaryEncoding(OutputStream outputStream) { + try { + outputStream.write((byte) PartitionKeyComponentType.UNDEFINED.type); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + + @Override + public IPartitionKeyComponent Truncate() { + return this; + } +} diff --git a/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/package-info.java b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/package-info.java new file mode 100644 index 0000000000000..683bac20c6956 --- /dev/null +++ b/sdk/cosmos/sdk/src/main/java/com/azure/data/cosmos/package-info.java @@ -0,0 +1,26 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +/** + * This package provides Rx interfaces for interacting with Azure Cosmos DB. + */ +package com.azure.data.cosmos; \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/BridgeUtils.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/BridgeUtils.java new file mode 100644 index 0000000000000..864c5cf448874 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/BridgeUtils.java @@ -0,0 +1,74 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.DatabaseAccount; +import com.azure.data.cosmos.internal.DatabaseAccountLocation; + +import java.util.List; + +/** + * This is a helper class for testing. 
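+ * It builds DatabaseAccount fixtures with configurable readable/writable locations and multi-write settings, and + * assembles ConflictResolutionPolicy instances (mode, conflict resolution path, stored procedure link) for tests.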
+ */ +public class BridgeUtils { + + public static DatabaseAccount createDatabaseAccount(List readLocations, + List writeLocations, + boolean useMultipleWriteLocations) { + DatabaseAccount dbAccount = new DatabaseAccount(); + dbAccount.setEnableMultipleWriteLocations(useMultipleWriteLocations); + + dbAccount.setReadableLocations(readLocations); + dbAccount.setWritableLocations(writeLocations); + + return dbAccount; + } + + public static DatabaseAccountLocation createDatabaseAccountLocation(String name, String endpoint) { + DatabaseAccountLocation dal = new DatabaseAccountLocation(); + dal.setName(name); + dal.setEndpoint(endpoint); + + return dal; + } + + public static ConflictResolutionPolicy createConflictResolutionPolicy() { + return new ConflictResolutionPolicy(); + } + + public static ConflictResolutionPolicy setMode(ConflictResolutionPolicy policy, ConflictResolutionMode mode) { + policy.mode(mode); + return policy; + } + + public static ConflictResolutionPolicy setPath(ConflictResolutionPolicy policy, String path) { + policy.conflictResolutionPath(path); + return policy; + } + + public static ConflictResolutionPolicy setStoredProc(ConflictResolutionPolicy policy, String storedProcLink) { + policy.conflictResolutionProcedure(storedProcLink); + return policy; + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/ClientUnderTestBuilder.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/ClientUnderTestBuilder.java new file mode 100644 index 0000000000000..e597e9d2676d7 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/ClientUnderTestBuilder.java @@ -0,0 +1,58 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.RxDocumentClientUnderTest; +import com.azure.data.cosmos.internal.directconnectivity.ReflectionUtils; + +import java.net.URI; +import java.net.URISyntaxException; + +public class ClientUnderTestBuilder extends CosmosClientBuilder { + + public ClientUnderTestBuilder(CosmosClientBuilder builder) { + this.configs(builder.configs()); + this.connectionPolicy(builder.connectionPolicy()); + this.consistencyLevel(builder.consistencyLevel()); + this.key(builder.key()); + this.endpoint(builder.endpoint()); + } + + @Override + public CosmosClient build() { + RxDocumentClientUnderTest rxClient; + try { + rxClient = new RxDocumentClientUnderTest( + new URI(this.endpoint()), + this.key(), + this.connectionPolicy(), + this.consistencyLevel(), + this.configs()); + } catch (URISyntaxException e) { + throw new IllegalArgumentException(e.getMessage()); + } + CosmosClient cosmosClient = super.build(); + ReflectionUtils.setAsyncDocumentClient(cosmosClient, rxClient); + return cosmosClient; + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/ConflictTests.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/ConflictTests.java new file mode 100644 index 0000000000000..2ecafa25a9b90 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/ConflictTests.java @@ -0,0 +1,72 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.Conflict; +import com.azure.data.cosmos.internal.Document; +import org.apache.commons.io.IOUtils; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; + +import static org.assertj.core.api.Assertions.assertThat; + +public class ConflictTests { + private String conflictAsString; + + @BeforeClass(groups = { "unit" }) + public void setup() throws Exception { + conflictAsString = IOUtils.toString( + getClass().getClassLoader().getResourceAsStream("sampleConflict.json"), "UTF-8"); + } + + @Test(groups = { "unit" }) + public void getSourceResourceId() { + Conflict conf = new Conflict(conflictAsString); + assertThat(conf.getSourceResourceId()).isEqualTo("k6d9ALgBmD+ChB4AAAAAAA=="); + } + + @Test(groups = { "unit" }) + public void getOperationKind() { + Conflict conf = new Conflict(conflictAsString); + assertThat(conf.getOperationKind()).isEqualTo("create"); + conf.getSourceResourceId(); + } + + @Test(groups = { "unit" }) + public void getResourceType() { + Conflict conf = new Conflict(conflictAsString); + assertThat(conf.getResouceType()).isEqualTo("document"); + conf.getSourceResourceId(); + } + + @Test(groups = { "unit" }) + public void getResource() { + Conflict conf = new Conflict(conflictAsString); + Document doc = conf.getResource(Document.class); + assertThat(doc.id()).isEqualTo("0007312a-a1c5-4b54-9e39-35de2367fa33"); + assertThat(doc.getInt("regionId")).isEqualTo(2); + assertThat(doc.resourceId()).isEqualTo("k6d9ALgBmD+ChB4AAAAAAA=="); + assertThat(doc.etag()).isEqualTo("\"00000200-0000-0000-0000-5b6e214b0000\""); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/ConnectionPolicyTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/ConnectionPolicyTest.java new file mode 100644 index 0000000000000..ee9878c43ccb5 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/ConnectionPolicyTest.java @@ -0,0 +1,57 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.directconnectivity.Protocol; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; + +import static org.assertj.core.api.Assertions.assertThat; + +public class ConnectionPolicyTest { + + @DataProvider(name = "connectionModeArgProvider") + public Object[][] connectionModeArgProvider() { + return new Object[][]{ + { ConnectionMode.GATEWAY}, + { ConnectionMode.DIRECT}, + }; + } + + @Test(groups = { "unit" }, dataProvider = "connectionModeArgProvider") + public void connectionMode(ConnectionMode connectionMode) { + ConnectionPolicy policy = new ConnectionPolicy(); + policy.connectionMode(connectionMode); + + assertThat(policy.connectionMode()).isEqualTo(connectionMode); + } + + @DataProvider(name = "connectionProtocolModeArgProvider") + public Object[][] connectionProtocolModeArgProvider() { + return new Object[][]{ + { Protocol.HTTPS}, + { Protocol.TCP}, + }; + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/CosmosClientExceptionTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/CosmosClientExceptionTest.java new file mode 100644 index 0000000000000..12b15173cc756 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/CosmosClientExceptionTest.java @@ -0,0 +1,84 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos; + +import com.google.common.collect.ImmutableMap; +import org.testng.annotations.Test; + +import java.util.Map; + +import static org.assertj.core.api.Assertions.assertThat; + +public class CosmosClientExceptionTest { + + @Test(groups = { "unit" }) + public void headerNotNull1() { + CosmosClientException dce = BridgeInternal.createCosmosClientException(0); + assertThat(dce.responseHeaders()).isNotNull(); + assertThat(dce.responseHeaders()).isEmpty(); + } + + @Test(groups = { "unit" }) + public void headerNotNull2() { + CosmosClientException dce = BridgeInternal.createCosmosClientException(0, "dummy"); + assertThat(dce.responseHeaders()).isNotNull(); + assertThat(dce.responseHeaders()).isEmpty(); + } + + @Test(groups = { "unit" }) + public void headerNotNull3() { + CosmosClientException dce = BridgeInternal.createCosmosClientException(0, new RuntimeException()); + assertThat(dce.responseHeaders()).isNotNull(); + assertThat(dce.responseHeaders()).isEmpty(); + } + + @Test(groups = { "unit" }) + public void headerNotNull4() { + CosmosClientException dce = BridgeInternal.createCosmosClientException(0, (CosmosError) null, (Map) null); + assertThat(dce.responseHeaders()).isNotNull(); + assertThat(dce.responseHeaders()).isEmpty(); + } + + @Test(groups = { "unit" }) + public void headerNotNull5() { + CosmosClientException dce = BridgeInternal.createCosmosClientException((String) null, 0, (CosmosError) null, (Map) null); + assertThat(dce.responseHeaders()).isNotNull(); + assertThat(dce.responseHeaders()).isEmpty(); + } + + @Test(groups = { "unit" }) + public void headerNotNull6() { + CosmosClientException dce = BridgeInternal.createCosmosClientException((String) null, (Exception) null, (Map) null, 0, (String) null); + assertThat(dce.responseHeaders()).isNotNull(); + assertThat(dce.responseHeaders()).isEmpty(); + } + + @Test(groups = { "unit" }) + public void headerNotNull7() { + ImmutableMap respHeaders = ImmutableMap.of("key", "value"); + CosmosClientException dce = BridgeInternal.createCosmosClientException((String) null, (Exception) null, respHeaders, 0, (String) null); + assertThat(dce.responseHeaders()).isNotNull(); + assertThat(dce.responseHeaders()).contains(respHeaders.entrySet().iterator().next()); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/CosmosClientTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/CosmosClientTest.java new file mode 100644 index 0000000000000..dce8d389f77aa --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/CosmosClientTest.java @@ -0,0 +1,79 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +import com.google.common.base.Strings; +import org.testng.ITest; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; + +import java.lang.reflect.Method; + +public abstract class CosmosClientTest implements ITest { + + private final CosmosClientBuilder clientBuilder; + private String testName; + + public CosmosClientTest() { + this(new CosmosClientBuilder()); + } + + public CosmosClientTest(CosmosClientBuilder clientBuilder) { + this.clientBuilder = clientBuilder; + } + + public final CosmosClientBuilder clientBuilder() { + return this.clientBuilder; + } + + @Override + public final String getTestName() { + return this.testName; + } + + @BeforeMethod(alwaysRun = true) + public final void setTestName(Method method) { + String testClassAndMethodName = Strings.lenientFormat("%s::%s", + method.getDeclaringClass().getSimpleName(), + method.getName()); + + if (this.clientBuilder.connectionPolicy() != null && this.clientBuilder.configs() != null) { + String connectionMode = this.clientBuilder.connectionPolicy().connectionMode() == ConnectionMode.DIRECT + ? "Direct " + this.clientBuilder.configs().getProtocol() + : "Gateway"; + + this.testName = Strings.lenientFormat("%s[%s with %s consistency]", + testClassAndMethodName, + connectionMode, + clientBuilder.consistencyLevel()); + } else { + this.testName = testClassAndMethodName; + } + } + + @AfterMethod(alwaysRun = true) + public final void unsetTestName() { + this.testName = null; + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/CosmosDatabaseForTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/CosmosDatabaseForTest.java new file mode 100644 index 0000000000000..dd9266c790445 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/CosmosDatabaseForTest.java @@ -0,0 +1,130 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos; + +import org.apache.commons.lang3.RandomStringUtils; +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.time.Duration; +import java.time.LocalDateTime; +import java.time.format.DateTimeFormatter; +import java.util.List; + +import static org.assertj.core.api.Assertions.assertThat; + +public class CosmosDatabaseForTest { + private static Logger logger = LoggerFactory.getLogger(CosmosDatabaseForTest.class); + public static final String SHARED_DB_ID_PREFIX = "RxJava.SDKTest.SharedDatabase"; + private static final Duration CLEANUP_THRESHOLD_DURATION = Duration.ofHours(2); + private static final String DELIMITER = "_"; + private static DateTimeFormatter TIME_FORMATTER = DateTimeFormatter.ofPattern("yyyyMMdd'T'HHmmss"); + + public LocalDateTime createdTime; + public CosmosDatabase createdDatabase; + + private CosmosDatabaseForTest(CosmosDatabase db, LocalDateTime createdTime) { + this.createdDatabase = db; + this.createdTime = createdTime; + } + + private boolean isStale() { + return isOlderThan(CLEANUP_THRESHOLD_DURATION); + } + + private boolean isOlderThan(Duration dur) { + return createdTime.isBefore(LocalDateTime.now().minus(dur)); + } + + public static String generateId() { + return SHARED_DB_ID_PREFIX + DELIMITER + TIME_FORMATTER.format(LocalDateTime.now()) + DELIMITER + RandomStringUtils.randomAlphabetic(3); + } + + private static CosmosDatabaseForTest from(CosmosDatabase db) { + if (db == null || db.id() == null || db.getLink() == null) { + return null; + } + + String id = db.id(); + if (id == null) { + return null; + } + + String[] parts = StringUtils.split(id, DELIMITER); + if (parts.length != 3) { + return null; + } + if (!StringUtils.equals(parts[0], SHARED_DB_ID_PREFIX)) { + return null; + } + + try { + LocalDateTime parsedTime = LocalDateTime.parse(parts[1], TIME_FORMATTER); + return new CosmosDatabaseForTest(db, parsedTime); + } catch (Exception e) { + return null; + } + } + + public static CosmosDatabaseForTest create(DatabaseManager client) { + CosmosDatabaseProperties dbDef = new CosmosDatabaseProperties(generateId()); + + CosmosDatabase db = client.createDatabase(dbDef).block().database(); + CosmosDatabaseForTest dbForTest = CosmosDatabaseForTest.from(db); + assertThat(dbForTest).isNotNull(); + return dbForTest; + } + + public static void cleanupStaleTestDatabases(DatabaseManager client) { + logger.info("Cleaning stale test databases ..."); + List dbs = client.queryDatabases( + new SqlQuerySpec("SELECT * FROM c WHERE STARTSWITH(c.id, @PREFIX)", + new SqlParameterList(new SqlParameter("@PREFIX", CosmosDatabaseForTest.SHARED_DB_ID_PREFIX)))) + .flatMap(page -> Flux.fromIterable(page.results())).collectList().block(); + + for (CosmosDatabaseProperties db : dbs) { + assertThat(db.id()).startsWith(CosmosDatabaseForTest.SHARED_DB_ID_PREFIX); + + CosmosDatabaseForTest dbForTest = CosmosDatabaseForTest.from(client.getDatabase(db.id())); + + if (db != null && dbForTest.isStale()) { + logger.info("Deleting database {}", db.id()); + dbForTest.deleteDatabase(db.id()); + } + } + } + + private void deleteDatabase(String id) { + this.createdDatabase.delete().block(); + } + + public interface DatabaseManager { + Flux> queryDatabases(SqlQuerySpec query); + Mono createDatabase(CosmosDatabaseProperties databaseDefinition); + CosmosDatabase getDatabase(String id); + } +} diff --git 
a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/CosmosPartitionKeyTests.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/CosmosPartitionKeyTests.java new file mode 100644 index 0000000000000..2e862ea61c2b1 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/CosmosPartitionKeyTests.java @@ -0,0 +1,271 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.*; +import com.azure.data.cosmos.internal.http.HttpClient; +import com.azure.data.cosmos.internal.http.HttpClientConfig; +import com.azure.data.cosmos.internal.http.HttpHeaders; +import com.azure.data.cosmos.internal.http.HttpRequest; +import com.azure.data.cosmos.rx.TestSuiteBase; +import io.netty.handler.codec.http.HttpMethod; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URLEncoder; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.UUID; + +import static org.assertj.core.api.Assertions.assertThat; + +public final class CosmosPartitionKeyTests extends TestSuiteBase { + + private final static String NON_PARTITIONED_CONTAINER_ID = "NonPartitionContainer" + UUID.randomUUID().toString(); + private final static String NON_PARTITIONED_CONTAINER_DOCUEMNT_ID = "NonPartitionContainer_Document" + UUID.randomUUID().toString(); + + private CosmosClient client; + private CosmosDatabase createdDatabase; + + @Factory(dataProvider = "clientBuilders") + public CosmosPartitionKeyTests(CosmosClientBuilder clientBuilder) { + super(clientBuilder); + } + + @BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT) + public void beforeClass() throws URISyntaxException, IOException { + assertThat(this.client).isNull(); + client = clientBuilder().build(); + createdDatabase = getSharedCosmosDatabase(client); + } + + @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + assertThat(this.client).isNotNull(); + this.client.close(); + } + + private void createContainerWithoutPk() throws URISyntaxException, IOException { + ConnectionPolicy connectionPolicy = new ConnectionPolicy(); + 
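+        // Call the REST API directly with an older service version (2018-09-17) so the collection is created +        // without a partition key definition; this simulates a legacy non-partitioned container for the tests below.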
HttpClientConfig httpClientConfig = new HttpClientConfig(new Configs()) + .withMaxIdleConnectionTimeoutInMillis(connectionPolicy.idleConnectionTimeoutInMillis()) + .withPoolSize(connectionPolicy.maxPoolSize()) + .withHttpProxy(connectionPolicy.proxy()) + .withRequestTimeoutInMillis(connectionPolicy.requestTimeoutInMillis()); + + HttpClient httpClient = HttpClient.createFixed(httpClientConfig); + + // CREATE a non partitioned collection using the rest API and older version + String resourceId = Paths.DATABASES_PATH_SEGMENT + "/" + createdDatabase.id(); + String path = Paths.DATABASES_PATH_SEGMENT + "/" + createdDatabase.id() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/"; + DocumentCollection collection = new DocumentCollection(); + collection.id(NON_PARTITIONED_CONTAINER_ID); + + HashMap headers = new HashMap(); + headers.put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123()); + headers.put(HttpConstants.HttpHeaders.VERSION, "2018-09-17"); + BaseAuthorizationTokenProvider base = new BaseAuthorizationTokenProvider(TestConfigurations.MASTER_KEY); + String authorization = base.generateKeyAuthorizationSignature(HttpConstants.HttpMethods.POST, resourceId, Paths.COLLECTIONS_PATH_SEGMENT, headers); + headers.put(HttpConstants.HttpHeaders.AUTHORIZATION, URLEncoder.encode(authorization, "UTF-8")); + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Create, + ResourceType.DocumentCollection, path, collection, headers, new RequestOptions()); + + String[] baseUrlSplit = TestConfigurations.HOST.split(":"); + String resourceUri = baseUrlSplit[0] + ":" + baseUrlSplit[1] + ":" + baseUrlSplit[2].split("/")[ + 0] + "//" + Paths.DATABASES_PATH_SEGMENT + "/" + createdDatabase.id() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/"; + URI uri = new URI(resourceUri); + + HttpRequest httpRequest = new HttpRequest(HttpMethod.POST, uri, uri.getPort(), new HttpHeaders(headers)); + httpRequest.withBody(request.getContent()); + String body = httpClient.send(httpRequest).block().bodyAsString().block(); + assertThat(body).contains("\"id\":\"" + NON_PARTITIONED_CONTAINER_ID + "\""); + + // CREATE a document in the non partitioned collection using the rest API and older version + resourceId = Paths.DATABASES_PATH_SEGMENT + "/" + createdDatabase.id() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + collection.id(); + path = Paths.DATABASES_PATH_SEGMENT + "/" + createdDatabase.id() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + + "/" + collection.id() + "/" + Paths.DOCUMENTS_PATH_SEGMENT + "/"; + Document document = new Document(); + document.id(NON_PARTITIONED_CONTAINER_DOCUEMNT_ID); + + authorization = base.generateKeyAuthorizationSignature(HttpConstants.HttpMethods.POST, resourceId, Paths.DOCUMENTS_PATH_SEGMENT, headers); + headers.put(HttpConstants.HttpHeaders.AUTHORIZATION, URLEncoder.encode(authorization, "UTF-8")); + request = RxDocumentServiceRequest.create(OperationType.Create, ResourceType.Document, path, + document, headers, new RequestOptions()); + + resourceUri = baseUrlSplit[0] + ":" + baseUrlSplit[1] + ":" + baseUrlSplit[2].split("/")[0] + "//" + Paths.DATABASES_PATH_SEGMENT + "/" + + createdDatabase.id() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + collection.id() + "/" + Paths.DOCUMENTS_PATH_SEGMENT + "/"; + uri = new URI(resourceUri); + + httpRequest = new HttpRequest(HttpMethod.POST, uri, uri.getPort(), new HttpHeaders(headers)); + httpRequest.withBody(request.getContent()); + + body = httpClient.send(httpRequest).block().bodyAsString().block(); + 
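+        // The service echoes the created document in the response body, so checking for its id confirms the +        // legacy-path create succeeded.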
assertThat(body).contains("\"id\":\"" + NON_PARTITIONED_CONTAINER_DOCUEMNT_ID + "\""); + } + + @Test(groups = { "simple" }) + public void testNonPartitionedCollectionOperations() throws Exception { + createContainerWithoutPk(); + CosmosContainer createdContainer = createdDatabase.getContainer(NON_PARTITIONED_CONTAINER_ID); + + Mono readMono = createdContainer.getItem(NON_PARTITIONED_CONTAINER_DOCUEMNT_ID, PartitionKey.None).read(); + CosmosResponseValidator validator = new CosmosResponseValidator.Builder() + .withId(NON_PARTITIONED_CONTAINER_DOCUEMNT_ID).build(); + validateSuccess(readMono, validator); + + String createdItemId = UUID.randomUUID().toString(); + Mono createMono = createdContainer.createItem(new CosmosItemProperties("{'id':'" + createdItemId + "'}")); + validator = new CosmosResponseValidator.Builder() + .withId(createdItemId).build(); + validateSuccess(createMono, validator); + + readMono = createdContainer.getItem(createdItemId, PartitionKey.None).read(); + validator = new CosmosResponseValidator.Builder() + .withId(createdItemId).build(); + validateSuccess(readMono, validator); + + CosmosItem itemToReplace = createdContainer.getItem(createdItemId, PartitionKey.None).read().block().item(); + CosmosItemProperties itemSettingsToReplace = itemToReplace.read().block().properties(); + String replacedItemId = UUID.randomUUID().toString(); + itemSettingsToReplace.id(replacedItemId); + Mono replaceMono = itemToReplace.replace(itemSettingsToReplace); + validator = new CosmosResponseValidator.Builder() + .withId(replacedItemId).build(); + validateSuccess(replaceMono, validator); + + String upsertedItemId = UUID.randomUUID().toString(); + + Mono upsertMono = createdContainer.upsertItem(new CosmosItemProperties("{'id':'" + upsertedItemId + "'}")); + validator = new CosmosResponseValidator.Builder() + .withId(upsertedItemId).build(); + validateSuccess(upsertMono, validator); + + // one document was created during setup, one with create (which was replaced) and one with upsert + FeedOptions feedOptions = new FeedOptions(); + feedOptions.partitionKey(PartitionKey.None); + ArrayList expectedIds = new ArrayList(); + expectedIds.add(NON_PARTITIONED_CONTAINER_DOCUEMNT_ID); + expectedIds.add(replacedItemId); + expectedIds.add(upsertedItemId); + Flux> queryFlux = createdContainer.queryItems("SELECT * from c", feedOptions); + FeedResponseListValidator queryValidator = new FeedResponseListValidator.Builder() + .totalSize(3) + .numberOfPages(1) + .containsExactlyIds(expectedIds) + .build(); + validateQuerySuccess(queryFlux, queryValidator); + + queryFlux = createdContainer.readAllItems(feedOptions); + queryValidator = new FeedResponseListValidator.Builder() + .totalSize(3) + .numberOfPages(1) + .containsExactlyIds(expectedIds) + .build(); + validateQuerySuccess(queryFlux, queryValidator); + + String documentCreatedBySprocId = "testDoc"; + CosmosStoredProcedureProperties sproc = new CosmosStoredProcedureProperties( + "{" + + " 'id': '" +UUID.randomUUID().toString() + "'," + + " 'body':'" + + " function() {" + + " var client = getContext().getCollection();" + + " var doc = client.createDocument(client.getSelfLink(), { \\'id\\': \\'" + documentCreatedBySprocId + "\\'}, {}, function(err, docCreated, options) { " + + " if(err) throw new Error(\\'Error while creating document: \\' + err.message);" + + " else {" + + " getContext().getResponse().setBody(1);" + + " }" + + " });" + + "}'" + + "}"); + CosmosStoredProcedure createdSproc = 
createdContainer.getScripts().createStoredProcedure(sproc).block().storedProcedure(); + + // Partiton Key value same as what is specified in the stored procedure body + RequestOptions options = new RequestOptions(); + options.setPartitionKey(PartitionKey.None); + int result = Integer.parseInt(createdSproc.execute(null, new CosmosStoredProcedureRequestOptions()).block().responseAsString()); + assertThat(result).isEqualTo(1); + + // 3 previous items + 1 created from the sproc + expectedIds.add(documentCreatedBySprocId); + queryFlux = createdContainer.readAllItems(feedOptions); + queryValidator = new FeedResponseListValidator.Builder() + .totalSize(4) + .numberOfPages(1) + .containsExactlyIds(expectedIds) + .build(); + validateQuerySuccess(queryFlux, queryValidator); + + Mono deleteMono = createdContainer.getItem(upsertedItemId, PartitionKey.None).delete(); + validator = new CosmosResponseValidator.Builder() + .nullResource().build(); + validateSuccess(deleteMono, validator); + + deleteMono = createdContainer.getItem(replacedItemId, PartitionKey.None).delete(); + validator = new CosmosResponseValidator.Builder() + .nullResource().build(); + validateSuccess(deleteMono, validator); + + deleteMono = createdContainer.getItem(NON_PARTITIONED_CONTAINER_DOCUEMNT_ID, PartitionKey.None).delete(); + validator = new CosmosResponseValidator.Builder() + .nullResource().build(); + validateSuccess(deleteMono, validator); + + deleteMono = createdContainer.getItem(documentCreatedBySprocId, PartitionKey.None).delete(); + validator = new CosmosResponseValidator.Builder() + .nullResource().build(); + validateSuccess(deleteMono, validator); + + queryFlux = createdContainer.readAllItems(feedOptions); + queryValidator = new FeedResponseListValidator.Builder() + .totalSize(0) + .numberOfPages(1) + .build(); + validateQuerySuccess(queryFlux, queryValidator); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT*100) + public void testMultiPartitionCollectionReadDocumentWithNoPk() throws InterruptedException { + String partitionedCollectionId = "PartitionedCollection" + UUID.randomUUID().toString(); + String IdOfDocumentWithNoPk = UUID.randomUUID().toString(); + CosmosContainerProperties containerSettings = new CosmosContainerProperties(partitionedCollectionId, "/mypk"); + CosmosContainer createdContainer = createdDatabase.createContainer(containerSettings).block().container(); + CosmosItemProperties cosmosItemProperties = new CosmosItemProperties(); + cosmosItemProperties.id(IdOfDocumentWithNoPk); + CosmosItem createdItem = createdContainer.createItem(cosmosItemProperties).block().item(); + CosmosItemRequestOptions options = new CosmosItemRequestOptions(); + options.partitionKey(PartitionKey.None); + Mono readMono = createdItem.read(options); + CosmosResponseValidator validator = new CosmosResponseValidator.Builder() + .withId(IdOfDocumentWithNoPk).build(); + validateSuccess(readMono, validator); + } + +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/CosmosResponseValidator.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/CosmosResponseValidator.java new file mode 100644 index 0000000000000..798e57c1099b2 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/CosmosResponseValidator.java @@ -0,0 +1,281 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without 
restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map.Entry; + +import static org.assertj.core.api.Assertions.assertThat; + +public interface CosmosResponseValidator { + void validate(T cosmosResponse); + + class Builder { + private List> validators = new ArrayList<>(); + + public CosmosResponseValidator build() { + return new CosmosResponseValidator() { + @SuppressWarnings({"rawtypes", "unchecked"}) + @Override + public void validate(T resourceResponse) { + for (CosmosResponseValidator validator : validators) { + validator.validate(resourceResponse); + } + } + }; + } + + public Builder withId(final String resourceId) { + validators.add(new CosmosResponseValidator() { + + @Override + public void validate(T resourceResponse) { + assertThat(getResource(resourceResponse)).isNotNull(); + assertThat(getResource(resourceResponse).id()).as("check Resource Id").isEqualTo(resourceId); + } + }); + return this; + } + + private Resource getResource(T resourceResponse) { + if (resourceResponse instanceof CosmosDatabaseResponse) { + return ((CosmosDatabaseResponse)resourceResponse).properties(); + } else if (resourceResponse instanceof CosmosContainerResponse) { + return ((CosmosContainerResponse)resourceResponse).properties(); + } else if (resourceResponse instanceof CosmosItemResponse) { + return ((CosmosItemResponse)resourceResponse).properties(); + } else if (resourceResponse instanceof CosmosStoredProcedureResponse) { + return ((CosmosStoredProcedureResponse)resourceResponse).properties(); + } else if (resourceResponse instanceof CosmosTriggerResponse) { + return ((CosmosTriggerResponse)resourceResponse).properties(); + } else if (resourceResponse instanceof CosmosUserDefinedFunctionResponse) { + return ((CosmosUserDefinedFunctionResponse)resourceResponse).properties(); + } else if (resourceResponse instanceof CosmosUserResponse) { + return ((CosmosUserResponse)resourceResponse).properties(); + } else if (resourceResponse instanceof CosmosPermissionResponse) { + return ((CosmosPermissionResponse) resourceResponse).properties(); + } + return null; + } + + public Builder nullResource() { + validators.add(new CosmosResponseValidator() { + + @Override + public void validate(T resourceResponse) { + assertThat(getResource(resourceResponse)).isNull(); + } + }); + return this; + } + + public Builder indexingMode(IndexingMode mode) { + validators.add(new CosmosResponseValidator() { + + @Override + public void validate(CosmosContainerResponse resourceResponse) { + 
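+                    // Confirm the returned container's indexing policy carries the expected indexing mode.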
assertThat(resourceResponse.properties()).isNotNull(); + assertThat(resourceResponse.properties().indexingPolicy()).isNotNull(); + assertThat(resourceResponse.properties().indexingPolicy().indexingMode()).isEqualTo(mode); + } + }); + return this; + } + + public Builder withProperty(String propertyName, String value) { + validators.add(new CosmosResponseValidator() { + @Override + public void validate(T cosmosResponse) { + assertThat(getResource(cosmosResponse)).isNotNull(); + assertThat(getResource(cosmosResponse).get(propertyName)).isEqualTo(value); + } + }); + return this; + } + + public Builder withCompositeIndexes(List> compositeIndexesWritten) { + validators.add(new CosmosResponseValidator() { + + @Override + public void validate(CosmosContainerResponse resourceResponse) { + Iterator> compositeIndexesReadIterator = resourceResponse.properties() + .indexingPolicy().compositeIndexes().iterator(); + Iterator> compositeIndexesWrittenIterator = compositeIndexesWritten.iterator(); + + ArrayList readIndexesStrings = new ArrayList(); + ArrayList writtenIndexesStrings = new ArrayList(); + + while (compositeIndexesReadIterator.hasNext() && compositeIndexesWrittenIterator.hasNext()) { + Iterator compositeIndexReadIterator = compositeIndexesReadIterator.next().iterator(); + Iterator compositeIndexWrittenIterator = compositeIndexesWrittenIterator.next().iterator(); + + StringBuilder readIndexesString = new StringBuilder(); + StringBuilder writtenIndexesString = new StringBuilder(); + + while (compositeIndexReadIterator.hasNext() && compositeIndexWrittenIterator.hasNext()) { + CompositePath compositePathRead = compositeIndexReadIterator.next(); + CompositePath compositePathWritten = compositeIndexWrittenIterator.next(); + + readIndexesString.append(compositePathRead.path() + ":" + compositePathRead.order() + ";"); + writtenIndexesString.append(compositePathWritten.path() + ":" + compositePathRead.order() + ";"); + } + + readIndexesStrings.add(readIndexesString.toString()); + writtenIndexesStrings.add(writtenIndexesString.toString()); + } + + assertThat(readIndexesStrings).containsExactlyInAnyOrderElementsOf(writtenIndexesStrings); + } + + }); + return this; + } + + public Builder withSpatialIndexes(Collection spatialIndexes) { + validators.add(new CosmosResponseValidator() { + + @Override + public void validate(CosmosContainerResponse resourceResponse) { + Iterator spatialIndexesReadIterator = resourceResponse.properties() + .indexingPolicy().spatialIndexes().iterator(); + Iterator spatialIndexesWrittenIterator = spatialIndexes.iterator(); + + HashMap> readIndexMap = new HashMap>(); + HashMap> writtenIndexMap = new HashMap>(); + + while (spatialIndexesReadIterator.hasNext() && spatialIndexesWrittenIterator.hasNext()) { + SpatialSpec spatialSpecRead = spatialIndexesReadIterator.next(); + SpatialSpec spatialSpecWritten = spatialIndexesWrittenIterator.next(); + + String readPath = spatialSpecRead.path() + ":"; + String writtenPath = spatialSpecWritten.path() + ":"; + + ArrayList readSpatialTypes = new ArrayList(); + ArrayList writtenSpatialTypes = new ArrayList(); + + Iterator spatialTypesReadIterator = spatialSpecRead.spatialTypes().iterator(); + Iterator spatialTypesWrittenIterator = spatialSpecWritten.spatialTypes().iterator(); + + while (spatialTypesReadIterator.hasNext() && spatialTypesWrittenIterator.hasNext()) { + readSpatialTypes.add(spatialTypesReadIterator.next()); + writtenSpatialTypes.add(spatialTypesWrittenIterator.next()); + } + + readIndexMap.put(readPath, readSpatialTypes); + 
writtenIndexMap.put(writtenPath, writtenSpatialTypes); + } + + for (Entry> entry : readIndexMap.entrySet()) { + assertThat(entry.getValue()) + .containsExactlyInAnyOrderElementsOf(writtenIndexMap.get(entry.getKey())); + } + } + }); + return this; + } + + public Builder withStoredProcedureBody(String storedProcedureBody) { + validators.add(new CosmosResponseValidator() { + + @Override + public void validate(CosmosStoredProcedureResponse resourceResponse) { + assertThat(resourceResponse.properties().body()).isEqualTo(storedProcedureBody); + } + }); + return this; + } + + public Builder notNullEtag() { + validators.add(new CosmosResponseValidator() { + + @Override + public void validate(T resourceResponse) { + assertThat(resourceResponse.resourceSettings()).isNotNull(); + assertThat(resourceResponse.resourceSettings().etag()).isNotNull(); + } + }); + return this; + } + + public Builder withTriggerBody(String functionBody) { + validators.add(new CosmosResponseValidator() { + + @Override + public void validate(CosmosTriggerResponse resourceResponse) { + assertThat(resourceResponse.properties().body()).isEqualTo(functionBody); + } + }); + return this; + } + + public Builder withTriggerInternals(TriggerType type, TriggerOperation op) { + validators.add(new CosmosResponseValidator() { + + @Override + public void validate(CosmosTriggerResponse resourceResponse) { + assertThat(resourceResponse.properties().triggerType()).isEqualTo(type); + assertThat(resourceResponse.properties().triggerOperation()).isEqualTo(op); + } + }); + return this; + } + + public Builder withUserDefinedFunctionBody(String functionBody) { + validators.add(new CosmosResponseValidator() { + + @Override + public void validate(CosmosUserDefinedFunctionResponse resourceResponse) { + assertThat(resourceResponse.properties().body()).isEqualTo(functionBody); + } + }); + return this; + } + + public Builder withPermissionMode(PermissionMode mode) { + validators.add(new CosmosResponseValidator() { + + @Override + public void validate(CosmosPermissionResponse resourceResponse) { + assertThat(resourceResponse.properties().permissionMode()).isEqualTo(mode); + } + }); + return this; + + } + + public Builder withPermissionResourceLink(String resourceLink) { + validators.add(new CosmosResponseValidator() { + + @Override + public void validate(CosmosPermissionResponse resourceResponse) { + assertThat(resourceResponse.properties().resourceLink()).isEqualTo(resourceLink); + } + }); + return this; + } + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/DocumentClientTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/DocumentClientTest.java new file mode 100644 index 0000000000000..deb7b0166ac96 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/DocumentClientTest.java @@ -0,0 +1,80 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.AsyncDocumentClient; +import com.google.common.base.Strings; +import org.testng.ITest; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; + +import java.lang.reflect.Method; + +public abstract class DocumentClientTest implements ITest { + + private final AsyncDocumentClient.Builder clientBuilder; + private String testName; + + public DocumentClientTest() { + this(new AsyncDocumentClient.Builder()); + } + + public DocumentClientTest(AsyncDocumentClient.Builder clientBuilder) { + this.clientBuilder = clientBuilder; + } + + public final AsyncDocumentClient.Builder clientBuilder() { + return this.clientBuilder; + } + + @Override + public final String getTestName() { + return this.testName; + } + + @BeforeMethod(alwaysRun = true) + public final void setTestName(Method method) { + String testClassAndMethodName = Strings.lenientFormat("%s::%s", + method.getDeclaringClass().getSimpleName(), + method.getName()); + + if (this.clientBuilder.getConnectionPolicy() != null && this.clientBuilder.getConfigs() != null) { + String connectionMode = this.clientBuilder.getConnectionPolicy().connectionMode() == ConnectionMode.DIRECT + ? "Direct " + this.clientBuilder.getConfigs().getProtocol() + : "Gateway"; + + this.testName = Strings.lenientFormat("%s[%s with %s consistency]", + testClassAndMethodName, + connectionMode, + clientBuilder.getDesiredConsistencyLevel()); + } else { + this.testName = testClassAndMethodName; + } + } + + @AfterMethod(alwaysRun = true) + public final void unsetTestName() { + this.testName = null; + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/DocumentCollectionTests.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/DocumentCollectionTests.java new file mode 100644 index 0000000000000..da0998b22235d --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/DocumentCollectionTests.java @@ -0,0 +1,77 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.DocumentCollection; +import com.google.common.collect.ImmutableList; +import org.testng.annotations.Test; + +import java.util.ArrayList; +import java.util.List; + +import static org.assertj.core.api.Assertions.assertThat; + +public class DocumentCollectionTests { + + @Test(groups = { "unit" }) + public void getPartitionKey() { + DocumentCollection collection = new DocumentCollection(); + PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition(); + partitionKeyDefinition.paths(ImmutableList.of("/mypk")); + collection.setPartitionKey(partitionKeyDefinition); + assertThat(collection.getPartitionKey()).isEqualTo(partitionKeyDefinition); + } + + @Test(groups = { "unit" }) + public void getPartitionKey_serializeAndDeserialize() { + DocumentCollection collection = new DocumentCollection(); + PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition(); + partitionKeyDefinition.paths(ImmutableList.of("/mypk")); + partitionKeyDefinition.version(PartitionKeyDefinitionVersion.V2); + collection.setPartitionKey(partitionKeyDefinition); + + DocumentCollection parsedColl = new DocumentCollection(collection.toJson()); + assertThat(parsedColl.getPartitionKey().kind().toString()).isEqualTo(partitionKeyDefinition.kind().toString()); + assertThat(parsedColl.getPartitionKey().paths()).isEqualTo(partitionKeyDefinition.paths()); + assertThat(parsedColl.getPartitionKey().version()).isEqualTo(partitionKeyDefinition.version()); + } + + @Test(groups = { "unit"}) + public void indexingPolicy_serializeAndDeserialize() { + SpatialSpec spatialSpec = new SpatialSpec(); + List<SpatialSpec> spatialSpecList = new ArrayList<>(); + spatialSpecList.add(spatialSpec); + IndexingPolicy indexingPolicy = new IndexingPolicy(); + indexingPolicy.spatialIndexes(spatialSpecList); + DocumentCollection documentCollection = new DocumentCollection(); + documentCollection.setIndexingPolicy(indexingPolicy); + String json = documentCollection.toJson(); + + DocumentCollection documentCollectionPostSerialization = new DocumentCollection(json); + IndexingPolicy indexingPolicyPostSerialization = documentCollectionPostSerialization.getIndexingPolicy(); + assertThat(indexingPolicyPostSerialization).isNotNull(); + List<SpatialSpec> spatialSpecListPostSerialization = indexingPolicyPostSerialization.spatialIndexes(); + assertThat(spatialSpecListPostSerialization).isNotNull(); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/DocumentTests.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/DocumentTests.java new file mode 100644 index 0000000000000..e336876d67fd9 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/DocumentTests.java @@ -0,0 +1,44 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to
the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.Document; +import org.testng.annotations.Test; + +import java.time.OffsetDateTime; +import java.time.ZoneOffset; + +import static com.azure.data.cosmos.BridgeInternal.setTimestamp; +import static org.assertj.core.api.Assertions.assertThat; + +public class DocumentTests { + + @Test(groups = { "unit" }) + public void timestamp() { + Document d = new Document(); + OffsetDateTime time = OffsetDateTime.of(2019, 8, 6, 12, 53, 29, 0, ZoneOffset.UTC); + setTimestamp(d, time); + assertThat(d.timestamp()).isEqualTo(time); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/GatewayTestUtils.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/GatewayTestUtils.java new file mode 100644 index 0000000000000..af6900d868dc7 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/GatewayTestUtils.java @@ -0,0 +1,36 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.PartitionKeyRange; + +import java.util.List; + +public class GatewayTestUtils { + + public static PartitionKeyRange setParent(PartitionKeyRange pkr, List parents) { + pkr.setParents(parents); + return pkr; + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/IncludedPathTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/IncludedPathTest.java new file mode 100644 index 0000000000000..70ab7d2fe9bed --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/IncludedPathTest.java @@ -0,0 +1,56 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos; + +import org.testng.annotations.Test; + +import java.util.Collection; + +import static org.assertj.core.api.Assertions.assertThat; + +public class IncludedPathTest { + + @Test(groups = {"unit"}) + public void deserialize() { + String json = "{" + + " 'path': '\\/*'," + + " 'indexes': [" + + " {" + + " 'kind': 'Range'," + + " 'dataType': 'String'," + + " 'precision': -1" + + " }," + + " {" + + " 'kind': 'Range'," + + " 'dataType': 'Number'," + + " 'precision': -1" + + " }" + + " ]" + + "}"; + IncludedPath path = new IncludedPath(json); + Collection indexes = path.indexes(); + assertThat(indexes).hasSize(2); + assertThat(indexes).usingFieldByFieldElementComparator().contains(Index.Range(DataType.STRING, -1)); + assertThat(indexes).usingFieldByFieldElementComparator().contains(Index.Range(DataType.NUMBER, -1)); + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/JsonSerializableTests.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/JsonSerializableTests.java new file mode 100644 index 0000000000000..ed390db320b51 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/JsonSerializableTests.java @@ -0,0 +1,134 @@ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.Document; +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.core.JsonParseException; +import org.testng.annotations.Test; + +import java.io.Serializable; + +import static com.azure.data.cosmos.BridgeInternal.setProperty; +import static org.assertj.core.api.Assertions.assertThat; +import static org.testng.Assert.fail; + +public class JsonSerializableTests { + + public static class Pojo implements Serializable { + int a; + int b; + + public Pojo(int a, int b) { + this.a = a; + this.b = b; + } + + @JsonCreator + public Pojo(@JsonProperty("a") String a, @JsonProperty("b") String b) { + this.a = Integer.parseInt(a); + this.b = Integer.parseInt(b); + } + + public int getA() { + return a; + } + + public int getB() { + return b; + } + + public void setA(int a) { + this.a = a; + } + + public void setB(int b) { + this.b = b; + } + + } + + public enum enums { + first, second, third + } + + @Test(groups = { "unit" }) + public void getObjectAndCastToClass() { + Document document = new Document(); + // numeric values + setProperty(document, "intValue", Integer.MAX_VALUE); + setProperty(document, "doubleValue", Double.MAX_VALUE); + setProperty(document, "longValue", Long.MAX_VALUE); + + assertThat(document.getObject("intValue", Integer.class).intValue()).isEqualTo(Integer.MAX_VALUE); + assertThat(document.getObject("doubleValue", Double.class).doubleValue()).isEqualTo(Double.MAX_VALUE); + assertThat(document.getObject("longValue", Long.class).longValue()).isEqualTo(Long.MAX_VALUE); + + // string + setProperty(document, "stringValue", "stringField"); + assertThat(document.getObject("stringValue", String.class)).isEqualTo("stringField"); + + // boolean + setProperty(document, "boolValue", true); + assertThat(document.getObject("boolValue", Boolean.class)).isEqualTo(true); + + // enum + setProperty(document, "enumValue", "third"); + assertThat(document.getObject("enumValue", enums.class)).isEqualTo(enums.third); + + // Pojo + Pojo pojo = new Pojo(1, 2); + setProperty(document, "pojoValue", pojo); + Pojo readPojo = document.getObject("pojoValue", Pojo.class); + assertThat(readPojo.getA()).isEqualTo(pojo.getA()); + 
assertThat(readPojo.getB()).isEqualTo(pojo.getB()); + + // JsonSerializable + Document innerDocument = new Document(); + innerDocument.id("innerDocument"); + setProperty(document, "innerDocument", innerDocument); + Document readInnerDocument = document.getObject("innerDocument", Document.class); + assertThat(readInnerDocument.id()).isEqualTo(innerDocument.id()); + } + + @Test(groups = { "unit" }) + public void objectMapperInvalidJsonNoQuotesForFieldAndValue() { + // INVALID Json - field and value must be quoted + try { + new Document("{ field: value }"); + fail("failure expected"); + } catch (Exception e) { + assertThat(e.getCause() instanceof JsonParseException).isTrue(); + } + } + + @Test(groups = { "unit" }) + public void objectMapperInvalidJsonNoQuotesForField() { + // INVALID Json - field must be quoted + try { + new Document("{ field: 'value' }"); + fail("failure expected"); + } catch (Exception e) { + assertThat(e.getCause() instanceof JsonParseException).isTrue(); + } + } + + @Test(groups = { "unit" }) + public void objectMapperInvalidJsonNoDuplicatesAllowed() { + // INVALID Json - duplicates must not exist in Json string + try { + new Document("{ 'field': 'value1', 'field': 'value2' }"); + fail("failure expected"); + } catch (Exception e) { + assertThat(e.getCause() instanceof JsonParseException).isTrue(); + } + } + + @Test(groups = { "unit" }) + public void objectMapperValidJsonWithSingleQuotesAndTrailingComma() { + Document document = null; + + // Valid Json - Single quotes and trailing commas allowed in Json string + document = new Document("{ 'field1': 'value1', 'field2': 'value2', }"); + assertThat(document.toJson().equals("{\"field1\":\"value1\",\"field2\":\"value2\"}")).isEqualTo(true); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/PartitionKeyHashingTests.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/PartitionKeyHashingTests.java new file mode 100644 index 0000000000000..36b117ce4d249 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/PartitionKeyHashingTests.java @@ -0,0 +1,94 @@ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.Undefined; +import com.azure.data.cosmos.internal.routing.PartitionKeyInternalHelper; +import com.fasterxml.jackson.databind.node.NullNode; +import org.testng.annotations.Test; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; + +import static org.assertj.core.api.AssertionsForClassTypes.assertThat; + +public class PartitionKeyHashingTests { + + @Test(groups = "unit") + public void effectivePartitionKeyHashV1() { + HashMap keyToEffectivePartitionKeyString = new HashMap() {{ + put("", "05C1CF33970FF80800"); + put("partitionKey", "05C1E1B3D9CD2608716273756A756A706F4C667A00"); + put(new String(new char[1024]).replace("\0", "a"), "05C1EB5921F706086262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626200"); + put(null, "05C1ED45D7475601"); + put(NullNode.getInstance(), "05C1ED45D7475601"); + put(Undefined.Value(), "05C1D529E345DC00"); + put(true, "05C1D7C5A903D803"); + put(false, "05C1DB857D857C02"); + put(Byte.MIN_VALUE, "05C1D73349F54C053FA0"); + put(Byte.MAX_VALUE, "05C1DD539DDFCC05C05FE0"); + put(Long.MIN_VALUE, "05C1DB35F33D1C053C20"); + put(Long.MAX_VALUE, "05C1B799AB2DD005C3E0"); + put(Integer.MIN_VALUE, "05C1DFBF252BCC053E20"); + put(Integer.MAX_VALUE, "05C1E1F503DFB205C1DFFFFFFFFC"); + 
put(Double.MIN_VALUE, "05C1E5C91F4D3005800101010101010102"); + put(Double.MAX_VALUE, "05C1CBE367C53005FFEFFFFFFFFFFFFFFE"); + }}; + + for (Map.Entry<Object, String> entry : keyToEffectivePartitionKeyString.entrySet()) { + PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition(); + partitionKeyDef.kind(PartitionKind.HASH); + partitionKeyDef.paths(Arrays.asList(new String[]{"\\id"})); + String actualEffectiveKeyString = PartitionKeyInternalHelper.getEffectivePartitionKeyString(new PartitionKey(entry.getKey()).getInternalPartitionKey(),partitionKeyDef, true); + assertThat(entry.getValue()).isEqualTo(actualEffectiveKeyString); + } + } + + @Test(groups = "unit") + public void effectivePartitionKeyHashV2() { + HashMap<Object, String> keyToEffectivePartitionKeyString = new HashMap<Object, String>() {{ + put("", "32E9366E637A71B4E710384B2F4970A0"); + put("partitionKey", "013AEFCF77FA271571CF665A58C933F1"); + put(new String(new char[1024]).replace("\0", "a"), "332BDF5512AE49615F32C7D98C2DB86C"); + put(null, "378867E4430E67857ACE5C908374FE16"); + put(NullNode.getInstance(), "378867E4430E67857ACE5C908374FE16"); + put(Undefined.Value(), "11622DAA78F835834610ABE56EFF5CB5"); + put(true, "0E711127C5B5A8E4726AC6DD306A3E59"); + put(false, "2FE1BE91E90A3439635E0E9E37361EF2"); + put(Byte.MIN_VALUE, "01DAEDABF913540367FE219B2AD06148"); + put(Byte.MAX_VALUE, "0C507ACAC853ECA7977BF4CEFB562A25"); + put(Long.MIN_VALUE, "23D5C6395512BDFEAFADAD15328AD2BB"); + put(Long.MAX_VALUE, "2EDB959178DFCCA18983F89384D1629B"); + put(Integer.MIN_VALUE, "0B1660D5233C3171725B30D4A5F4CC1F"); + put(Integer.MAX_VALUE, "2D9349D64712AEB5EB1406E2F0BE2725"); + put(Double.MIN_VALUE, "0E6CBA63A280927DE485DEF865800139"); + put(Double.MAX_VALUE, "31424D996457102634591FF245DBCC4D"); + }}; + + for (Map.Entry<Object, String> entry : keyToEffectivePartitionKeyString.entrySet()) { + PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition(); + partitionKeyDef.kind(PartitionKind.HASH); + partitionKeyDef.version(PartitionKeyDefinitionVersion.V2); + partitionKeyDef.paths(Arrays.asList(new String[]{"\\id"})); + String actualEffectiveKeyString = PartitionKeyInternalHelper.getEffectivePartitionKeyString(new PartitionKey(entry.getKey()).getInternalPartitionKey(),partitionKeyDef, true); + assertThat(entry.getValue()).isEqualTo(actualEffectiveKeyString); + } + } + + @Test(groups = "unit") + public void hashV2PartitionKeyDeserialization() { + String partitionKeyDefinitionStr = "{\"paths\":[\"/pk\"],\"kind\":\"Hash\",\"version\":2}"; + PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition(partitionKeyDefinitionStr); + assertThat(partitionKeyDef.version()).isEqualTo(PartitionKeyDefinitionVersion.V2); + assertThat(partitionKeyDef.kind()).isEqualTo(PartitionKind.HASH); + assertThat(partitionKeyDef.paths().toArray()[0]).isEqualTo("/pk"); + } + + @Test(groups = "unit") + public void hashV1PartitionKeyDeserialization() { + String partitionKeyDefinitionStr = "{\"paths\":[\"/pk\"],\"kind\":\"Hash\"}"; + PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition(partitionKeyDefinitionStr); + assertThat(partitionKeyDef.version()).isNull(); + assertThat(partitionKeyDef.kind()).isEqualTo(PartitionKind.HASH); + assertThat(partitionKeyDef.paths().toArray()[0]).isEqualTo("/pk"); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/PermissionTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/PermissionTest.java new file mode 100644 index 0000000000000..50eec6fa13355 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/PermissionTest.java @@
-0,0 +1,44 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.Permission; +import org.testng.annotations.Test; + +import static org.assertj.core.api.Assertions.assertThat; + +public class PermissionTest { + + @Test(groups = {"unit"}) + public void deserialize() { + String json = "{" + + " 'id': 'a98eb026-b66b-4cec-8fb9-9b0e10ddab76'," + + " 'permissionMode': 'read'," + + " 'resource': 'dbs/AQAAAA==/colls/AQAAAJ0fgTc='," + + " 'resourcePartitionKey': ['/id']" + + "}"; + Permission p = new Permission(json); + assertThat(p.getResourcePartitionKey()).isEqualToComparingFieldByField(new PartitionKey("/id")); + assertThat(p.getPermissionMode()).isEqualTo(PermissionMode.READ); + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/ResourceIdTests.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/ResourceIdTests.java new file mode 100644 index 0000000000000..c196baf42c6fd --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/ResourceIdTests.java @@ -0,0 +1,112 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos; + +import com.azure.data.cosmos.internal.ResourceId; +import org.apache.commons.lang3.tuple.Pair; +import org.testng.annotations.Test; + +import java.util.HashMap; +import java.util.Map; +import java.util.Random; + +import static org.assertj.core.api.Assertions.assertThat; + +public class ResourceIdTests { + + @Test(groups = { "unit" }) + public void resourceIdTryParsing() { + HashMap<String, Boolean> cases = new HashMap<>(); + cases.put("testDb", false); + cases.put("db", false); + cases.put("cosmosdb", false); + cases.put("asosfactor", false); + + cases.put("f", false); + cases.put("fo", false); + cases.put("foo", false); + cases.put("foob", true); + cases.put("fooba", false); + cases.put("foobar", false); + cases.put("Zm8=", false); + cases.put("Zm9v", true); + cases.put("Zm9vYg==", true); + cases.put("Zm9vYmE=", false); + cases.put("Zm9vYmFy", false); + + // collection rid + cases.put("1-MxAPlgMgA=", true); + cases.put("nJRwAA==", true); + cases.put("MaZyAA==", true); + cases.put("-qpmAA==", true); + cases.put("wsIRAA==", true); + cases.put("GJwnAA==", true); + + // document rid + cases.put("ClZUAPp9+A0=", true); + + // offer rid + cases.put("-d8Hx", false); + + for (Map.Entry<String, Boolean> testCase : cases.entrySet()) { + Pair<Boolean, ResourceId> resourcePair = ResourceId.tryParse(testCase.getKey()); + assertThat( resourcePair.getKey()).as(String.format("ResourceId.tryParse failed for '%s'", testCase.getKey())).isEqualTo(testCase.getValue()); + } + } + + private static int randomNextIntForTest(Random rnd, Boolean positive) { + return rnd.nextInt(Integer.MAX_VALUE / 2) + (positive ? Integer.MAX_VALUE / 2 : - Integer.MAX_VALUE / 2); + } + + @Test(groups = { "unit" }) + public void resourceIdParsingRoundTrip() { + Random rnd = new Random(System.currentTimeMillis()); + + ResourceId dbRid = ResourceId.newDatabaseId(randomNextIntForTest(rnd, true)); + ResourceId parsedDbRid = ResourceId.parse(dbRid.toString()); + assertThat(parsedDbRid.getDatabase()).isEqualTo(dbRid.getDatabase()); + + ResourceId collRid = ResourceId.newDocumentCollectionId(dbRid.toString(), randomNextIntForTest(rnd, false)); + ResourceId parsedCollRid = ResourceId.parse(collRid.toString()); + assertThat(parsedCollRid.getDatabase()).isEqualTo(collRid.getDatabase()); + assertThat(parsedCollRid.getDocumentCollection()).isEqualTo(collRid.getDocumentCollection()); + + ResourceId userRid = ResourceId.newUserId(dbRid.toString(), randomNextIntForTest(rnd, true)); + ResourceId parsedUserRid = ResourceId.parse(userRid.toString()); + assertThat(parsedUserRid.getDatabase()).isEqualTo(userRid.getDatabase()); + assertThat(parsedUserRid.getUser()).isEqualTo(userRid.getUser()); + + ResourceId permissionRid = ResourceId.newPermissionId(userRid.toString(), randomNextIntForTest(rnd, false)); + ResourceId parsedPermissionRid = ResourceId.parse(permissionRid.toString()); + assertThat(parsedPermissionRid.getDatabase()).isEqualTo(permissionRid.getDatabase()); + assertThat(parsedPermissionRid.getUser()).isEqualTo(permissionRid.getUser()); + assertThat(parsedPermissionRid.getPermission()).isEqualTo(permissionRid.getPermission()); + + ResourceId attachmentRid = ResourceId.newAttachmentId("wsIRALoBhyQ9AAAAAAAACA==", randomNextIntForTest(rnd, true)); + ResourceId parsedAttachmentRid = ResourceId.parse(attachmentRid.toString()); + assertThat(parsedAttachmentRid.getDatabase()).isEqualTo(attachmentRid.getDatabase()); + assertThat(parsedAttachmentRid.getDocumentCollection()).isEqualTo(attachmentRid.getDocumentCollection()); +
assertThat(parsedAttachmentRid.getDocument()).isEqualTo(attachmentRid.getDocument()); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/ClientRetryPolicyTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/ClientRetryPolicyTest.java new file mode 100644 index 0000000000000..fbbf29a9389e8 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/ClientRetryPolicyTest.java @@ -0,0 +1,139 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.RetryOptions; +import io.netty.handler.timeout.ReadTimeoutException; +import io.reactivex.subscribers.TestSubscriber; +import org.mockito.Mockito; +import org.testng.annotations.Test; +import reactor.core.publisher.Mono; + +import java.net.URL; +import java.time.Duration; +import java.util.concurrent.TimeUnit; + +public class ClientRetryPolicyTest { + private final static int TIMEOUT = 10000; + + @Test(groups = "unit") + public void networkFailureOnRead() throws Exception { + RetryOptions retryOptions = new RetryOptions(); + GlobalEndpointManager endpointManager = Mockito.mock(GlobalEndpointManager.class); + Mockito.doReturn(new URL("http://localhost")).when(endpointManager).resolveServiceEndpoint(Mockito.any(RxDocumentServiceRequest.class)); + Mockito.doReturn(Mono.empty()).when(endpointManager).refreshLocationAsync(Mockito.eq(null)); + ClientRetryPolicy clientRetryPolicy = new ClientRetryPolicy(endpointManager, true, retryOptions); + + Exception exception = ReadTimeoutException.INSTANCE; + + RxDocumentServiceRequest dsr = RxDocumentServiceRequest.createFromName( + OperationType.Read, "/dbs/db/colls/col/docs/docId", ResourceType.Document); + dsr.requestContext = Mockito.mock(DocumentServiceRequestContext.class); + + clientRetryPolicy.onBeforeSendRequest(dsr); + + for (int i = 0; i < 10; i++) { + Mono shouldRetry = clientRetryPolicy.shouldRetry(exception); + + validateSuccess(shouldRetry, ShouldRetryValidator.builder() + .nullException() + .shouldRetry(true) + .backOfTime(Duration.ofMillis(ClientRetryPolicy.RetryIntervalInMS)) + .build()); + + Mockito.verify(endpointManager, Mockito.times(i + 1)).markEndpointUnavailableForRead(Mockito.any()); + Mockito.verify(endpointManager, Mockito.times(0)).markEndpointUnavailableForWrite(Mockito.any()); + } + } + + @Test(groups = "unit") + public void networkFailureOnWrite() 
throws Exception { + RetryOptions retryOptions = new RetryOptions(); + GlobalEndpointManager endpointManager = Mockito.mock(GlobalEndpointManager.class); + Mockito.doReturn(new URL("http://localhost")).when(endpointManager).resolveServiceEndpoint(Mockito.any(RxDocumentServiceRequest.class)); + Mockito.doReturn(Mono.empty()).when(endpointManager).refreshLocationAsync(Mockito.eq(null)); + ClientRetryPolicy clientRetryPolicy = new ClientRetryPolicy(endpointManager, true, retryOptions); + + Exception exception = ReadTimeoutException.INSTANCE; + + RxDocumentServiceRequest dsr = RxDocumentServiceRequest.createFromName( + OperationType.Create, "/dbs/db/colls/col/docs/docId", ResourceType.Document); + dsr.requestContext = Mockito.mock(DocumentServiceRequestContext.class); + + clientRetryPolicy.onBeforeSendRequest(dsr); + for (int i = 0; i < 10; i++) { + Mono shouldRetry = clientRetryPolicy.shouldRetry(exception); + validateSuccess(shouldRetry, ShouldRetryValidator.builder() + .nullException() + .shouldRetry(true) + .backOfTime(i > 0 ? Duration.ofMillis(ClientRetryPolicy.RetryIntervalInMS) : Duration.ZERO) + .build()); + + Mockito.verify(endpointManager, Mockito.times(0)).markEndpointUnavailableForRead(Mockito.any()); + Mockito.verify(endpointManager, Mockito.times(i + 1)).markEndpointUnavailableForWrite(Mockito.any()); + } + } + + @Test(groups = "unit") + public void onBeforeSendRequestNotInvoked() { + RetryOptions retryOptions = new RetryOptions(); + GlobalEndpointManager endpointManager = Mockito.mock(GlobalEndpointManager.class); + + Mockito.doReturn(Mono.empty()).when(endpointManager).refreshLocationAsync(Mockito.eq(null)); + ClientRetryPolicy clientRetryPolicy = new ClientRetryPolicy(endpointManager, true, retryOptions); + + Exception exception = ReadTimeoutException.INSTANCE; + + RxDocumentServiceRequest dsr = RxDocumentServiceRequest.createFromName( + OperationType.Create, "/dbs/db/colls/col/docs/docId", ResourceType.Document); + dsr.requestContext = Mockito.mock(DocumentServiceRequestContext.class); + + Mono shouldRetry = clientRetryPolicy.shouldRetry(exception); + validateSuccess(shouldRetry, ShouldRetryValidator.builder() + .withException(exception) + .shouldRetry(false) + .build()); + + Mockito.verifyZeroInteractions(endpointManager); + } + + public static void validateSuccess(Mono single, + ShouldRetryValidator validator) { + + validateSuccess(single, validator, TIMEOUT); + } + + public static void validateSuccess(Mono single, + ShouldRetryValidator validator, + long timeout) { + TestSubscriber testSubscriber = new TestSubscriber<>(); + + single.flux().subscribe(testSubscriber); + testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS); + testSubscriber.assertComplete(); + testSubscriber.assertNoErrors(); + testSubscriber.assertValueCount(1); + validator.validate(testSubscriber.values().get(0)); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/ConfigsBuilder.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/ConfigsBuilder.java new file mode 100644 index 0000000000000..f2eba61d20dda --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/ConfigsBuilder.java @@ -0,0 +1,47 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, 
copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.internal.directconnectivity.Protocol; +import org.mockito.Mockito; + +/** + * This can be used for testing. + */ +public class ConfigsBuilder { + private Configs configs = Mockito.spy(new Configs()); + + public static ConfigsBuilder instance() { + return new ConfigsBuilder(); + } + + public ConfigsBuilder withProtocol(Protocol protocol) { + Mockito.doReturn(protocol).when(configs).getProtocol(); + return this; + } + + public Configs build() { + return configs; + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/ConfigsTests.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/ConfigsTests.java new file mode 100644 index 0000000000000..2e7d95c474d87 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/ConfigsTests.java @@ -0,0 +1,56 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.internal.directconnectivity.Protocol; +import org.testng.annotations.Test; + +import static org.assertj.core.api.Assertions.assertThat; + +public class ConfigsTests { + + @Test(groups = { "unit" }) + public void maxHttpHeaderSize() { + Configs config = new Configs(); + assertThat(config.getMaxHttpHeaderSize()).isEqualTo(32 * 1024); + } + + @Test(groups = { "unit" }) + public void maxHttpBodyLength() { + Configs config = new Configs(); + assertThat(config.getMaxHttpBodyLength()).isEqualTo(6 * 1024 * 1024); + } + + @Test(groups = { "unit" }) + public void getProtocol() { + Configs config = new Configs(); + assertThat(config.getProtocol()).isEqualTo(Protocol.valueOf(System.getProperty("cosmos.directModeProtocol", "TCP").toUpperCase())); + } + + @Test(groups = { "unit" }) + public void getDirectHttpsMaxConnectionLimit() { + Configs config = new Configs(); + assertThat(config.getDirectHttpsMaxConnectionLimit()).isEqualTo(Runtime.getRuntime().availableProcessors() * 500); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/ConsistencyTests1.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/ConsistencyTests1.java new file mode 100644 index 0000000000000..2c2487447be0a --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/ConsistencyTests1.java @@ -0,0 +1,297 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.ConnectionMode; +import com.azure.data.cosmos.ConnectionPolicy; +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.PartitionKey; +import com.azure.data.cosmos.PartitionKeyDefinition; +import com.azure.data.cosmos.PartitionKind; +import org.testng.SkipException; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; + +import java.util.ArrayList; +import java.util.UUID; + +import static org.assertj.core.api.Assertions.assertThat; + +public class ConsistencyTests1 extends ConsistencyTestsBase { + + + @Test(groups = {"direct"}, timeOut = CONSISTENCY_TEST_TIMEOUT) + public void validateStrongConsistencyOnSyncReplication() throws Exception { + if (!TestConfigurations.CONSISTENCY.equalsIgnoreCase(ConsistencyLevel.STRONG.toString())) { + throw new SkipException("Endpoint does not have strong consistency"); + } + + ConnectionPolicy connectionPolicy = new ConnectionPolicy(); + connectionPolicy.connectionMode(ConnectionMode.GATEWAY); + this.writeClient = (RxDocumentClientImpl) new AsyncDocumentClient.Builder().withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.STRONG).build(); + + this.readClient = (RxDocumentClientImpl) new AsyncDocumentClient.Builder().withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.STRONG).build(); + User userDefinition = getUserDefinition(); + userDefinition.id(userDefinition.id() + "validateStrongConsistencyOnSyncReplication"); + User user = safeCreateUser(this.initClient, createdDatabase.id(), userDefinition); + validateStrongConsistency(user); + } + + + @Test(groups = {"direct"}, timeOut = CONSISTENCY_TEST_TIMEOUT, enabled = false) + public void validateConsistentLSNForDirectTCPClient() { + //TODO Need to test with TCP protocol + // https://msdata.visualstudio.com/CosmosDB/_workitems/edit/355057 + ConnectionPolicy connectionPolicy = new ConnectionPolicy(); + connectionPolicy.connectionMode(ConnectionMode.DIRECT); + this.writeClient = (RxDocumentClientImpl) new AsyncDocumentClient.Builder().withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .build(); + + this.readClient = (RxDocumentClientImpl) new AsyncDocumentClient.Builder().withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .build(); + validateConsistentLSN(); + } + + @Test(groups = {"direct"}, timeOut = CONSISTENCY_TEST_TIMEOUT) + public void validateConsistentLSNForDirectHttpsClient() { + ConnectionPolicy connectionPolicy = new ConnectionPolicy(); + connectionPolicy.connectionMode(ConnectionMode.DIRECT); + this.writeClient = (RxDocumentClientImpl) new AsyncDocumentClient.Builder().withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .build(); + + this.readClient = (RxDocumentClientImpl) new AsyncDocumentClient.Builder().withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .build(); + validateConsistentLSN(); + } + + 
@Test(groups = {"direct"}, timeOut = CONSISTENCY_TEST_TIMEOUT, enabled = false) + public void validateConsistentLSNAndQuorumAckedLSNForDirectTCPClient() { + //TODO Need to test with TCP protocol + //https://msdata.visualstudio.com/CosmosDB/_workitems/edit/355057 + ConnectionPolicy connectionPolicy = new ConnectionPolicy(); + connectionPolicy.connectionMode(ConnectionMode.DIRECT); + this.writeClient = (RxDocumentClientImpl) new AsyncDocumentClient.Builder().withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .build(); + + this.readClient = (RxDocumentClientImpl) new AsyncDocumentClient.Builder().withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .build(); + validateConsistentLSNAndQuorumAckedLSN(); + } + + @Test(groups = {"direct"}, timeOut = CONSISTENCY_TEST_TIMEOUT) + public void validateStrongDynamicQuorum() { + if (!TestConfigurations.CONSISTENCY.equalsIgnoreCase(ConsistencyLevel.STRONG.toString())) { + throw new SkipException("Endpoint does not have strong consistency"); + } + + validateReadQuorum(ConsistencyLevel.STRONG, ResourceType.Document, false); + } + + @Test(groups = {"direct"}, timeOut = CONSISTENCY_TEST_TIMEOUT) + public void validateBoundedStalenessDynamicQuorumSyncReplication() { + if (!(TestConfigurations.CONSISTENCY.equalsIgnoreCase(ConsistencyLevel.STRONG.toString()) || TestConfigurations.CONSISTENCY.equalsIgnoreCase(ConsistencyLevel.BOUNDED_STALENESS.toString()))) { + throw new SkipException("Endpoint does not have strong consistency"); + } + + validateReadQuorum(ConsistencyLevel.BOUNDED_STALENESS, ResourceType.Document, true); + } + + @Test(groups = {"direct"}, timeOut = CONSISTENCY_TEST_TIMEOUT) + public void validateConsistentLSNAndQuorumAckedLSNForDirectHttpsClient() { + ConnectionPolicy connectionPolicy = new ConnectionPolicy(); + connectionPolicy.connectionMode(ConnectionMode.DIRECT); + this.writeClient = (RxDocumentClientImpl) new AsyncDocumentClient.Builder().withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .build(); + + this.readClient = (RxDocumentClientImpl) new AsyncDocumentClient.Builder().withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .build(); + validateConsistentLSNAndQuorumAckedLSN(); + } + + @Test(groups = {"direct"}, timeOut = CONSISTENCY_TEST_TIMEOUT) + public void validateStrongConsistencyOnAsyncReplicationGW() throws InterruptedException { + validateStrongConsistencyOnAsyncReplication(true); + } + + @Test(groups = {"direct"}, timeOut = CONSISTENCY_TEST_TIMEOUT) + public void validateStrongConsistencyOnAsyncReplicationDirect() throws InterruptedException { + validateStrongConsistencyOnAsyncReplication(false); + } + + @Test(groups = {"direct"}, timeOut = CONSISTENCY_TEST_TIMEOUT) + public void validateSessionContainerAfterCollectionCreateReplace() { + //TODO Need to test with TCP protocol + // https://msdata.visualstudio.com/CosmosDB/_workitems/edit/355057 + //validateSessionContainerAfterCollectionCreateReplace(false, Protocol.TCP); + validateSessionContainerAfterCollectionCreateReplace(false); + validateSessionContainerAfterCollectionCreateReplace(true); + } + + @Test(groups = {"direct"}, timeOut = CONSISTENCY_TEST_TIMEOUT) + public void 
validateConsistentPrefixOnSyncReplication() throws InterruptedException { + if (!(TestConfigurations.CONSISTENCY.equalsIgnoreCase(ConsistencyLevel.STRONG.toString()) || TestConfigurations.CONSISTENCY.equalsIgnoreCase(ConsistencyLevel.BOUNDED_STALENESS.toString()))) { + throw new SkipException("Endpoint does not have strong consistency"); + } + + ConnectionPolicy connectionPolicy = new ConnectionPolicy(); + connectionPolicy.connectionMode(ConnectionMode.GATEWAY); + this.writeClient = (RxDocumentClientImpl) new AsyncDocumentClient.Builder().withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.BOUNDED_STALENESS).build(); + + this.readClient = (RxDocumentClientImpl) new AsyncDocumentClient.Builder().withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.BOUNDED_STALENESS).build(); + User user = safeCreateUser(this.initClient, createdDatabase.id(), getUserDefinition()); + boolean readLagging = validateConsistentPrefix(user); + assertThat(readLagging).isFalse(); + } + + @Test(groups = {"direct"}, timeOut = CONSISTENCY_TEST_TIMEOUT) + public void validateConsistentPrefixOnAsyncReplication() throws InterruptedException { + if (!(TestConfigurations.CONSISTENCY.equalsIgnoreCase(ConsistencyLevel.STRONG.toString()) || TestConfigurations.CONSISTENCY.equalsIgnoreCase(ConsistencyLevel.BOUNDED_STALENESS.toString()))) { + throw new SkipException("Endpoint does not have strong consistency"); + } + + ConnectionPolicy connectionPolicy = new ConnectionPolicy(); + connectionPolicy.connectionMode(ConnectionMode.DIRECT); + this.writeClient = (RxDocumentClientImpl) new AsyncDocumentClient.Builder().withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.BOUNDED_STALENESS) + .build(); + + this.readClient = (RxDocumentClientImpl) new AsyncDocumentClient.Builder().withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.BOUNDED_STALENESS) + .build(); + Document documentDefinition = getDocumentDefinition(); + Document document = createDocument(this.initClient, createdDatabase.id(), createdCollection.id(), documentDefinition); + boolean readLagging = validateConsistentPrefix(document); + //assertThat(readLagging).isTrue(); //Will fail if batch repl is turned off + } + + @Test(groups = {"direct"}, timeOut = CONSISTENCY_TEST_TIMEOUT, enabled = false) + public void validateConsistentPrefixWithReplicaRestartWithPause() { + //TODO this need to complete once we implement emulator container in java, and the we can do operation + // like pause, resume, stop, recycle on it needed for this test. + // https://msdata.visualstudio.com/CosmosDB/_workitems/edit/355053 + } + + @Test(groups = {"direct"}, timeOut = CONSISTENCY_TEST_TIMEOUT, enabled = false) + public void validateConsistentPrefixWithReplicaRestart() { + //TODO this need to complete once we implement emulator container in java, and the we can do operation + // like pause, resume, stop, recycle on it needed for this test. 
+ // https://msdata.visualstudio.com/CosmosDB/_workitems/edit/355053 + } + + @Test(groups = {"direct"}, timeOut = CONSISTENCY_TEST_TIMEOUT) + public void validateSubstatusCodeOnNotFoundExceptionInSessionReadAsync() { + validateSubstatusCodeOnNotFoundExceptionInSessionReadAsync(false); + validateSubstatusCodeOnNotFoundExceptionInSessionReadAsync(true); + } + + @Test(groups = {"direct"}, timeOut = CONSISTENCY_TEST_TIMEOUT, enabled = false) + public void validateBarrierStrongConsistencyForMasterResources() { + //TODO this need to complete once we implement emulator container in java, and the we can do operation + // like pause, resume, stop, recycle on it needed for this test. + // https://msdata.visualstudio.com/CosmosDB/_workitems/edit/355053 + } + + private void validateSubstatusCodeOnNotFoundExceptionInSessionReadAsync(boolean useGateway) { + + ConnectionPolicy connectionPolicy = new ConnectionPolicy(); + if (useGateway) { + connectionPolicy.connectionMode(ConnectionMode.GATEWAY); + } else { + connectionPolicy.connectionMode(ConnectionMode.DIRECT); + } + AsyncDocumentClient client = new AsyncDocumentClient.Builder().withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.SESSION) + .build(); + try { + DocumentCollection documentCollection = new DocumentCollection(); + documentCollection.id(UUID.randomUUID().toString()); + PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition(); + partitionKeyDefinition.kind(PartitionKind.HASH); + ArrayList paths = new ArrayList(); + paths.add("/id"); + partitionKeyDefinition.paths(paths); + documentCollection.setPartitionKey(partitionKeyDefinition); + + DocumentCollection collection = client.createCollection(createdDatabase.selfLink(), documentCollection + , null).blockFirst().getResource(); + RequestOptions requestOptions = new RequestOptions(); + requestOptions.setPartitionKey(new PartitionKey("1")); + + Document documentDefinition = new Document(); + documentDefinition.id("1"); + Document document = client.createDocument(collection.selfLink(), documentDefinition, requestOptions, false).blockFirst().getResource(); + + Flux> deleteObservable = client.deleteDocument(document.selfLink(), requestOptions); + ResourceResponseValidator validator = new ResourceResponseValidator.Builder() + .nullResource().build(); + validateSuccess(deleteObservable, validator); + Flux> readObservable = client.readDocument(document.selfLink(), requestOptions); + FailureValidator notFoundValidator = new FailureValidator.Builder().resourceNotFound().unknownSubStatusCode().build(); + validateFailure(readObservable, notFoundValidator); + + } finally { + safeClose(client); + } + } + + private static User getUserDefinition() { + User user = new User(); + user.id(USER_NAME); + return user; + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/ConsistencyTests2.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/ConsistencyTests2.java new file mode 100644 index 0000000000000..9a58d4b056795 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/ConsistencyTests2.java @@ -0,0 +1,298 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software 
without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.ConnectionMode; +import com.azure.data.cosmos.ConnectionPolicy; +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.PartitionKey; +import com.azure.data.cosmos.internal.directconnectivity.WFConstants; +import com.azure.data.cosmos.internal.routing.PartitionKeyInternal; +import org.apache.commons.lang3.Range; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; + +import static org.assertj.core.api.Assertions.assertThat; + +public class ConsistencyTests2 extends ConsistencyTestsBase { + + @Test(groups = {"direct"}, timeOut = CONSISTENCY_TEST_TIMEOUT) + public void validateReadSessionOnAsyncReplication() throws InterruptedException { + ConnectionPolicy connectionPolicy = new ConnectionPolicy(); + connectionPolicy.connectionMode(ConnectionMode.GATEWAY); + this.writeClient = (RxDocumentClientImpl) new AsyncDocumentClient.Builder().withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.SESSION).build(); + + this.readClient = (RxDocumentClientImpl) new AsyncDocumentClient.Builder().withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.SESSION).build(); + + Document document = this.initClient.createDocument(createdCollection.selfLink(), getDocumentDefinition(), + null, false).blockFirst().getResource(); + Thread.sleep(5000);//WaitForServerReplication + boolean readLagging = this.validateReadSession(document); + //assertThat(readLagging).isTrue(); //Will fail if batch repl is turned off + } + + @Test(groups = {"direct"}, timeOut = CONSISTENCY_TEST_TIMEOUT) + public void validateWriteSessionOnAsyncReplication() throws InterruptedException { + ConnectionPolicy connectionPolicy = new ConnectionPolicy(); + connectionPolicy.connectionMode(ConnectionMode.GATEWAY); + this.writeClient = (RxDocumentClientImpl) new AsyncDocumentClient.Builder().withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + 
.withConsistencyLevel(ConsistencyLevel.SESSION).build(); + + this.readClient = (RxDocumentClientImpl) new AsyncDocumentClient.Builder().withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.SESSION).build(); + + Document document = this.initClient.createDocument(createdCollection.selfLink(), getDocumentDefinition(), + null, false).blockFirst().getResource(); + Thread.sleep(5000);//WaitForServerReplication + boolean readLagging = this.validateWriteSession(document); + //assertThat(readLagging).isTrue(); //Will fail if batch repl is turned off + } + + @Test(groups = {"direct"}, timeOut = CONSISTENCY_TEST_TIMEOUT, enabled = false) + public void validateEventualConsistencyOnAsyncReplicationDirect() { + //TODO this need to complete once we implement emulator container in java, and the we can do operation + // like pause, resume, stop, recycle on it needed for this test. + // https://msdata.visualstudio.com/CosmosDB/_workitems/edit/355053 + } + + @Test(groups = {"direct"}, timeOut = CONSISTENCY_TEST_TIMEOUT, enabled = false) + public void validateEventualConsistencyOnAsyncReplicationGateway() { + //TODO this need to complete once we implement emulator container in java, and the we can do operation + // like pause, resume, stop, recycle on it needed for this test. + // https://msdata.visualstudio.com/CosmosDB/_workitems/edit/355053 + } + + @Test(groups = {"direct"}, timeOut = CONSISTENCY_TEST_TIMEOUT) + public void validateSessionContainerAfterCollectionDeletion() throws Exception { + //TODO Need to test with TCP protocol + // https://msdata.visualstudio.com/CosmosDB/_workitems/edit/355057 + // Verify the collection deletion will trigger the session token clean up (even for different client) + //this.ValidateSessionContainerAfterCollectionDeletion(true, Protocol.TCP); + this.validateSessionContainerAfterCollectionDeletion(true); + this.validateSessionContainerAfterCollectionDeletion(false); + } + + @Test(groups = {"direct"}, timeOut = CONSISTENCY_TEST_TIMEOUT, enabled = false) + public void validateReadDistributionForSessionReads() { + // .NET uses lock method which is eventfully using LastReadAddress only for the test case to pass, we are not implementing this in java + } + + @Test(groups = {"direct"}, timeOut = CONSISTENCY_TEST_TIMEOUT) + public void validateSessionTokenWithPreConditionFailure() throws Exception { + //TODO Need to test with TCP protocol + // https://msdata.visualstudio.com/CosmosDB/_workitems/edit/355057 + //this.validateSessionTokenWithPreConditionFailure(false, Protocol.TCP); + this.validateSessionTokenWithPreConditionFailure(false); + this.validateSessionTokenWithPreConditionFailure(true); + } + + @Test(groups = {"direct"}, timeOut = CONSISTENCY_TEST_TIMEOUT) + public void validateSessionTokenWithDocumentNotFound() throws Exception { + //TODO Need to test with TCP protocol + // https://msdata.visualstudio.com/CosmosDB/_workitems/edit/355057 + //this.validateSessionTokenWithDocumentNotFoundException(false, Protocol.TCP); + this.validateSessionTokenWithDocumentNotFoundException(false); + this.validateSessionTokenWithDocumentNotFoundException(true); + } + + @Test(groups = {"direct"}, timeOut = CONSISTENCY_TEST_TIMEOUT) + public void validateSessionTokenWithExpectedException() throws Exception { + //TODO Need to test with TCP protocol + // https://msdata.visualstudio.com/CosmosDB/_workitems/edit/355057 + 
//this.validateSessionTokenWithExpectedException(false, Protocol.TCP); + this.validateSessionTokenWithExpectedException(false); + this.validateSessionTokenWithExpectedException(true); + } + + @Test(groups = {"direct"}, timeOut = CONSISTENCY_TEST_TIMEOUT) + public void validateSessionTokenWithConflictException() { + //TODO Need to test with TCP protocol + // https://msdata.visualstudio.com/CosmosDB/_workitems/edit/355057 + //this.validateSessionTokenWithConflictException(false, Protocol.TCP); + this.validateSessionTokenWithConflictException(false); + this.validateSessionTokenWithConflictException(true); + } + + @Test(groups = {"direct"}, timeOut = CONSISTENCY_TEST_TIMEOUT) + public void validateSessionTokenMultiPartitionCollection() throws Exception { + //TODO Need to test with TCP protocol + // https://msdata.visualstudio.com/CosmosDB/_workitems/edit/355057 + //this.validateSessionTokenMultiPartitionCollection(false, Protocol.TCP); + this.validateSessionTokenMultiPartitionCollection(false); + this.validateSessionTokenMultiPartitionCollection(true); + } + + @Test(groups = {"direct"}, timeOut = CONSISTENCY_TEST_TIMEOUT) + public void validateSessionTokenFromCollectionReplaceIsServerToken() { + //TODO Need to test with TCP protocol + // https://msdata.visualstudio.com/CosmosDB/_workitems/edit/355057 + //this.validateSessionTokenFromCollectionReplaceIsServerToken(false, Protocol.TCP); + this.validateSessionTokenFromCollectionReplaceIsServerToken(false); + this.validateSessionTokenFromCollectionReplaceIsServerToken(true); + } + + //TODO ReadFeed is broken, will enable the test case once it get fixed + //https://msdata.visualstudio.com/CosmosDB/_workitems/edit/358715 + @Test(groups = {"direct"}, enabled = false, timeOut = CONSISTENCY_TEST_TIMEOUT) + public void validateNoChargeOnFailedSessionRead() throws Exception { + // DIRECT clients for read and write operations + ConnectionPolicy connectionPolicy = new ConnectionPolicy(); + connectionPolicy.connectionMode(ConnectionMode.DIRECT); + RxDocumentClientImpl writeClient = (RxDocumentClientImpl) new AsyncDocumentClient.Builder().withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.SESSION) + .build(); + // Client locked to replica for pause/resume + RxDocumentClientImpl readSecondaryClient = (RxDocumentClientImpl) new AsyncDocumentClient.Builder().withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.SESSION) + .build(); + try { + // CREATE collection + DocumentCollection parentResource = writeClient.createCollection(createdDatabase.selfLink(), + getCollectionDefinition(), null).blockFirst().getResource(); + + // Document to lock pause/resume clients + Document documentDefinition = getDocumentDefinition(); + documentDefinition.id("test" + documentDefinition.id()); + ResourceResponse childResource = writeClient.createDocument(parentResource.selfLink(), documentDefinition, null, true).blockFirst(); + logger.info("Created {} child resource", childResource.getResource().resourceId()); + + String token = childResource.getSessionToken().split(":")[0] + ":" + this.createSessionToken(SessionTokenHelper.parse(childResource.getSessionToken()), 100000000).convertToString(); + + FeedOptions feedOptions = new FeedOptions(); + feedOptions.partitionKey(new 
PartitionKey(PartitionKeyInternal.Empty.toJson())); + feedOptions.sessionToken(token); + FailureValidator validator = new FailureValidator.Builder().statusCode(HttpConstants.StatusCodes.NOTFOUND).subStatusCode(HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE).build(); + Flux<FeedResponse<Document>> feedObservable = readSecondaryClient.readDocuments(parentResource.selfLink(), feedOptions); + validateQueryFailure(feedObservable, validator); + } finally { + safeClose(writeClient); + safeClose(readSecondaryClient); + } + } + + @Test(groups = {"direct"}, enabled = false, timeOut = CONSISTENCY_TEST_TIMEOUT) + public void validateStrongReadOnOldDocument() { + //TODO this needs to be completed once we implement an emulator container in Java, so that we can perform the operations + // (pause, resume, stop, recycle) on it that are needed for this test. + // https://msdata.visualstudio.com/CosmosDB/_workitems/edit/355053 + } + + // TODO: DANOBLE: Investigate DIRECT TCP performance issue + // Note that we need multiple CONSISTENCY_TEST_TIMEOUT + // SEE: https://msdata.visualstudio.com/CosmosDB/_workitems/edit/367028 + + @Test(groups = {"direct"}, timeOut = 4 * CONSISTENCY_TEST_TIMEOUT) + public void validateSessionTokenAsync() { + // Validate that document query never fails + // with NotFoundException + List<Document> documents = new ArrayList<>(); + for (int i = 0; i < 1000; i++) { + Document documentDefinition = getDocumentDefinition(); + BridgeInternal.setProperty(documentDefinition, UUID.randomUUID().toString(), UUID.randomUUID().toString()); + documents.add(documentDefinition); + } + + ConnectionPolicy connectionPolicy = new ConnectionPolicy(); + connectionPolicy.connectionMode(ConnectionMode.DIRECT); + RxDocumentClientImpl client = (RxDocumentClientImpl) new AsyncDocumentClient.Builder().withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.SESSION) + .build(); + + try { + Document lastDocument = client.createDocument(createdCollection.selfLink(), getDocumentDefinition(), + null, true) + .blockFirst() + .getResource(); + + Mono<Void> task1 = ParallelAsync.forEachAsync(Range.between(0, 1000), 5, index -> client.createDocument(createdCollection.selfLink(), documents.get(index % documents.size()), + null, true) + .blockFirst()); + + Mono<Void> task2 = ParallelAsync.forEachAsync(Range.between(0, 1000), 5, index -> { + try { + FeedOptions feedOptions = new FeedOptions(); + feedOptions.enableCrossPartitionQuery(true); + FeedResponse<Document> queryResponse = client.queryDocuments(createdCollection.selfLink(), + "SELECT * FROM c WHERE c.Id = " + + "'foo'", feedOptions) + .blockFirst(); + String lsnHeaderValue = queryResponse.responseHeaders().get(WFConstants.BackendHeaders.LSN); + long lsn = Long.valueOf(lsnHeaderValue); + String sessionTokenHeaderValue = queryResponse.responseHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN); + ISessionToken sessionToken = SessionTokenHelper.parse(sessionTokenHeaderValue); + logger.info("SESSION Token = {}, LSN = {}", sessionToken.convertToString(), lsn); + assertThat(lsn).isEqualTo(sessionToken.getLSN()); + } catch (Exception ex) { + CosmosClientException clientException = (CosmosClientException) ex.getCause(); + if (clientException.statusCode() != 0) { + if (clientException.statusCode() == HttpConstants.StatusCodes.REQUEST_TIMEOUT) { + // ignore + } else if (clientException.statusCode() == HttpConstants.StatusCodes.NOTFOUND) { + String 
lsnHeaderValue = clientException.responseHeaders().get(WFConstants.BackendHeaders.LSN); + long lsn = Long.valueOf(lsnHeaderValue); + String sessionTokenHeaderValue = clientException.responseHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN); + ISessionToken sessionToken = SessionTokenHelper.parse(sessionTokenHeaderValue); + + logger.info("SESSION Token = {}, LSN = {}", sessionToken.convertToString(), lsn); + assertThat(lsn).isEqualTo(sessionToken.getLSN()); + } else { + throw ex; + } + } else { + throw ex; + } + } + }); + Mono.whenDelayError(task1, task2).block(); + } finally { + safeClose(client); + } + } + +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/ConsistencyTestsBase.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/ConsistencyTestsBase.java new file mode 100644 index 0000000000000..7dbdddea5586b --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/ConsistencyTestsBase.java @@ -0,0 +1,869 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + + +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.AccessCondition; +import com.azure.data.cosmos.AccessConditionType; +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.ConnectionMode; +import com.azure.data.cosmos.ConnectionPolicy; +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.PartitionKey; +import com.azure.data.cosmos.PartitionKeyDefinition; +import com.azure.data.cosmos.PartitionKind; +import com.azure.data.cosmos.Resource; +import com.azure.data.cosmos.internal.directconnectivity.WFConstants; +import com.azure.data.cosmos.internal.routing.PartitionKeyInternalHelper; +import com.azure.data.cosmos.internal.routing.Range; +import org.apache.commons.collections4.map.UnmodifiableMap; +import org.apache.commons.lang3.StringUtils; +import org.testng.SkipException; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import reactor.core.publisher.Flux; + +import java.lang.reflect.Constructor; +import java.lang.reflect.Field; +import java.time.OffsetDateTime; +import java.util.Arrays; +import java.util.UUID; +import java.util.concurrent.ConcurrentHashMap; + +import static org.assertj.core.api.Assertions.assertThat; + +public class ConsistencyTestsBase extends TestSuiteBase { + static final int CONSISTENCY_TEST_TIMEOUT = 120000; + static final String USER_NAME = "TestUser"; + RxDocumentClientImpl writeClient; + RxDocumentClientImpl readClient; + AsyncDocumentClient initClient; + Database createdDatabase; + DocumentCollection createdCollection; + + @BeforeClass(groups = {"direct"}, timeOut = SETUP_TIMEOUT) + public void beforeClass() throws Exception { + initClient = createGatewayRxDocumentClient().build(); + createdDatabase = SHARED_DATABASE; + createdCollection = SHARED_MULTI_PARTITION_COLLECTION; + } + + void validateStrongConsistency(Resource resourceToWorkWith) throws Exception { + int numberOfTestIteration = 5; + Resource writeResource = resourceToWorkWith; + while (numberOfTestIteration-- > 0) //Write from a client and do point read through second client and ensure TS matches. + { + OffsetDateTime sourceTimestamp = writeResource.timestamp(); + Thread.sleep(1000); //Timestamp is in granularity of seconds. 
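+ // Upsert through the write client, then read the same resource back through the read client; under strong consistency the read must observe the updated timestamp.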
+ Resource updatedResource = null; + if (resourceToWorkWith instanceof User) { + updatedResource = this.writeClient.upsertUser(createdDatabase.selfLink(), (User) writeResource, null).blockFirst().getResource(); + } else if (resourceToWorkWith instanceof Document) { + RequestOptions options = new RequestOptions(); + options.setPartitionKey(new PartitionKey(resourceToWorkWith.get("mypk"))); + updatedResource = this.writeClient.upsertDocument(createdCollection.selfLink(), (Document) writeResource, options, false).blockFirst().getResource(); + } + assertThat(updatedResource.timestamp().isAfter(sourceTimestamp)).isTrue(); + + User readResource = this.readClient.readUser(resourceToWorkWith.selfLink(), null).blockFirst().getResource(); + assertThat(updatedResource.timestamp()).isEqualTo(readResource.timestamp()); + } + } + + void validateConsistentLSN() { + Document documentDefinition = getDocumentDefinition(); + RequestOptions options = new RequestOptions(); + options.setPartitionKey(new PartitionKey(documentDefinition.get("mypk"))); + Document document = createDocument(this.writeClient, createdDatabase.id(), createdCollection.id(), documentDefinition); + ResourceResponse<Document> response = this.writeClient.deleteDocument(document.selfLink(), options).single().block(); + assertThat(response.getStatusCode()).isEqualTo(204); + + long quorumAckedLSN = Long.parseLong((String) response.getResponseHeaders().get(WFConstants.BackendHeaders.QUORUM_ACKED_LSN)); + assertThat(quorumAckedLSN > 0).isTrue(); + FailureValidator validator = new FailureValidator.Builder().statusCode(404).lsnGreaterThan(quorumAckedLSN).build(); + Flux<ResourceResponse<Document>> readObservable = this.readClient.readDocument(document.selfLink(), options); + validateFailure(readObservable, validator); + } + + void validateConsistentLSNAndQuorumAckedLSN() { + Document documentDefinition = getDocumentDefinition(); + RequestOptions options = new RequestOptions(); + options.setPartitionKey(new PartitionKey(documentDefinition.get("mypk"))); + Document document = createDocument(this.writeClient, createdDatabase.id(), createdCollection.id(), documentDefinition); + ResourceResponse<Document> response = this.writeClient.deleteDocument(document.selfLink(), options).single().block(); + assertThat(response.getStatusCode()).isEqualTo(204); + + long quorumAckedLSN = Long.parseLong((String) response.getResponseHeaders().get(WFConstants.BackendHeaders.QUORUM_ACKED_LSN)); + assertThat(quorumAckedLSN > 0).isTrue(); + + FailureValidator validator = new FailureValidator.Builder().statusCode(404).lsnGreaterThanEqualsTo(quorumAckedLSN).exceptionQuorumAckedLSNInNotNull().build(); + Flux<ResourceResponse<Document>> readObservable = this.readClient.deleteDocument(document.selfLink(), options); + validateFailure(readObservable, validator); + + } + + void validateReadQuorum(ConsistencyLevel consistencyLevel, ResourceType childResourceType, boolean isBoundedStaleness) { + //TODO this needs to be completed once we implement an emulator container in Java, so that we can perform the operations + // (pause, resume, stop, recycle) on it that are needed for this test.
+ // https://msdata.visualstudio.com/CosmosDB/_workitems/edit/355053 + } + + void validateStrongConsistencyOnAsyncReplication(boolean useGateway) throws InterruptedException { + if (!TestConfigurations.CONSISTENCY.equalsIgnoreCase(ConsistencyLevel.STRONG.toString())) { + throw new SkipException("Endpoint does not have strong consistency"); + } + + ConnectionPolicy connectionPolicy = new ConnectionPolicy(); + if (useGateway) { + connectionPolicy.connectionMode(ConnectionMode.GATEWAY); + } + + this.writeClient = (RxDocumentClientImpl) new AsyncDocumentClient.Builder().withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.STRONG).build(); + + this.readClient = (RxDocumentClientImpl) new AsyncDocumentClient.Builder().withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.STRONG).build(); + + Document documentDefinition = getDocumentDefinition(); + Document document = createDocument(this.writeClient, createdDatabase.id(), createdCollection.id(), documentDefinition); + validateStrongConsistency(document); + } + + void validateStrongConsistency(Document documentToWorkWith) throws InterruptedException { + int numberOfTestIteration = 5; + Document writeDocument = documentToWorkWith; + while (numberOfTestIteration-- > 0) { + OffsetDateTime sourceTimestamp = writeDocument.timestamp(); + Thread.sleep(1000);//Timestamp is in granularity of seconds. + RequestOptions options = new RequestOptions(); + options.setPartitionKey(new PartitionKey(documentToWorkWith.get("mypk"))); + Document updatedDocument = this.writeClient.replaceDocument(writeDocument, options).blockFirst().getResource(); + assertThat(updatedDocument.timestamp().isAfter(sourceTimestamp)).isTrue(); + + Document readDocument = this.readClient.readDocument(documentToWorkWith.selfLink(), options).blockFirst().getResource(); + assertThat(updatedDocument.timestamp().equals(readDocument.timestamp())); + } + } + + void validateSessionContainerAfterCollectionCreateReplace(boolean useGateway) { + // DIRECT clients for read and write operations + ConnectionPolicy connectionPolicy = new ConnectionPolicy(); + if (useGateway) { + connectionPolicy.connectionMode(ConnectionMode.GATEWAY); + } else { + connectionPolicy.connectionMode(ConnectionMode.DIRECT); + } + + RxDocumentClientImpl writeClient = (RxDocumentClientImpl) new AsyncDocumentClient.Builder().withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.SESSION).build(); + + try { + PartitionKeyDefinition partitionKey = new PartitionKeyDefinition(); + partitionKey.paths(Arrays.asList("/customerid")); + partitionKey.kind(PartitionKind.HASH); + DocumentCollection coll = null; + { + // self link + ResourceResponse collection = writeClient.createCollection(createdDatabase.selfLink(), getCollectionDefinition(), null).blockFirst(); + String globalSessionToken1 = ((SessionContainer) writeClient.getSession()).getSessionToken(collection.getResource().selfLink()); + String globalSessionToken2 = ((SessionContainer) writeClient.getSession()).getSessionToken(BridgeInternal.getAltLink(collection.getResource())); + System.out.println("BridgeInternal.getAltLink(collection.getResource()) " + 
BridgeInternal.getAltLink(collection.getResource())); + assertThat(collection.getSessionToken()).isEqualTo(globalSessionToken1); + assertThat(collection.getSessionToken()).isEqualTo(globalSessionToken2); + + coll = collection.getResource(); + ResourceResponse collectionRead = writeClient.readCollection(coll.selfLink(), null).blockFirst(); + // timesync might bump the version, comment out the check + //assertThat(collection.sessionToken()).isEqualTo(collectionRead.sessionToken()); + } + { + // name link + ResourceResponse collection = writeClient.createCollection(BridgeInternal.getAltLink(createdDatabase), getCollectionDefinition(), null).blockFirst(); + + String globalSessionToken1 = ((SessionContainer) writeClient.getSession()).getSessionToken(collection.getResource().selfLink()); + String globalSessionToken2 = ((SessionContainer) writeClient.getSession()).getSessionToken(BridgeInternal.getAltLink(collection.getResource())); + assertThat(collection.getSessionToken()).isEqualTo(globalSessionToken1); + //assertThat(collection.sessionToken()).isEqualTo(globalSessionToken2); + + ResourceResponse collectionRead = + writeClient.readCollection(BridgeInternal.getAltLink(collection.getResource()), null).blockFirst(); + // timesync might bump the version, comment out the check + //assertThat(collection.sessionToken()).isEqualTo(collectionRead.sessionToken()); + } + { + Document document2 = new Document(); + document2.id("test" + UUID.randomUUID().toString()); + BridgeInternal.setProperty(document2, "customerid", 2); + // name link + ResourceResponse document = writeClient.createDocument(BridgeInternal.getAltLink(coll), + document2, null, false) + .blockFirst(); + String globalSessionToken1 = ((SessionContainer) writeClient.getSession()).getSessionToken(coll.selfLink()); + String globalSessionToken2 = ((SessionContainer) writeClient.getSession()).getSessionToken(BridgeInternal.getAltLink(coll)); + + assertThat(globalSessionToken1.indexOf(document.getSessionToken())).isNotNegative(); + assertThat(globalSessionToken2.indexOf(document.getSessionToken())).isNotNegative(); + } + { + Document document2 = new Document(); + document2.id("test" + UUID.randomUUID().toString()); + BridgeInternal.setProperty(document2, "customerid", 3); + // name link + ResourceResponse document = writeClient.createDocument(BridgeInternal.getAltLink(coll), + document2, null, false) + .blockFirst(); + String globalSessionToken1 = ((SessionContainer) writeClient.getSession()).getSessionToken(coll.selfLink()); + String globalSessionToken2 = ((SessionContainer) writeClient.getSession()).getSessionToken(BridgeInternal.getAltLink(coll)); + + assertThat(globalSessionToken1.indexOf(document.getSessionToken())).isNotNegative(); + assertThat(globalSessionToken2.indexOf(document.getSessionToken())).isNotNegative(); + } + } finally { + safeClose(writeClient); + } + } + + boolean validateConsistentPrefix(Resource resourceToWorkWith) throws InterruptedException { + int numberOfTestIteration = 5; + OffsetDateTime lastReadDateTime = resourceToWorkWith.timestamp(); + boolean readLagging = false; + Resource writeResource = resourceToWorkWith; + + while (numberOfTestIteration-- > 0) { //Write from a client and do point read through second client and ensure TS monotonically increases. + OffsetDateTime sourceTimestamp = writeResource.timestamp(); + Thread.sleep(1000); //Timestamp is in granularity of seconds. 
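+ // Upsert through the write client and read back through the read client; a consistent-prefix read may lag the latest write (tracked via readLagging), but its timestamp must never move backwards.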
+ Resource updatedResource = null; + if (resourceToWorkWith instanceof User) { + updatedResource = this.writeClient.upsertUser(createdDatabase.selfLink(), (User) writeResource, + null) + .blockFirst() + .getResource(); + } else if (resourceToWorkWith instanceof Document) { + updatedResource = this.writeClient.upsertDocument(createdCollection.selfLink(), + (Document) writeResource, null, false) + .blockFirst() + .getResource(); + } + assertThat(updatedResource.timestamp().isAfter(sourceTimestamp)).isTrue(); + writeResource = updatedResource; + + Resource readResource = null; + if (resourceToWorkWith instanceof User) { + readResource = this.readClient.readUser(resourceToWorkWith.selfLink(), null) + .blockFirst() + .getResource(); + } else if (resourceToWorkWith instanceof Document) { + RequestOptions options = new RequestOptions(); + options.setPartitionKey(new PartitionKey(resourceToWorkWith.get("mypk"))); + readResource = this.readClient.readDocument(resourceToWorkWith.selfLink(), options) + .blockFirst() + .getResource(); + } + assertThat(readResource.timestamp().compareTo(lastReadDateTime) >= 0).isTrue(); + lastReadDateTime = readResource.timestamp(); + if (readResource.timestamp().isBefore(updatedResource.timestamp())) { + readLagging = true; + } + } + return readLagging; + } + + boolean validateReadSession(Resource resourceToWorkWith) throws InterruptedException { + int numberOfTestIteration = 5; + OffsetDateTime lastReadDateTime = OffsetDateTime.MIN; + boolean readLagging = false; + Resource writeResource = resourceToWorkWith; + + while (numberOfTestIteration-- > 0) { + OffsetDateTime sourceTimestamp = writeResource.timestamp(); + Thread.sleep(1000); + Resource updatedResource = null; + if (resourceToWorkWith instanceof Document) { + updatedResource = this.writeClient.upsertDocument(createdCollection.selfLink(), writeResource, + null, false) + .single() + .block() + .getResource(); + } + assertThat(updatedResource.timestamp().isAfter(sourceTimestamp)).isTrue(); + writeResource = updatedResource; + + Resource readResource = null; + RequestOptions requestOptions = new RequestOptions(); + requestOptions.setPartitionKey(new PartitionKey(resourceToWorkWith.get("mypk"))); + if (resourceToWorkWith instanceof Document) { + readResource = this.readClient.readDocument(resourceToWorkWith.selfLink(), requestOptions).blockFirst().getResource(); + } + assertThat(readResource.timestamp().compareTo(lastReadDateTime) >= 0).isTrue(); + lastReadDateTime = readResource.timestamp(); + + if (readResource.timestamp().isBefore(updatedResource.timestamp())) { + readLagging = true; + } + } + return readLagging; + } + + boolean validateWriteSession(Resource resourceToWorkWith) throws InterruptedException { + int numberOfTestIteration = 5; + OffsetDateTime lastReadDateTime = OffsetDateTime.MIN; + boolean readLagging = false; + Resource writeResource = resourceToWorkWith; + + while (numberOfTestIteration-- > 0) { + OffsetDateTime sourceTimestamp = writeResource.timestamp(); + Thread.sleep(1000); + Resource updatedResource = null; + if (resourceToWorkWith instanceof Document) { + updatedResource = this.writeClient.upsertDocument(createdCollection.selfLink(), writeResource, null, false).single().block().getResource(); + } + assertThat(updatedResource.timestamp().isAfter(sourceTimestamp)).isTrue(); + writeResource = updatedResource; + + Resource readResource = null; + RequestOptions requestOptions = new RequestOptions(); + requestOptions.setPartitionKey(new PartitionKey(resourceToWorkWith.get("mypk"))); + if 
(resourceToWorkWith instanceof Document) { + readResource = + this.readClient.readDocument(resourceToWorkWith.selfLink(), requestOptions) + .blockFirst() + .getResource(); + } + assertThat(readResource.timestamp().compareTo(lastReadDateTime) >= 0).isTrue(); + lastReadDateTime = readResource.timestamp(); + + if (readResource.timestamp().isBefore(updatedResource.timestamp())) { + readLagging = true; + } + + //Now perform write on session and update our session token and lastReadTS + Thread.sleep(1000); + if (resourceToWorkWith instanceof Document) { + readResource = this.writeClient.upsertDocument(createdCollection.selfLink(), readResource, + requestOptions, false) + .blockFirst() + .getResource(); + //Now perform write on session + } + assertThat(readResource.timestamp().isAfter(lastReadDateTime)); + + this.readClient.setSession(this.writeClient.getSession()); + } + return readLagging; + } + + void validateSessionContainerAfterCollectionDeletion(boolean useGateway) throws Exception { + ConnectionPolicy connectionPolicy = new ConnectionPolicy(); + if (useGateway) { + connectionPolicy.connectionMode(ConnectionMode.GATEWAY); + } else { + connectionPolicy.connectionMode(ConnectionMode.DIRECT); + } + RxDocumentClientImpl client1 = (RxDocumentClientImpl) new AsyncDocumentClient.Builder().withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.SESSION) + .build(); + RxDocumentClientImpl client2 = (RxDocumentClientImpl) new AsyncDocumentClient.Builder().withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.SESSION) + .build(); + + String collectionId = UUID.randomUUID().toString(); + try { + DocumentCollection collectionDefinition = getCollectionDefinition(); + collectionDefinition.id(collectionId); + DocumentCollection collection = createCollection(client2, createdDatabase.id(), collectionDefinition, null); + ResourceResponseValidator successValidatorCollection = new ResourceResponseValidator.Builder() + .withId(collection.id()) + .build(); + Flux> readObservableCollection = client2.readCollection(collection.selfLink(), null); + validateSuccess(readObservableCollection, successValidatorCollection); + + for (int i = 0; i < 5; i++) { + String documentId = "Generation1-" + i; + Document documentDefinition = getDocumentDefinition(); + documentDefinition.id(documentId); + Document documentCreated = client2.createDocument(collection.selfLink(), documentDefinition, null, true).blockFirst().getResource(); + RequestOptions requestOptions = new RequestOptions(); + requestOptions.setPartitionKey(new PartitionKey(documentCreated.get("mypk"))); + client2.readDocument(BridgeInternal.getAltLink(documentCreated), requestOptions).blockFirst(); + client2.readDocument(documentCreated.selfLink(), requestOptions).blockFirst(); + } + + { + // just create the second for fun + DocumentCollection collection2 = createCollection(client2, createdDatabase.id(), getCollectionDefinition(), null); + successValidatorCollection = new ResourceResponseValidator.Builder() + .withId(collection2.id()) + .build(); + readObservableCollection = client2.readCollection(collection2.selfLink(), null); + validateSuccess(readObservableCollection, successValidatorCollection); + } + // verify the client2 has the same token. 
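+ // the reads above (by name link and by self link) should have populated both entries in client2's session container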
+ { + String token1 = ((SessionContainer) client2.getSession()).getSessionToken(BridgeInternal.getAltLink(collection)); + String token2 = ((SessionContainer) client2.getSession()).getSessionToken(collection.selfLink()); + assertThat(token1).isEqualTo(token2); + } + + // now delete collection use different client + client1.deleteCollection(collection.selfLink(), null).blockFirst(); + + DocumentCollection collectionRandom1 = createCollection(client2, createdDatabase.id(), getCollectionDefinition()); + DocumentCollection documentCollection = getCollectionDefinition(); + collectionDefinition.id(collectionId); + DocumentCollection collectionSameName = createCollection(client2, createdDatabase.id(), collectionDefinition); + String documentId1 = "Generation2-" + 0; + Document databaseDefinition2 = getDocumentDefinition(); + databaseDefinition2.id(documentId1); + Document createdDocument = client1.createDocument(collectionSameName.selfLink(), databaseDefinition2, null, true).blockFirst().getResource(); + RequestOptions requestOptions = new RequestOptions(); + requestOptions.setPartitionKey(new PartitionKey(createdDocument.get("mypk"))); + ResourceResponseValidator successValidator = new ResourceResponseValidator.Builder() + .withId(createdDocument.id()) + .build(); + Flux> readObservable = client1.readDocument(createdDocument.selfLink(), requestOptions); + validateSuccess(readObservable, successValidator); + { + String token1 = ((SessionContainer) client1.getSession()).getSessionToken(BridgeInternal.getAltLink(collectionSameName)); + String token2 = ((SessionContainer) client1.getSession()).getSessionToken(collectionSameName.selfLink()); + assertThat(token1).isEqualTo(token2); + } + + { + // Client2 read using name link should fail with higher LSN. + String token = ((SessionContainer) client1.getSession()).getSessionToken(collectionSameName.selfLink()); + // artificially bump to higher LSN + String higherLsnToken = this.getDifferentLSNToken(token, 2000); + RequestOptions requestOptions1 = new RequestOptions(); + requestOptions1.setSessionToken(higherLsnToken); + requestOptions1.setPartitionKey(new PartitionKey(createdDocument.get("mypk"))); + readObservable = client2.readDocument(BridgeInternal.getAltLink(createdDocument), requestOptions1); + FailureValidator failureValidator = new FailureValidator.Builder().subStatusCode(1002).build(); + validateFailure(readObservable, failureValidator); + } + // this will trigger client2 to clear the token + { + // verify token by altlink is gone! + String token1 = ((SessionContainer) client2.getSession()).getSessionToken(BridgeInternal.getAltLink(collectionSameName)); + String token2 = ((SessionContainer) client2.getSession()).getSessionToken(collection.selfLink()); + assertThat(token1).isEmpty(); + //assertThat(token2).isNotEmpty(); In java both SelfLink and AltLink token remain in sync. + } + { + // second read should succeed! 
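+ // the READ_SESSION_NOT_AVAILABLE failure above cleared the stale token, so a read using client2's own session token now succeeds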
+ readObservable = client2.readDocument(BridgeInternal.getAltLink(createdDocument), requestOptions); + validateSuccess(readObservable, successValidator); + } + // verify deleting indeed delete the collection session token + { + Document documentTest = + client1.createDocument(BridgeInternal.getAltLink(collectionSameName), getDocumentDefinition(), null, true).blockFirst().getResource(); + RequestOptions options = new RequestOptions(); + options.setPartitionKey(new PartitionKey(documentTest.get("mypk"))); + successValidator = new ResourceResponseValidator.Builder() + .withId(documentTest.id()) + .build(); + readObservable = client1.readDocument(documentTest.selfLink(), options); + validateSuccess(readObservable, successValidator); + + client1.deleteCollection(collectionSameName.selfLink(), null).blockFirst(); + String token1 = ((SessionContainer) client2.getSession()).getSessionToken(BridgeInternal.getAltLink(collectionSameName)); + String token2 = ((SessionContainer) client2.getSession()).getSessionToken(collectionSameName.selfLink()); + // currently we can't delete the token from Altlink when deleting using selflink + assertThat(token1).isNotEmpty(); + //assertThat(token2).isEmpty(); In java both SelfLink and AltLink token remain in sync. + } + } finally { + safeClose(client1); + safeClose(client2); + } + + } + + void validateSessionTokenWithPreConditionFailure(boolean useGateway) throws Exception { + ConnectionPolicy connectionPolicy = new ConnectionPolicy(); + if (useGateway) { + connectionPolicy.connectionMode(ConnectionMode.GATEWAY); + } else { + connectionPolicy.connectionMode(ConnectionMode.DIRECT); + } + RxDocumentClientImpl writeClient = (RxDocumentClientImpl) new AsyncDocumentClient.Builder().withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.SESSION) + .build(); + RxDocumentClientImpl validationClient = (RxDocumentClientImpl) new AsyncDocumentClient.Builder().withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.SESSION) + .build(); + try { + // write a document, and upsert to it to update etag. 
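+ // even though the conditional upsert below fails with PRECONDITION_FAILED, the response still carries a session token, so both clients' session containers should end up equal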
+ ResourceResponse documentResponse = writeClient.createDocument(BridgeInternal.getAltLink(createdCollection), getDocumentDefinition(), null, true).blockFirst(); + RequestOptions requestOptions = new RequestOptions(); + requestOptions.setPartitionKey(new PartitionKey(documentResponse.getResource().get("mypk"))); + ResourceResponse upsertResponse = + writeClient.upsertDocument(BridgeInternal.getAltLink(createdCollection), documentResponse.getResource(), requestOptions, true).blockFirst(); + + // create a conditioned read request, with first write request's etag, so the read fails with PreconditionFailure + AccessCondition ac = new AccessCondition(); + ac.condition(documentResponse.getResource().etag()); + ac.type(AccessConditionType.IF_MATCH); + RequestOptions requestOptions1 = new RequestOptions(); + requestOptions.setPartitionKey(new PartitionKey(documentResponse.getResource().get("mypk"))); + requestOptions1.setAccessCondition(ac); + Flux> preConditionFailureResponseObservable = validationClient.upsertDocument(BridgeInternal.getAltLink(createdCollection), + documentResponse.getResource(), requestOptions1, true); + FailureValidator failureValidator = new FailureValidator.Builder().statusCode(HttpConstants.StatusCodes.PRECONDITION_FAILED).build(); + validateFailure(preConditionFailureResponseObservable, failureValidator); + assertThat(isSessionEqual(((SessionContainer) validationClient.getSession()), (SessionContainer) writeClient.getSession())).isTrue(); + + } finally { + safeClose(writeClient); + safeClose(validationClient); + } + } + + void validateSessionTokenWithDocumentNotFoundException(boolean useGateway) throws Exception { + ConnectionPolicy connectionPolicy = new ConnectionPolicy(); + if (useGateway) { + connectionPolicy.connectionMode(ConnectionMode.GATEWAY); + } else { + connectionPolicy.connectionMode(ConnectionMode.DIRECT); + } + RxDocumentClientImpl writeClient = (RxDocumentClientImpl) new AsyncDocumentClient.Builder().withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.SESSION) + .build(); + RxDocumentClientImpl validationClient = (RxDocumentClientImpl) new AsyncDocumentClient.Builder().withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.SESSION) + .build(); + try { + DocumentCollection collectionDefinition = getCollectionDefinition(); + collectionDefinition.id("TestCollection"); + + ResourceResponse documentResponse = writeClient.createDocument(BridgeInternal.getAltLink(createdCollection), getDocumentDefinition(), null, true).blockFirst(); + + FailureValidator failureValidator = new FailureValidator.Builder().statusCode(HttpConstants.StatusCodes.NOTFOUND).build(); + RequestOptions requestOptions = new RequestOptions(); + requestOptions.setPartitionKey(new PartitionKey(documentResponse.getResource().get("mypk"))); + // try to read a non existent document in the same partition that we previously wrote to + Flux> readObservable = validationClient.readDocument(BridgeInternal.getAltLink(documentResponse.getResource()) + "dummy", requestOptions); + validateFailure(readObservable, failureValidator); + assertThat(isSessionEqual(((SessionContainer) validationClient.getSession()), (SessionContainer) writeClient.getSession())).isTrue(); + } finally { + safeClose(writeClient); + safeClose(validationClient); + } + 
} + + void validateSessionTokenWithExpectedException(boolean useGateway) throws Exception { + ConnectionPolicy connectionPolicy = new ConnectionPolicy(); + if (useGateway) { + connectionPolicy.connectionMode(ConnectionMode.GATEWAY); + } else { + connectionPolicy.connectionMode(ConnectionMode.DIRECT); + } + RxDocumentClientImpl writeClient = (RxDocumentClientImpl) new AsyncDocumentClient.Builder().withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.SESSION) + .build(); + try { + ResourceResponse documentResponse = + writeClient.createDocument(BridgeInternal.getAltLink(createdCollection), getDocumentDefinition(), null, false).blockFirst(); + String token = documentResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN); + + // artificially bump to higher LSN + String higherLsnToken = this.getDifferentLSNToken(token, 2000); + FailureValidator failureValidator = new FailureValidator.Builder().subStatusCode(HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE).build(); + RequestOptions requestOptions = new RequestOptions(); + requestOptions.setPartitionKey(new PartitionKey(documentResponse.getResource().get("mypk"))); + requestOptions.setSessionToken(higherLsnToken); + // try to read a non existent document in the same partition that we previously wrote to + Flux> readObservable = writeClient.readDocument(BridgeInternal.getAltLink(documentResponse.getResource()), + requestOptions); + validateFailure(readObservable, failureValidator); + + } finally { + safeClose(writeClient); + } + } + + void validateSessionTokenWithConflictException(boolean useGateway) { + ConnectionPolicy connectionPolicy = new ConnectionPolicy(); + if (useGateway) { + connectionPolicy.connectionMode(ConnectionMode.GATEWAY); + } else { + connectionPolicy.connectionMode(ConnectionMode.DIRECT); + } + RxDocumentClientImpl writeClient = (RxDocumentClientImpl) new AsyncDocumentClient.Builder().withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.SESSION) + .build(); + RxDocumentClientImpl validationClient = (RxDocumentClientImpl) new AsyncDocumentClient.Builder().withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.SESSION) + .build(); + try { + Document documentDefinition = getDocumentDefinition(); + ResourceResponse documentResponse = + writeClient.createDocument(BridgeInternal.getAltLink(createdCollection), documentDefinition, null, true).blockFirst(); + + FailureValidator failureValidator = new FailureValidator.Builder().statusCode(HttpConstants.StatusCodes.CONFLICT).build(); + Flux> conflictDocumentResponse = validationClient.createDocument(BridgeInternal.getAltLink(createdCollection), + documentDefinition, null, + true); + validateFailure(conflictDocumentResponse, failureValidator); + } finally { + safeClose(writeClient); + safeClose(validationClient); + } + } + + void validateSessionTokenMultiPartitionCollection(boolean useGateway) throws Exception { + ConnectionPolicy connectionPolicy = new ConnectionPolicy(); + if (useGateway) { + connectionPolicy.connectionMode(ConnectionMode.GATEWAY); + } else { + connectionPolicy.connectionMode(ConnectionMode.DIRECT); + } + RxDocumentClientImpl writeClient = 
(RxDocumentClientImpl) new AsyncDocumentClient.Builder().withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.SESSION) + .build(); + try { + + Range fullRange = new Range(PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey, + PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey, true, false); + + IRoutingMapProvider routingMapProvider = writeClient.getPartitionKeyRangeCache(); + // assertThat(routingMapProvider.tryGetOverlappingRangesAsync(collection.resourceId(), fullRange, false).toBlocking().value().size()).isEqualTo(5); + + // Document to lock pause/resume clients + Document document1 = new Document(); + document1.id("test" + UUID.randomUUID().toString()); + BridgeInternal.setProperty(document1, "mypk", 1); + ResourceResponse childResource1 = writeClient.createDocument(createdCollection.selfLink(), document1, null, true).blockFirst(); + logger.info("Created {} child resource", childResource1.getResource().resourceId()); + assertThat(childResource1.getSessionToken()).isNotNull(); + assertThat(childResource1.getSessionToken().contains(":")).isTrue(); + String globalSessionToken1 = ((SessionContainer) writeClient.getSession()).getSessionToken(createdCollection.selfLink()); + assertThat(globalSessionToken1.contains(childResource1.getSessionToken())); + + // Document to lock pause/resume clients + Document document2 = new Document(); + document2.id("test" + UUID.randomUUID().toString()); + BridgeInternal.setProperty(document2, "mypk", 2); + ResourceResponse childResource2 = writeClient.createDocument(createdCollection.selfLink(), document2, null, true).blockFirst(); + assertThat(childResource2.getSessionToken()).isNotNull(); + assertThat(childResource2.getSessionToken().contains(":")).isTrue(); + String globalSessionToken2 = ((SessionContainer) writeClient.getSession()).getSessionToken(createdCollection.selfLink()); + logger.info("globalsessiontoken2 {}, childtoken1 {}, childtoken2 {}", globalSessionToken2, childResource1.getSessionToken(), childResource2.getSessionToken()); + assertThat(globalSessionToken2.contains(childResource2.getSessionToken())).isTrue(); + + // this token can read childresource2 but not childresource1 + String sessionToken = + StringUtils.split(childResource1.getSessionToken(), ':')[0] + ":" + createSessionToken(SessionTokenHelper.parse(childResource1.getSessionToken()), 100000000).convertToString() + "," + childResource2.getSessionToken(); + + RequestOptions option = new RequestOptions(); + option.setSessionToken(sessionToken); + option.setPartitionKey(new PartitionKey(2)); + writeClient.readDocument(childResource2.getResource().selfLink(), option).blockFirst(); + + option = new RequestOptions(); + option.setSessionToken(StringUtils.EMPTY); + option.setPartitionKey(new PartitionKey(1)); + writeClient.readDocument(childResource1.getResource().selfLink(), option).blockFirst(); + + option = new RequestOptions(); + option.setSessionToken(sessionToken); + option.setPartitionKey(new PartitionKey(1)); + Flux> readObservable = writeClient.readDocument(childResource1.getResource().selfLink(), option); + FailureValidator failureValidator = + new FailureValidator.Builder().statusCode(HttpConstants.StatusCodes.NOTFOUND).subStatusCode(HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE).build(); + validateFailure(readObservable, failureValidator); + + readObservable = 
writeClient.readDocument(childResource2.getResource().selfLink(), option); + failureValidator = + new FailureValidator.Builder().statusCode(HttpConstants.StatusCodes.NOTFOUND).subStatusCode(HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE).build(); + validateFailure(readObservable, failureValidator); + + assertThat(((SessionContainer) writeClient.getSession()).getSessionToken(createdCollection.selfLink())).isEqualTo + (((SessionContainer) writeClient.getSession()).getSessionToken(BridgeInternal.getAltLink(createdCollection))); + + assertThat(((SessionContainer) writeClient.getSession()).getSessionToken("asdfasdf")).isEmpty(); + assertThat(((SessionContainer) writeClient.getSession()).getSessionToken(createdDatabase.selfLink())).isEmpty(); + } finally { + safeClose(writeClient); + } + } + + void validateSessionTokenFromCollectionReplaceIsServerToken(boolean useGateway) { + ConnectionPolicy connectionPolicy = new ConnectionPolicy(); + if (useGateway) { + connectionPolicy.connectionMode(ConnectionMode.GATEWAY); + } else { + connectionPolicy.connectionMode(ConnectionMode.DIRECT); + } + RxDocumentClientImpl client1 = (RxDocumentClientImpl) new AsyncDocumentClient.Builder().withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.SESSION) + .build(); + RxDocumentClientImpl client2 = null; + try { + Document doc = client1.createDocument(createdCollection.selfLink(), getDocumentDefinition(), null, true).blockFirst().getResource(); + RequestOptions requestOptions = new RequestOptions(); + requestOptions.setPartitionKey(new PartitionKey(doc.get("mypk"))); + Document doc1 = client1.readDocument(BridgeInternal.getAltLink(doc), requestOptions).blockFirst().getResource(); + + String token1 = ((SessionContainer) client1.getSession()).getSessionToken(createdCollection.selfLink()); + client2 = (RxDocumentClientImpl) new AsyncDocumentClient.Builder().withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.SESSION) + .build(); + client2.replaceCollection(createdCollection, null).blockFirst(); + String token2 = ((SessionContainer) client2.getSession()).getSessionToken(createdCollection.selfLink()); + + logger.info("Token after document and after collection replace {} = {}", token1, token2); + } finally { + safeClose(client1); + safeClose(client2); + } + } + + @AfterClass(groups = {"direct"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeClose(this.initClient); + safeClose(this.writeClient); + safeClose(this.readClient); + } + + private String getDifferentLSNToken(String token, long lsnDifferent) throws Exception { + String[] tokenParts = StringUtils.split(token, ':'); + ISessionToken sessionToken = SessionTokenHelper.parse(tokenParts[1]); + ISessionToken differentSessionToken = createSessionToken(sessionToken, sessionToken.getLSN() + lsnDifferent); + return String.format("%s:%s", tokenParts[0], differentSessionToken.convertToString()); + } + + public static ISessionToken createSessionToken(ISessionToken from, long globalLSN) throws Exception { + // Creates session token with specified GlobalLSN + if (from instanceof VectorSessionToken) { + VectorSessionToken fromSessionToken = (VectorSessionToken) from; + Field fieldVersion = VectorSessionToken.class.getDeclaredField("version"); + 
fieldVersion.setAccessible(true); + Long version = (Long) fieldVersion.get(fromSessionToken); + + Field fieldLocalLsnByRegion = VectorSessionToken.class.getDeclaredField("localLsnByRegion"); + fieldLocalLsnByRegion.setAccessible(true); + UnmodifiableMap localLsnByRegion = (UnmodifiableMap) fieldLocalLsnByRegion.get(fromSessionToken); + + Constructor constructor = VectorSessionToken.class.getDeclaredConstructor(long.class, long.class, UnmodifiableMap.class); + constructor.setAccessible(true); + VectorSessionToken vectorSessionToken = constructor.newInstance(version, globalLSN, localLsnByRegion); + return vectorSessionToken; + } else { + throw new IllegalArgumentException(); + } + } + + Document getDocumentDefinition() { + String uuid = UUID.randomUUID().toString(); + Document doc = new Document(String.format("{ " + + "\"id\": \"%s\", " + + "\"mypk\": \"%s\", " + + "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]" + + "}" + , uuid, uuid)); + return doc; + } + + private boolean isSessionEqual(SessionContainer sessionContainer1, SessionContainer sessionContainer2) throws Exception { + if (sessionContainer1 == null) { + return false; + } + + if (sessionContainer2 == null) { + return false; + } + + if (sessionContainer1 == sessionContainer2) { + return true; + } + + Field fieldCollectionResourceIdToSessionTokens1 = SessionContainer.class.getDeclaredField("collectionResourceIdToSessionTokens"); + Field fieldCollectionNameToCollectionResourceId1 = SessionContainer.class.getDeclaredField("collectionNameToCollectionResourceId"); + fieldCollectionResourceIdToSessionTokens1.setAccessible(true); + fieldCollectionNameToCollectionResourceId1.setAccessible(true); + ConcurrentHashMap> collectionResourceIdToSessionTokens1 = + (ConcurrentHashMap>) fieldCollectionResourceIdToSessionTokens1.get(sessionContainer1); + ConcurrentHashMap collectionNameToCollectionResourceId1 = (ConcurrentHashMap) fieldCollectionNameToCollectionResourceId1.get(sessionContainer1); + + + Field fieldCollectionResourceIdToSessionTokens2 = SessionContainer.class.getDeclaredField("collectionResourceIdToSessionTokens"); + Field fieldCollectionNameToCollectionResourceId2 = SessionContainer.class.getDeclaredField("collectionNameToCollectionResourceId"); + fieldCollectionResourceIdToSessionTokens2.setAccessible(true); + fieldCollectionNameToCollectionResourceId2.setAccessible(true); + ConcurrentHashMap> collectionResourceIdToSessionTokens2 = + (ConcurrentHashMap>) fieldCollectionResourceIdToSessionTokens2.get(sessionContainer2); + ConcurrentHashMap collectionNameToCollectionResourceId2 = (ConcurrentHashMap) fieldCollectionNameToCollectionResourceId2.get(sessionContainer2); + + if (collectionResourceIdToSessionTokens1.size() != collectionResourceIdToSessionTokens2.size() || + collectionNameToCollectionResourceId1.size() != collectionNameToCollectionResourceId2.size()) { + return false; + } + + // get keys, and compare entries + for (Long resourceId : collectionResourceIdToSessionTokens1.keySet()) { + if (!collectionResourceIdToSessionTokens1.get(resourceId).equals(collectionResourceIdToSessionTokens2.get(resourceId))) { + return false; + } + } + + for (String collectionName : collectionNameToCollectionResourceId1.keySet()) { + if (!collectionNameToCollectionResourceId1.get(collectionName).equals(collectionNameToCollectionResourceId2.get(collectionName))) { + return false; + } + } + + return true; + } +} \ No newline at end of file diff --git 
a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/DocumentQuerySpyWireContentTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/DocumentQuerySpyWireContentTest.java new file mode 100644 index 0000000000000..266ace95c5764 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/DocumentQuerySpyWireContentTest.java @@ -0,0 +1,208 @@ +/** + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.internal.AsyncDocumentClient.Builder; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.PartitionKey; +import com.azure.data.cosmos.internal.http.HttpRequest; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.TimeUnit; + +import static org.assertj.core.api.Assertions.assertThat; + +public class DocumentQuerySpyWireContentTest extends TestSuiteBase { + + private Database createdDatabase; + private DocumentCollection createdSinglePartitionCollection; + private DocumentCollection createdMultiPartitionCollection; + + private List createdDocumentsInSinglePartitionCollection = new ArrayList<>(); + private List createdDocumentsInMultiPartitionCollection = new ArrayList<>(); + + private SpyClientUnderTestFactory.ClientUnderTest client; + + public String getSinglePartitionCollectionLink() { + return TestUtils.getCollectionNameLink(createdDatabase.id(), createdSinglePartitionCollection.id()); + } + + public String getMultiPartitionCollectionLink() { + return TestUtils.getCollectionNameLink(createdDatabase.id(), createdMultiPartitionCollection.id()); + } + + @Factory(dataProvider = "clientBuilders") + public DocumentQuerySpyWireContentTest(Builder clientBuilder) { + super(clientBuilder); + } + + @DataProvider(name = "responseContinuationTokenLimitParamProvider") + public static Object[][] responseContinuationTokenLimitParamProvider() { + + FeedOptions options1 = new FeedOptions(); + options1.maxItemCount(1); + options1.responseContinuationTokenLimitInKb(5); + options1.partitionKey(new PartitionKey("99")); + 
String query1 = "Select * from r"; + boolean multiPartitionCollection1 = true; + + FeedOptions options2 = new FeedOptions(); + options2.maxItemCount(1); + options2.responseContinuationTokenLimitInKb(5); + options2.partitionKey(new PartitionKey("99")); + String query2 = "Select * from r order by r.prop"; + boolean multiPartitionCollection2 = false; + + FeedOptions options3 = new FeedOptions(); + options3.maxItemCount(1); + options3.responseContinuationTokenLimitInKb(5); + options3.partitionKey(new PartitionKey("99")); + String query3 = "Select * from r"; + boolean multiPartitionCollection3 = false; + + FeedOptions options4 = new FeedOptions(); + options4.partitionKey(new PartitionKey("99")); + String query4 = "Select * from r order by r.prop"; + boolean multiPartitionCollection4 = false; + + return new Object[][]{ + {options1, query1, multiPartitionCollection1}, + {options2, query2, multiPartitionCollection2}, + {options3, query3, multiPartitionCollection3}, + {options4, query4, multiPartitionCollection4}, + }; + } + + @Test(dataProvider = "responseContinuationTokenLimitParamProvider", groups = { "simple" }, timeOut = TIMEOUT) + public void queryWithContinuationTokenLimit(FeedOptions options, String query, boolean isMultiParitionCollection) throws Exception { + String collectionLink; + if (isMultiParitionCollection) { + collectionLink = getMultiPartitionCollectionLink(); + } else { + collectionLink = getSinglePartitionCollectionLink(); + } + + client.clearCapturedRequests(); + + Flux> queryObservable = client + .queryDocuments(collectionLink, query, options); + + List results = queryObservable.flatMap(p -> Flux.fromIterable(p.results())) + .collectList().block(); + + assertThat(results.size()).describedAs("total results").isGreaterThanOrEqualTo(1); + + List requests = client.getCapturedRequests(); + + for(HttpRequest req: requests) { + validateRequestHasContinuationTokenLimit(req, options.responseContinuationTokenLimitInKb()); + } + } + + private void validateRequestHasContinuationTokenLimit(HttpRequest request, Integer expectedValue) { + Map headers = request.headers().toMap(); + if (expectedValue != null && expectedValue > 0) { + assertThat(headers + .containsKey(HttpConstants.HttpHeaders.RESPONSE_CONTINUATION_TOKEN_LIMIT_IN_KB)) + .isTrue(); + assertThat(headers + .get("x-ms-documentdb-responsecontinuationtokenlimitinkb")) + .isEqualTo(Integer.toString(expectedValue)); + } else { + assertThat(headers + .containsKey(HttpConstants.HttpHeaders.RESPONSE_CONTINUATION_TOKEN_LIMIT_IN_KB)) + .isFalse(); + } + } + + public Document createDocument(AsyncDocumentClient client, String collectionLink, int cnt) { + + Document docDefinition = getDocumentDefinition(cnt); + return client + .createDocument(collectionLink, docDefinition, null, false).blockFirst().getResource(); + } + + @BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT) + public void beforeClass() throws Exception { + + client = new SpyClientBuilder(this.clientBuilder()).build(); + + createdDatabase = SHARED_DATABASE; + createdSinglePartitionCollection = SHARED_SINGLE_PARTITION_COLLECTION; + truncateCollection(SHARED_SINGLE_PARTITION_COLLECTION); + + createdMultiPartitionCollection = SHARED_MULTI_PARTITION_COLLECTION; + truncateCollection(SHARED_MULTI_PARTITION_COLLECTION); + + for(int i = 0; i < 3; i++) { + createdDocumentsInSinglePartitionCollection.add(createDocument(client, getCollectionLink(createdSinglePartitionCollection), i)); + createdDocumentsInMultiPartitionCollection.add(createDocument(client, 
getCollectionLink(createdMultiPartitionCollection), i)); + } + + for(int i = 0; i < 5; i++) { + createdDocumentsInSinglePartitionCollection.add(createDocument(client, getCollectionLink(createdSinglePartitionCollection), 99)); + createdDocumentsInMultiPartitionCollection.add(createDocument(client, getCollectionLink(createdMultiPartitionCollection), 99)); + } + + // wait for catch up + TimeUnit.SECONDS.sleep(1); + + FeedOptions options = new FeedOptions(); + options.enableCrossPartitionQuery(true); + + // do the query once to ensure the collection is cached. + client.queryDocuments(getMultiPartitionCollectionLink(), "select * from root", options) + .then().block(); + + // do the query once to ensure the collection is cached. + client.queryDocuments(getSinglePartitionCollectionLink(), "select * from root", options) + .then().block(); + } + + @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeClose(client); + } + + private static Document getDocumentDefinition(int cnt) { + String uuid = UUID.randomUUID().toString(); + Document doc = new Document(String.format("{ " + + "\"id\": \"%s\", " + + "\"prop\" : %d, " + + "\"mypk\": \"%s\", " + + "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]" + + "}" + , uuid, cnt, cnt)); + return doc; + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/DocumentServiceRequestContextValidator.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/DocumentServiceRequestContextValidator.java new file mode 100644 index 0000000000000..1075d78b8d5bf --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/DocumentServiceRequestContextValidator.java @@ -0,0 +1,93 @@ + +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
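[Editorial note: illustrative sketch, not part of this PR.] The DocumentQuerySpyWireContentTest above makes one assertion per captured wire request: when responseContinuationTokenLimitInKb is set to a positive value on FeedOptions, the outgoing request must carry the x-ms-documentdb-responsecontinuationtokenlimitinkb header with exactly that value, and when it is unset (options4 in the data provider) the header must be absent. A minimal restatement of that per-request check is sketched below; it assumes the same package, that HttpHeaders.toMap() yields a Map<String, String> (generic parameters were stripped in this rendering of the diff), and that the HttpConstants constant resolves to the literal header name the test compares against.

    // Hedged sketch of the per-request check performed by validateRequestHasContinuationTokenLimit.
    static void assertContinuationTokenLimitHeader(HttpRequest request, Integer expectedKb) {
        Map<String, String> headers = request.headers().toMap();
        String key = HttpConstants.HttpHeaders.RESPONSE_CONTINUATION_TOKEN_LIMIT_IN_KB;
        if (expectedKb != null && expectedKb > 0) {
            // a limit was requested, so the header must be present with the exact value
            assertThat(headers).containsEntry(key, Integer.toString(expectedKb));
        } else {
            // no limit was requested, so the header must not be sent at all
            assertThat(headers).doesNotContainKey(key);
        }
    }

Running this against every element of client.getCapturedRequests() is what ties the FeedOptions setting to the actual wire traffic.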
+ */ +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.internal.directconnectivity.StoreResponse; + +import java.util.ArrayList; +import java.util.List; + +import static org.assertj.core.api.Assertions.assertThat; + +public interface DocumentServiceRequestContextValidator { + + static Builder builder() { + return new Builder(); + } + + void validate(T v); + + class Builder { + private List> validators = new ArrayList<>(); + + public DocumentServiceRequestContextValidator build() { + return new DocumentServiceRequestContextValidator() { + + @SuppressWarnings({ "rawtypes", "unchecked" }) + @Override + public void validate(T v) { + for (DocumentServiceRequestContextValidator validator : validators) { + validator.validate(v); + } + } + }; + } + + + public Builder add(DocumentServiceRequestContextValidator validator) { + validators.add(validator); + return this; + } + + public Builder qurorumSelectedLSN(long quoriumSelectedLSN) { + add(new DocumentServiceRequestContextValidator() { + @Override + public void validate(DocumentServiceRequestContext v) { + assertThat(v.quorumSelectedLSN).isEqualTo(quoriumSelectedLSN); + } + }); + return this; + } + + public Builder globalCommittedSelectedLSN(long globalCommittedSelectedLSN) { + add(new DocumentServiceRequestContextValidator() { + @Override + public void validate(DocumentServiceRequestContext v) { + assertThat(v.globalCommittedSelectedLSN).isEqualTo(globalCommittedSelectedLSN); + } + }); + return this; + } + + public Builder storeResponses(List storeResponses) { + add(new DocumentServiceRequestContextValidator() { + @Override + public void validate(DocumentServiceRequestContext v) { + assertThat(v.storeResponses).isEqualTo(storeResponses); + } + }); + return this; + } + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/DocumentServiceRequestValidator.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/DocumentServiceRequestValidator.java new file mode 100644 index 0000000000000..b599a39ea5528 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/DocumentServiceRequestValidator.java @@ -0,0 +1,109 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
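[Editorial note: illustrative sketch, not part of this PR.] DocumentServiceRequestContextValidator above is a small composable assertion: each builder method queues one check against the fields of a DocumentServiceRequestContext, and build() returns a validator that runs them all. Generic parameters were stripped in this rendering of the diff, so the sketch below uses raw types; the no-arg DocumentServiceRequestContext constructor, the directly assignable fields, and the misspelled builder method name qurorumSelectedLSN (kept exactly as declared above) are assumptions based on how the validator reads those fields.

    // Hedged usage sketch, same package as the validator.
    DocumentServiceRequestContext context = new DocumentServiceRequestContext();
    context.quorumSelectedLSN = 42L;              // hypothetical values, chosen only for illustration
    context.globalCommittedSelectedLSN = 40L;

    DocumentServiceRequestContextValidator.builder()
            .qurorumSelectedLSN(42L)              // asserts context.quorumSelectedLSN == 42
            .globalCommittedSelectedLSN(40L)      // asserts context.globalCommittedSelectedLSN == 40
            .build()
            .validate(context);                   // fails with an AssertionError on the first mismatch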
+ */ +package com.azure.data.cosmos.internal; + +import java.util.ArrayList; +import java.util.List; + +import static org.assertj.core.api.Assertions.assertThat; + +public interface DocumentServiceRequestValidator { + + static Builder builder() { + return new Builder(); + } + + void validate(T v); + + class Builder { + private List> validators = new ArrayList<>(); + + public DocumentServiceRequestValidator build() { + return new DocumentServiceRequestValidator() { + + @SuppressWarnings({ "rawtypes", "unchecked" }) + @Override + public void validate(T v) { + for (DocumentServiceRequestValidator validator : validators) { + validator.validate(v); + } + } + }; + } + + public Builder add(DocumentServiceRequestValidator validator) { + validators.add(validator); + return this; + } + + public Builder withResourceType(ResourceType resourceType) { + add(new DocumentServiceRequestValidator() { + @Override + public void validate(T v) { + assertThat(v.getResourceType()).isEqualTo(resourceType); + } + }); + return this; + } + + public Builder withOperationType(OperationType operationType) { + add(new DocumentServiceRequestValidator() { + @Override + public void validate(T v) { + assertThat(v.getOperationType()).isEqualTo(operationType); + } + }); + return this; + } + + public Builder resourceTypeIn(ResourceType... resourceType) { + add(new DocumentServiceRequestValidator() { + @Override + public void validate(T v) { + assertThat(v.getResourceType()).isIn((Object[]) resourceType); + } + }); + return this; + } + + public Builder resourceTypeNotIn(ResourceType... resourceType) { + add(new DocumentServiceRequestValidator() { + @Override + public void validate(T v) { + assertThat(v.getResourceType()).isNotIn((Object[]) resourceType); + } + }); + return this; + } + + public Builder add(DocumentServiceRequestContextValidator validator) { + add(new DocumentServiceRequestValidator() { + @Override + public void validate(RxDocumentServiceRequest request) { + validator.validate(request.requestContext); + } + }); + return this; + } + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/FailureValidator.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/FailureValidator.java new file mode 100644 index 0000000000000..c45ccbc696ce7 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/FailureValidator.java @@ -0,0 +1,347 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
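[Editorial note: illustrative sketch, not part of this PR.] DocumentServiceRequestValidator above applies the same builder pattern at the request level (resource type, operation type), and its overloaded add(DocumentServiceRequestContextValidator) is the hook that attaches the context-level checks from the previous file to a request's requestContext. A raw-typed sketch, reusing the createFromName(...) call that appears verbatim in the retry-policy tests later in this PR (the link string is the same placeholder used there):

    RxDocumentServiceRequest request = RxDocumentServiceRequest.createFromName(
            OperationType.Create, "/dbs/db/colls/col/docs/docId", ResourceType.Document);

    DocumentServiceRequestValidator.builder()
            .withResourceType(ResourceType.Document)        // the request addresses a document
            .withOperationType(OperationType.Create)        // and is a create
            .resourceTypeNotIn(ResourceType.Database, ResourceType.DocumentCollection)
            .build()
            .validate(request);

Context-level checks would be appended with .add(DocumentServiceRequestContextValidator.builder()....build()), provided request.requestContext has been populated, as the tests elsewhere in this PR arrange (for example via Mockito mocks).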
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.CosmosError; +import com.azure.data.cosmos.internal.directconnectivity.WFConstants; + +import java.util.ArrayList; +import java.util.List; + +import static org.assertj.core.api.Assertions.assertThat; + +public interface FailureValidator { + + static Builder builder() { + return new Builder(); + } + + void validate(Throwable t); + + class Builder { + private List validators = new ArrayList<>(); + + public FailureValidator build() { + return new FailureValidator() { + @Override + public void validate(Throwable t) { + for (FailureValidator validator : validators) { + validator.validate(t); + } + } + }; + } + + public Builder statusCode(int statusCode) { + validators.add(new FailureValidator() { + @Override + public void validate(Throwable t) { + assertThat(t).isNotNull(); + assertThat(t).isInstanceOf(CosmosClientException.class); + assertThat(((CosmosClientException) t).statusCode()).isEqualTo(statusCode); + } + }); + return this; + } + + public Builder lsnGreaterThan(long quorumAckedLSN) { + validators.add(new FailureValidator() { + @Override + public void validate(Throwable t) { + assertThat(t).isNotNull(); + assertThat(t).isInstanceOf(CosmosClientException.class); + assertThat(BridgeInternal.getLSN((CosmosClientException) t) > quorumAckedLSN).isTrue(); + } + }); + return this; + } + + public Builder lsnGreaterThanEqualsTo(long quorumAckedLSN) { + validators.add(new FailureValidator() { + @Override + public void validate(Throwable t) { + assertThat(t).isNotNull(); + assertThat(t).isInstanceOf(CosmosClientException.class); + assertThat(BridgeInternal.getLSN((CosmosClientException) t) >= quorumAckedLSN).isTrue(); + } + }); + return this; + } + + public Builder exceptionQuorumAckedLSNInNotNull() { + validators.add(new FailureValidator() { + @Override + public void validate(Throwable t) { + assertThat(t).isNotNull(); + assertThat(t).isInstanceOf(CosmosClientException.class); + CosmosClientException cosmosClientException = (CosmosClientException) t; + long exceptionQuorumAckedLSN = -1; + if (cosmosClientException.responseHeaders().get(WFConstants.BackendHeaders.QUORUM_ACKED_LSN) != null) { + exceptionQuorumAckedLSN = Long.parseLong((String) cosmosClientException.responseHeaders().get(WFConstants.BackendHeaders.QUORUM_ACKED_LSN)); + + } + assertThat(exceptionQuorumAckedLSN).isNotEqualTo(-1); + } + }); + return this; + } + + public Builder errorMessageContains(String errorMsg) { + validators.add(new FailureValidator() { + @Override + public void validate(Throwable t) { + assertThat(t).isNotNull(); + assertThat(t.getMessage()).contains(errorMsg); + } + }); + return this; + } + + public Builder notNullActivityId() { + validators.add(new FailureValidator() { + @Override + public void validate(Throwable t) { + assertThat(t).isNotNull(); + assertThat(t).isInstanceOf(CosmosClientException.class); + assertThat(((CosmosClientException) t).message()).isNotNull(); + } + }); + return this; + } + + public Builder error(CosmosError cosmosError) { + validators.add(new FailureValidator() { + @Override + public void validate(Throwable t) { + assertThat(t).isNotNull(); + 
assertThat(t).isInstanceOf(CosmosClientException.class); + assertThat(((CosmosClientException) t).error().toJson()).isEqualTo(cosmosError.toJson()); + } + }); + return this; + } + + public Builder subStatusCode(Integer substatusCode) { + validators.add(new FailureValidator() { + @Override + public void validate(Throwable t) { + assertThat(t).isNotNull(); + assertThat(t).isInstanceOf(CosmosClientException.class); + assertThat(((CosmosClientException) t).subStatusCode()).isEqualTo(substatusCode); + } + }); + return this; + } + + public Builder unknownSubStatusCode() { + validators.add(new FailureValidator() { + @Override + public void validate(Throwable t) { + assertThat(t).isNotNull(); + assertThat(t).isInstanceOf(CosmosClientException.class); + assertThat(((CosmosClientException) t).subStatusCode()).isEqualTo(HttpConstants.SubStatusCodes.UNKNOWN); + } + }); + return this; + } + + public Builder responseHeader(String key, String value) { + validators.add(new FailureValidator() { + @Override + public void validate(Throwable t) { + assertThat(t).isNotNull(); + assertThat(t).isInstanceOf(CosmosClientException.class); + assertThat(((CosmosClientException) t).responseHeaders().get(key)).isEqualTo(value); + } + }); + return this; + } + + public Builder lsn(long lsn) { + validators.add(new FailureValidator() { + @Override + public void validate(Throwable t) { + assertThat(t).isNotNull(); + assertThat(t).isInstanceOf(CosmosClientException.class); + CosmosClientException ex = (CosmosClientException) t; + assertThat(BridgeInternal.getLSN(ex)).isEqualTo(lsn); + } + }); + return this; + } + + public Builder partitionKeyRangeId(String pkrid) { + validators.add(new FailureValidator() { + @Override + public void validate(Throwable t) { + assertThat(t).isNotNull(); + assertThat(t).isInstanceOf(CosmosClientException.class); + CosmosClientException ex = (CosmosClientException) t; + assertThat(BridgeInternal.getPartitionKeyRangeId(ex)).isEqualTo(pkrid); + } + }); + return this; + } + + public Builder resourceAddress(String resourceAddress) { + validators.add(new FailureValidator() { + @Override + public void validate(Throwable t) { + assertThat(t).isNotNull(); + assertThat(t).isInstanceOf(CosmosClientException.class); + CosmosClientException ex = (CosmosClientException) t; + assertThat(BridgeInternal.getResourceAddress(ex)).isEqualTo(resourceAddress); + } + }); + return this; + } + + public Builder instanceOf(Class cls) { + validators.add(new FailureValidator() { + @Override + public void validate(Throwable t) { + assertThat(t).isNotNull(); + assertThat(t).isInstanceOf(cls); + } + }); + return this; + } + + public Builder sameAs(Exception exception) { + validators.add(new FailureValidator() { + @Override + public void validate(Throwable t) { + assertThat(t).isSameAs(exception); + } + }); + return this; + } + + public Builder resourceNotFound() { + + validators.add(new FailureValidator() { + @Override + public void validate(Throwable t) { + assertThat(t).isNotNull(); + assertThat(t).isInstanceOf(CosmosClientException.class); + CosmosClientException ex = (CosmosClientException) t; + assertThat(ex.statusCode()).isEqualTo(404); + + } + }); + return this; + } + + public Builder resourceTokenNotFound() { + validators.add(new FailureValidator() { + @Override + public void validate(Throwable t) { + assertThat(t).isNotNull(); + assertThat(t).isInstanceOf(IllegalArgumentException.class); + IllegalArgumentException ex = (IllegalArgumentException) t; + 
assertThat(ex.getMessage()).isEqualTo(RMResources.ResourceTokenNotFound); + } + }); + return this; + } + + public Builder resourceAlreadyExists() { + + validators.add(new FailureValidator() { + @Override + public void validate(Throwable t) { + assertThat(t).isNotNull(); + assertThat(t).isInstanceOf(CosmosClientException.class); + CosmosClientException ex = (CosmosClientException) t; + assertThat(ex.statusCode()).isEqualTo(409); + + } + }); + return this; + } + + public Builder causeInstanceOf(Class cls) { + validators.add(new FailureValidator() { + @Override + public void validate(Throwable t) { + assertThat(t).isNotNull(); + assertThat(t.getCause()).isNotNull(); + assertThat(t.getCause()).isInstanceOf(cls); + } + }); + return this; + } + + public Builder causeOfCauseInstanceOf(Class cls) { + validators.add(new FailureValidator() { + @Override + public void validate(Throwable t) { + assertThat(t).isNotNull(); + assertThat(t.getCause()).isNotNull(); + assertThat(t.getCause().getCause()).isNotNull(); + assertThat(t.getCause().getCause()).isInstanceOf(cls); + } + }); + return this; + } + + public Builder documentClientExceptionHeaderRequestContainsEntry(String key, String value) { + validators.add(new FailureValidator() { + @Override + public void validate(Throwable t) { + assertThat(t).isNotNull(); + assertThat(t).isInstanceOf(CosmosClientException.class); + CosmosClientException ex = (CosmosClientException) t; + assertThat(BridgeInternal.getRequestHeaders(ex)).containsEntry(key, value); + } + }); + return this; + } + + public Builder withRuntimeExceptionMessage(String message) { + validators.add(new FailureValidator() { + @Override + public void validate(Throwable t) { + assertThat(t).isNotNull(); + assertThat(t).isInstanceOf(RuntimeException.class); + assertThat(t.getMessage()).isEqualTo(message); + } + }); + return this; + } + + public Builder withRuntimeExceptionClass(Class k) { + validators.add(new FailureValidator() { + @Override + public void validate(Throwable t) { + assertThat(t).isNotNull(); + assertThat(t).isInstanceOf(RuntimeException.class); + assertThat(t).isInstanceOf(k); + } + }); + return this; + } + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/FeedResponseListValidator.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/FeedResponseListValidator.java new file mode 100644 index 0000000000000..4da9e76512226 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/FeedResponseListValidator.java @@ -0,0 +1,320 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
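[Editorial note: illustrative sketch, not part of this PR.] FailureValidator above is the error-side counterpart of the response validators: the builder queues assertions about a terminal Throwable (its type, status code, sub-status, headers, LSN and so on) and build() returns a validator that applies them all. The sketch below mirrors how NetworkFailureTest later in this PR consumes it; validateFailure(...) and TIMEOUT are assumed to come from TestSuiteBase, which these tests extend but which is not shown in this excerpt.

    FailureValidator validator = new FailureValidator.Builder()
            .instanceOf(CosmosClientException.class)   // the failure must surface as a service exception
            .statusCode(404)                           // reporting NotFound
            .unknownSubStatusCode()                    // with no specific sub-status code
            .build();

    // Typical call site: subscribe to a publisher that is expected to fail and hand the
    // terminal error to the validator, e.g. validateFailure(observable, validator, TIMEOUT).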
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.CompositePath; +import com.azure.data.cosmos.CosmosItemProperties; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.Resource; +import com.fasterxml.jackson.databind.node.ArrayNode; + +import java.time.Duration; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.assertj.core.api.Assertions.assertThat; + +public interface FeedResponseListValidator { + + void validate(List> feedList); + + class Builder { + private List> validators = new ArrayList<>(); + + public FeedResponseListValidator build() { + return new FeedResponseListValidator() { + + @SuppressWarnings({ "rawtypes", "unchecked" }) + @Override + public void validate(List> feedList) { + for (FeedResponseListValidator validator : validators) { + validator.validate(feedList); + } + } + }; + } + + public Builder totalSize(final int expectedCount) { + validators.add(new FeedResponseListValidator() { + @Override + public void validate(List> feedList) { + int resultCount = feedList.stream().mapToInt(f -> f.results().size()).sum(); + assertThat(resultCount) + .describedAs("total number of results").isEqualTo(expectedCount); + } + }); + return this; + } + + public Builder containsExactly(List expectedRids) { + validators.add(new FeedResponseListValidator() { + @Override + public void validate(List> feedList) { + List actualIds = feedList + .stream() + .flatMap(f -> f.results().stream()) + .map(r -> r.resourceId()) + .collect(Collectors.toList()); + assertThat(actualIds) + .describedAs("Resource IDs of results") + .containsExactlyElementsOf(expectedRids); + } + }); + return this; + } + + public Builder containsExactlyIds(List expectedIds) { + validators.add(new FeedResponseListValidator() { + @Override + public void validate(List> feedList) { + List actualIds = feedList + .stream() + .flatMap(f -> f.results().stream()) + .map(r -> r.id()) + .collect(Collectors.toList()); + assertThat(actualIds) + .describedAs("IDs of results") + .containsExactlyElementsOf(expectedIds); + } + }); + return this; + } + + public Builder validateAllResources(Map> resourceIDToValidator) { + validators.add(new FeedResponseListValidator() { + @Override + public void validate(List> feedList) { + List resources = feedList + .stream() + .flatMap(f -> f.results().stream()) + .collect(Collectors.toList()); + + for(T r: resources) { + ResourceValidator validator = resourceIDToValidator.get(r.resourceId()); + assertThat(validator).isNotNull(); + validator.validate(r); + } + } + }); + return this; + } + + public Builder exactlyContainsInAnyOrder(List expectedIds) { + validators.add(new FeedResponseListValidator() { + @Override + public void validate(List> feedList) { + List actualIds = feedList + .stream() + .flatMap(f -> f.results().stream()) + .map(Resource::resourceId) + .collect(Collectors.toList()); + assertThat(actualIds) + .describedAs("Resource IDs of results") + .containsOnlyElementsOf(expectedIds); + } + }); + return this; + } + + public Builder numberOfPages(int expectedNumberOfPages) { + validators.add(new FeedResponseListValidator() { + 
@Override + public void validate(List> feedList) { + assertThat(feedList) + .describedAs("number of pages") + .hasSize(expectedNumberOfPages); + } + }); + return this; + } + + public Builder numberOfPagesIsGreaterThanOrEqualTo(int leastNumber) { + validators.add(new FeedResponseListValidator() { + @Override + public void validate(List> feedList) { + assertThat(feedList.size()) + .describedAs("number of pages") + .isGreaterThanOrEqualTo(leastNumber); + } + }); + return this; + } + + public Builder totalRequestChargeIsAtLeast(double minimumCharge) { + validators.add(new FeedResponseListValidator() { + @Override + public void validate(List> feedList) { + assertThat(feedList.stream().mapToDouble(p -> p.requestCharge()).sum()) + .describedAs("total request charge") + .isGreaterThanOrEqualTo(minimumCharge); + } + }); + return this; + } + + public Builder pageSatisfy(int pageNumber, FeedResponseValidator pageValidator) { + validators.add(new FeedResponseListValidator() { + @Override + public void validate(List> feedList) { + assertThat(feedList.size()).isGreaterThan(pageNumber); + pageValidator.validate(feedList.get(pageNumber)); + } + }); + return this; + } + + public Builder allPagesSatisfy(FeedResponseValidator pageValidator) { + validators.add(new FeedResponseListValidator() { + @Override + public void validate(List> feedList) { + + for(FeedResponse fp: feedList) { + pageValidator.validate(fp); + } + } + }); + return this; + } + + public Builder withAggregateValue(Object value) { + validators.add(new FeedResponseListValidator() { + @Override + public void validate(List> feedList) { + List list = feedList.get(0).results(); + CosmosItemProperties result = list.size() > 0 ? list.get(0) : null; + + if (result != null) { + if (value instanceof Double) { + + Double d = result.getDouble("_aggregate"); + assertThat(d).isEqualTo(value); + } else if (value instanceof Integer) { + + Integer d = result.getInt("_aggregate"); + assertThat(d).isEqualTo(value); + } else if (value instanceof String) { + + String d = result.getString("_aggregate"); + assertThat(d).isEqualTo(value); + } else if (value instanceof Document){ + + assertThat(result.toString()).isEqualTo(value.toString()); + } else { + + assertThat(result.get("_aggregate")).isNull(); + assertThat(value).isNull(); + } + } else { + + assertThat(value).isNull(); + } + + } + }); + return this; + } + + public Builder withOrderedResults(List expectedOrderedList, + List compositeIndex) { + validators.add(new FeedResponseListValidator() { + @Override + public void validate(List> feedList) { + + List resultOrderedList = feedList.stream() + .flatMap(f -> f.results().stream()) + .collect(Collectors.toList()); + assertThat(expectedOrderedList.size()).isEqualTo(resultOrderedList.size()); + + ArrayList paths = new ArrayList(); + Iterator compositeIndexIterator = compositeIndex.iterator(); + while (compositeIndexIterator.hasNext()) { + paths.add(compositeIndexIterator.next().path().replace("/", "")); + } + for (int i = 0; i < resultOrderedList.size(); i ++) { + ArrayNode resultValues = (ArrayNode) resultOrderedList.get(i).get("$1"); + assertThat(resultValues.size()).isEqualTo(paths.size()); + for (int j = 0; j < paths.size(); j++) { + if (paths.get(j).contains("number")) { + assertThat(expectedOrderedList.get(i).getInt(paths.get(j))).isEqualTo(resultValues.get(j).intValue()); + } else if (paths.get(j).toLowerCase().contains("string")) { + assertThat(expectedOrderedList.get(i).getString(paths.get(j))).isEqualTo(resultValues.get(j).asText()); + } else if 
(paths.get(j).contains("bool")) { + assertThat(expectedOrderedList.get(i).getBoolean(paths.get(j))).isEqualTo(resultValues.get(j).asBoolean()); + } else { + assertThat(resultValues.get(j).isNull()).isTrue(); + assertThat(expectedOrderedList.get(i).get("nullField")).isNull(); + } + } + } + + } + }); + return this; + } + + public Builder pageLengths(int[] pageLengths) { + validators.add(new FeedResponseListValidator() { + @Override + public void validate(List> feedList) { + assertThat(feedList).hasSize(pageLengths.length); + for (int i = 0; i < pageLengths.length; i++) + assertThat(feedList.get(i).results().size()).isEqualTo(pageLengths[i]); + } + }); + return this; + } + + public Builder hasValidQueryMetrics(boolean shouldHaveMetrics) { + validators.add(new FeedResponseListValidator() { + @Override + public void validate(List> feedList) { + for(FeedResponse feedPage: feedList) { + if (shouldHaveMetrics) { + QueryMetrics queryMetrics = BridgeInternal.createQueryMetricsFromCollection(BridgeInternal.queryMetricsFromFeedResponse(feedPage).values()); + assertThat(queryMetrics.getIndexHitDocumentCount()).isGreaterThanOrEqualTo(0); + assertThat(queryMetrics.getRetrievedDocumentSize()).isGreaterThan(0); + assertThat(queryMetrics.getTotalQueryExecutionTime().compareTo(Duration.ZERO)).isGreaterThan(0); + assertThat(queryMetrics.getOutputDocumentCount()).isGreaterThan(0); + assertThat(queryMetrics.getRetrievedDocumentCount()).isGreaterThan(0); + assertThat(queryMetrics.getDocumentLoadTime().compareTo(Duration.ZERO)).isGreaterThan(0); + assertThat(queryMetrics.getDocumentWriteTime().compareTo(Duration.ZERO)).isGreaterThanOrEqualTo(0); + assertThat(queryMetrics.getVMExecutionTime().compareTo(Duration.ZERO)).isGreaterThan(0); + assertThat(queryMetrics.getQueryPreparationTimes().getLogicalPlanBuildTime().compareTo(Duration.ZERO)).isGreaterThan(0); + assertThat(queryMetrics.getQueryPreparationTimes().getPhysicalPlanBuildTime().compareTo(Duration.ZERO)).isGreaterThanOrEqualTo(0); + assertThat(queryMetrics.getQueryPreparationTimes().getQueryCompilationTime().compareTo(Duration.ZERO)).isGreaterThan(0); + assertThat(queryMetrics.getRuntimeExecutionTimes().getQueryEngineExecutionTime().compareTo(Duration.ZERO)).isGreaterThanOrEqualTo(0); + assertThat(BridgeInternal.getClientSideMetrics(queryMetrics).getRequestCharge()).isGreaterThan(0); + } else { + assertThat(BridgeInternal.queryMetricsFromFeedResponse(feedPage).isEmpty()); + } + } + } + }); + return this; + } + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/FeedResponseValidator.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/FeedResponseValidator.java new file mode 100644 index 0000000000000..20a190603c25d --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/FeedResponseValidator.java @@ -0,0 +1,145 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
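[Editorial note: illustrative sketch, not part of this PR.] FeedResponseListValidator above asserts over the whole sequence of pages returned by a query: totals, page counts, IDs, request charge, ordering and query metrics. The sketch below composes a few of its checks using raw types (generic parameters were stripped in this rendering of the diff); the document IDs are hypothetical, and the commented lines only indicate where the page list usually comes from, reusing the queryDocuments(...) shape shown in DocumentQuerySpyWireContentTest above.

    FeedResponseListValidator listValidator = new FeedResponseListValidator.Builder()
            .containsExactlyIds(Arrays.asList("doc-1", "doc-2", "doc-3"))  // hypothetical ids, in expected order
            .numberOfPagesIsGreaterThanOrEqualTo(1)
            .totalRequestChargeIsAtLeast(1.0)
            .build();

    // List<FeedResponse<Document>> pages = client.queryDocuments(collectionLink, query, options)
    //         .collectList().block();
    // listValidator.validate(pages);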
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.Resource; + +import java.util.ArrayList; +import java.util.List; +import java.util.stream.Collectors; + +import static org.assertj.core.api.Assertions.assertThat; + +public interface FeedResponseValidator { + + void validate(FeedResponse feedList); + + public class Builder { + private List> validators = new ArrayList<>(); + + public FeedResponseValidator build() { + return new FeedResponseValidator() { + + @SuppressWarnings({ "rawtypes", "unchecked" }) + @Override + public void validate(FeedResponse feedPage) { + for (FeedResponseValidator validator : validators) { + validator.validate(feedPage); + } + } + }; + } + + public Builder pageSizeIsLessThanOrEqualTo(final int maxPageSize) { + + validators.add(new FeedResponseValidator() { + @Override + public void validate(FeedResponse feedPage) { + assertThat(feedPage.results().size()).isLessThanOrEqualTo(maxPageSize); + } + }); + return this; + } + + public Builder pageSizeOf(final int expectedCount) { + + validators.add(new FeedResponseValidator() { + @Override + public void validate(FeedResponse feedPage) { + assertThat(feedPage.results()).hasSize(expectedCount); + } + }); + return this; + } + + public Builder positiveRequestCharge() { + + validators.add(new FeedResponseValidator() { + @Override + public void validate(FeedResponse feedPage) { + assertThat(feedPage.requestCharge()).isPositive(); + } + }); + return this; + } + + public Builder requestChargeGreaterThanOrEqualTo(double minRequestCharge) { + + validators.add(new FeedResponseValidator() { + @Override + public void validate(FeedResponse feedPage) { + assertThat(feedPage.requestCharge()).isGreaterThanOrEqualTo(minRequestCharge); + } + }); + return this; + } + + public Builder requestChargeLessThanOrEqualTo(double maxRequestCharge) { + + validators.add(new FeedResponseValidator() { + @Override + public void validate(FeedResponse feedPage) { + assertThat(feedPage.requestCharge()).isLessThanOrEqualTo(maxRequestCharge); + } + }); + return this; + } + + public Builder hasHeader(String headerKey) { + + validators.add(new FeedResponseValidator() { + @Override + public void validate(FeedResponse feedPage) { + assertThat(feedPage.responseHeaders()).containsKey(headerKey); + } + }); + return this; + } + + public Builder hasRequestChargeHeader() { + + validators.add(new FeedResponseValidator() { + @Override + public void validate(FeedResponse feedPage) { + assertThat(feedPage.responseHeaders()).containsKey(HttpConstants.HttpHeaders.REQUEST_CHARGE); + } + }); + return this; + } + + public Builder idsExactlyAre(final List expectedIds) { + validators.add(new FeedResponseValidator() { + @Override + public void validate(FeedResponse feedPage) { + assertThat(feedPage + .results().stream() + .map(r -> r.resourceId()) + .collect(Collectors.toList())) + .containsExactlyElementsOf(expectedIds); + } + }); + return this; + } + } +} \ No newline at end of file diff --git 
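[Editorial note: illustrative sketch, not part of this PR.] FeedResponseValidator above is the per-page sibling: the same builder idiom, but each check runs against a single FeedResponse. Its natural composition point is FeedResponseListValidator.allPagesSatisfy(...), so a list-level expectation and a page-level expectation can be declared together. Raw types again, for the same reason as above; the page counts are hypothetical.

    FeedResponseValidator pageValidator = new FeedResponseValidator.Builder()
            .pageSizeIsLessThanOrEqualTo(100)      // no page may exceed the requested page size
            .positiveRequestCharge()               // every page must report an RU charge
            .hasRequestChargeHeader()              // and carry the request charge response header
            .build();

    FeedResponseListValidator listValidator = new FeedResponseListValidator.Builder()
            .numberOfPages(3)                      // hypothetical page count, for illustration
            .allPagesSatisfy(pageValidator)
            .build();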
a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/HttpClientUnderTestWrapper.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/HttpClientUnderTestWrapper.java new file mode 100644 index 0000000000000..d7543eaafc597 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/HttpClientUnderTestWrapper.java @@ -0,0 +1,63 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.internal.http.HttpClient; +import com.azure.data.cosmos.internal.http.HttpRequest; +import org.mockito.Mockito; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import static org.mockito.Mockito.doAnswer; + +/** + * This is a helper class for capturing requests sent over a httpClient. 
+ */ +public class HttpClientUnderTestWrapper { + + final private HttpClient origHttpClient; + final private HttpClient spyHttpClient; + + public final List capturedRequests = Collections.synchronizedList(new ArrayList<>()); + + public HttpClientUnderTestWrapper(HttpClient origHttpClient) { + this.origHttpClient = origHttpClient; + this.spyHttpClient = Mockito.spy(origHttpClient); + initRequestCapture(spyHttpClient); + } + + public HttpClient getSpyHttpClient() { + return spyHttpClient; + } + + private void initRequestCapture(HttpClient spyClient) { + doAnswer(invocationOnMock -> { + HttpRequest httpRequest = invocationOnMock.getArgumentAt(0, HttpRequest.class); + capturedRequests.add(httpRequest); + return origHttpClient.send(httpRequest); + }).when(spyClient).send(Mockito.any(HttpRequest.class)); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/LocationHelperTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/LocationHelperTest.java new file mode 100644 index 0000000000000..c24e244a1452a --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/LocationHelperTest.java @@ -0,0 +1,19 @@ +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.internal.routing.LocationHelper; +import org.testng.annotations.Test; + +import java.net.URI; +import java.net.URL; + +import static org.assertj.core.api.Assertions.assertThat; + +public class LocationHelperTest { + @Test(groups = "unit") + public void getLocationEndpoint() throws Exception { + URL globalServiceEndpoint = URI.create("https://account-name.documents.azure.com:443").toURL(); + URL expectedRegionServiceEndpoint = URI.create("https://account-name-east-us.documents.azure.com:443").toURL(); + assertThat(LocationHelper.getLocationEndpoint(globalServiceEndpoint, "east-us")) + .isEqualTo(expectedRegionServiceEndpoint); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/NetworkFailureTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/NetworkFailureTest.java new file mode 100644 index 0000000000000..c045cd9cb6670 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/NetworkFailureTest.java @@ -0,0 +1,92 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
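[Editorial note: illustrative sketch, not part of this PR.] HttpClientUnderTestWrapper above gives tests a pass-through spy: every HttpRequest handed to send(...) is recorded in capturedRequests (a synchronized list, so it can be read while reactive pipelines are still running) and then forwarded to the original client unchanged. A hedged sketch of the intended shape of its use; how the spy client is actually wired into an AsyncDocumentClient is builder-specific and not shown in this excerpt.

    static HttpClientUnderTestWrapper captureTrafficOf(HttpClient realClient) {
        HttpClientUnderTestWrapper wrapper = new HttpClientUnderTestWrapper(realClient);
        // hand wrapper.getSpyHttpClient() to the component under test in place of realClient
        return wrapper;
    }

    // after exercising the component:
    // assertThat(wrapper.capturedRequests).isNotEmpty();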
+ */ + +package com.azure.data.cosmos.internal; + +import org.mockito.Mockito; +import org.testng.annotations.AfterClass; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; + +import java.net.UnknownHostException; +import java.time.Instant; + +import static org.assertj.core.api.Java6Assertions.assertThat; + +public class NetworkFailureTest extends TestSuiteBase { + private static final int TIMEOUT = ClientRetryPolicy.MaxRetryCount * ClientRetryPolicy.RetryIntervalInMS + 60000; + private final DocumentCollection collectionDefinition; + + @Factory(dataProvider = "simpleClientBuildersWithDirect") + public NetworkFailureTest(AsyncDocumentClient.Builder clientBuilder) { + super(clientBuilder); + this.collectionDefinition = getCollectionDefinition(); + } + + @Test(groups = { "emulator" }, timeOut = TIMEOUT) + public void createCollectionWithUnreachableHost() { + SpyClientUnderTestFactory.ClientWithGatewaySpy client = null; + + try { + client = SpyClientUnderTestFactory.createClientWithGatewaySpy(clientBuilder()); + + Database database = SHARED_DATABASE; + + Flux> createObservable = client + .createCollection(database.selfLink(), collectionDefinition, null); + + + final RxGatewayStoreModel origGatewayStoreModel = client.getOrigGatewayStoreModel(); + + Mockito.doAnswer(invocation -> { + RxDocumentServiceRequest request = invocation.getArgumentAt(0, RxDocumentServiceRequest.class); + + if (request.getResourceType() == ResourceType.DocumentCollection) { + return Flux.error(new UnknownHostException()); + } + + return origGatewayStoreModel.processMessage(request); + + }).when(client.getSpyGatewayStoreModel()).processMessage(Mockito.any()); + + + FailureValidator validator = new FailureValidator.Builder().instanceOf(UnknownHostException.class).build(); + Instant start = Instant.now(); + validateFailure(createObservable, validator, TIMEOUT); + Instant after = Instant.now(); + assertThat(after.toEpochMilli() - start.toEpochMilli()) + .isGreaterThanOrEqualTo(ClientRetryPolicy.MaxRetryCount * ClientRetryPolicy.RetryIntervalInMS); + + } finally { + safeClose(client); + } + } + + @AfterClass(groups = { "emulator" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + AsyncDocumentClient client = createGatewayHouseKeepingDocumentClient().build(); + safeDeleteCollection(client, collectionDefinition); + client.close(); + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/ParallelAsync.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/ParallelAsync.java new file mode 100644 index 0000000000000..e454a20ee7a1e --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/ParallelAsync.java @@ -0,0 +1,52 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +import org.apache.commons.lang3.Range; +import reactor.core.publisher.Mono; + +import java.util.ArrayList; +import java.util.List; +import java.util.function.Consumer; + +public class ParallelAsync { + + static Mono forEachAsync(Range range, int partition, Consumer func) { + + int partitionSize = (range.getMaximum() - range.getMinimum()) / partition; + List> task = new ArrayList<>(); + int startRange = range.getMinimum(); + for (int i = 0; i < partition; i++) { + Range integerRange = Range.between(startRange, startRange + partitionSize); + task.add(Mono.defer(() -> { + for(int j = integerRange.getMinimum(); j < integerRange.getMaximum();j++) { + func.accept(j); + } + return Mono.empty(); + })); + startRange = startRange + partitionSize ; + } + return Mono.whenDelayError(task); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/PathsHelperTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/PathsHelperTest.java new file mode 100644 index 0000000000000..2538b0faf95dd --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/PathsHelperTest.java @@ -0,0 +1,117 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
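[Editorial note: illustrative sketch, not part of this PR.] ParallelAsync.forEachAsync above splits an index range into a fixed number of partitions, runs the Consumer over each partition inside Mono.defer, and completes only when every partition has finished (Mono.whenDelayError). Two observations about the code as written: the upper bound behaves as exclusive (the inner loop uses j < max), and because the partition size comes from integer division, a tail of the range goes unvisited whenever (max - min) is not a multiple of the partition count. A small raw-typed usage sketch follows; it assumes the same package (the method is package-private) and java.util.concurrent.atomic.AtomicInteger.

    AtomicInteger visited = new AtomicInteger();
    ParallelAsync.forEachAsync(Range.between(0, 100), 4, i -> visited.incrementAndGet())
            .block();                                  // returns once all four partitions have run
    assertThat(visited.get()).isEqualTo(100);          // indices 0..99, 25 per partition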
+ */ + +package com.azure.data.cosmos.internal; + +import org.testng.annotations.Test; + +import static org.assertj.core.api.Assertions.assertThat; + +public class PathsHelperTest { + + private static final String DATABASE_ID = "IXYFAA=="; + private static final String DATABASE_COLLECTION_ID = "IXYFAOHEBPM="; + private static final String DOCUMENT_ID = "IXYFAOHEBPMBAAAAAAAAAA=="; + private static final String ATTACHMENT_ID = "IXYFAOHEBPMBAAAAAAAAABJYSJk="; + private static final String PERMISSION_ID = "IXYFAE9ZOwBGkyqWIsNKAA=="; + private static final String STORED_PRCEDURE_ID = "IXYFAOHEBPMCAAAAAAAAgA=="; + private static final String TRIGGER_ID = "IXYFAOHEBPMCAAAAAAAAcA=="; + private static final String USER_DEFINED_FUNCTION_ID = "IXYFAOHEBPMBAAAAAAAAYA=="; + private static final String USER_ID = "IXYFAE9ZOwA="; + private static final String CONFLICT_ID = "k6d9ALgBmD8BAAAAAAAAQA=="; + + private static final String DATABASE_FULL_NAME = "dbs/IXYFAA==/"; + private static final String DOCUMENT_COLLECTION_FULL_NAME = "dbs/IXYFAA==/colls/IXYFAOHEBPM=/"; + private static final String DOCUMENT_FULL_NAME = "dbs/IXYFAA==/colls/IXYFAOHEBPM=/docs/IXYFAOHEBPMBAAAAAAAAAA==/"; + private static final String STORED_PRCEDURE_FULL_NAME = "dbs/IXYFAA==/colls/IXYFAOHEBPM=/sprocs/IXYFAOHEBPMCAAAAAAAAgA==/"; + private static final String USER_DEFINED_FUNCTION_FULL_NAME = "dbs/IXYFAA==/colls/IXYFAOHEBPM=/udfs/IXYFAOHEBPMBAAAAAAAAYA==/"; + private static final String USER_FULL_NAME = "dbs/IXYFAA==/users/IXYFAE9ZOwA=/"; + private static final String PERMISSION_FULL_NAME = "dbs/IXYFAA==/users/IXYFAE9ZOwA=/permissions/IXYFAE9ZOwBGkyqWIsNKAA==/"; + private static final String ATTACHMENT_FULL_NAME = "dbs/IXYFAA==/colls/IXYFAOHEBPM=/docs/IXYFAOHEBPMBAAAAAAAAAA==/attachments/IXYFAOHEBPMBAAAAAAAAABJYSJk=/"; + private static final String TRIGGER_FULL_NAME = "dbs/IXYFAA==/colls/IXYFAOHEBPM=/triggers/IXYFAOHEBPMCAAAAAAAAcA==/"; + private static final String CONFLICT_FULL_NAME = "dbs/k6d9AA==/colls/k6d9ALgBmD8=/conflicts/k6d9ALgBmD8BAAAAAAAAQA==/"; + + private static final String INCORRECT = "incorrect"; + + @Test(groups = { "unit" }) + public void validateResourceID() { + assertThat(PathsHelper.validateResourceId(ResourceType.Database, DATABASE_ID)).isTrue(); + assertThat(PathsHelper.validateResourceId(ResourceType.DocumentCollection, DATABASE_COLLECTION_ID)).isTrue(); + assertThat(PathsHelper.validateResourceId(ResourceType.Document, DOCUMENT_ID)).isTrue(); + assertThat(PathsHelper.validateResourceId(ResourceType.Attachment, ATTACHMENT_ID)).isTrue(); + assertThat(PathsHelper.validateResourceId(ResourceType.Permission, PERMISSION_ID)).isTrue(); + assertThat(PathsHelper.validateResourceId(ResourceType.StoredProcedure, STORED_PRCEDURE_ID)).isTrue(); + assertThat(PathsHelper.validateResourceId(ResourceType.Trigger, TRIGGER_ID)).isTrue(); + assertThat(PathsHelper.validateResourceId(ResourceType.UserDefinedFunction, USER_DEFINED_FUNCTION_ID)).isTrue(); + assertThat(PathsHelper.validateResourceId(ResourceType.User, USER_ID)).isTrue(); + assertThat(PathsHelper.validateResourceId(ResourceType.Conflict, CONFLICT_ID)).isTrue(); + + assertThat(PathsHelper.validateResourceId(ResourceType.Database, DATABASE_ID + INCORRECT)).isFalse(); + assertThat(PathsHelper.validateResourceId(ResourceType.DocumentCollection, DATABASE_COLLECTION_ID + INCORRECT)).isFalse(); + assertThat(PathsHelper.validateResourceId(ResourceType.Document, DOCUMENT_ID + INCORRECT)).isFalse(); + assertThat(PathsHelper.validateResourceId(ResourceType.Attachment, 
ATTACHMENT_ID + INCORRECT)).isFalse(); + assertThat(PathsHelper.validateResourceId(ResourceType.Permission, PERMISSION_ID + INCORRECT)).isFalse(); + assertThat(PathsHelper.validateResourceId(ResourceType.StoredProcedure, STORED_PRCEDURE_ID + INCORRECT)).isFalse(); + assertThat(PathsHelper.validateResourceId(ResourceType.Trigger, TRIGGER_ID + INCORRECT)).isFalse(); + assertThat(PathsHelper.validateResourceId(ResourceType.UserDefinedFunction, USER_DEFINED_FUNCTION_ID + INCORRECT)).isFalse(); + assertThat(PathsHelper.validateResourceId(ResourceType.User, USER_ID + INCORRECT)).isFalse(); + assertThat(PathsHelper.validateResourceId(ResourceType.Conflict, CONFLICT_ID + INCORRECT)).isFalse(); + } + + @Test(groups = { "unit" }) + public void validateResourceFullName() { + assertThat(PathsHelper.validateResourceFullName(ResourceType.Database, DATABASE_FULL_NAME)).isTrue(); + assertThat(PathsHelper.validateResourceFullName(ResourceType.DocumentCollection, DOCUMENT_COLLECTION_FULL_NAME)).isTrue(); + assertThat(PathsHelper.validateResourceFullName(ResourceType.Document, DOCUMENT_FULL_NAME)).isTrue(); + assertThat(PathsHelper.validateResourceFullName(ResourceType.Attachment, ATTACHMENT_FULL_NAME)).isTrue(); + assertThat(PathsHelper.validateResourceFullName(ResourceType.Permission, PERMISSION_FULL_NAME)).isTrue(); + assertThat(PathsHelper.validateResourceFullName(ResourceType.User, USER_FULL_NAME)).isTrue(); + assertThat(PathsHelper.validateResourceFullName(ResourceType.Conflict, CONFLICT_FULL_NAME)).isTrue(); + assertThat(PathsHelper.validateResourceFullName(ResourceType.UserDefinedFunction, USER_DEFINED_FUNCTION_FULL_NAME)).isTrue(); + assertThat(PathsHelper.validateResourceFullName(ResourceType.StoredProcedure, STORED_PRCEDURE_FULL_NAME)).isTrue(); + assertThat(PathsHelper.validateResourceFullName(ResourceType.Trigger, TRIGGER_FULL_NAME)).isTrue(); + + assertThat(PathsHelper.validateResourceFullName(ResourceType.Database, DATABASE_FULL_NAME + INCORRECT)).isFalse(); + assertThat(PathsHelper.validateResourceFullName(ResourceType.DocumentCollection, DOCUMENT_COLLECTION_FULL_NAME + INCORRECT)).isFalse(); + assertThat(PathsHelper.validateResourceFullName(ResourceType.Document, DOCUMENT_FULL_NAME + INCORRECT)).isFalse(); + assertThat(PathsHelper.validateResourceFullName(ResourceType.Attachment, ATTACHMENT_FULL_NAME + INCORRECT)).isFalse(); + assertThat(PathsHelper.validateResourceFullName(ResourceType.Permission, PERMISSION_FULL_NAME + INCORRECT)).isFalse(); + assertThat(PathsHelper.validateResourceFullName(ResourceType.User, USER_FULL_NAME + INCORRECT)).isFalse(); + assertThat(PathsHelper.validateResourceFullName(ResourceType.Conflict, CONFLICT_FULL_NAME + INCORRECT)).isFalse(); + assertThat(PathsHelper.validateResourceFullName(ResourceType.UserDefinedFunction, USER_DEFINED_FUNCTION_FULL_NAME + INCORRECT)).isFalse(); + assertThat(PathsHelper.validateResourceFullName(ResourceType.StoredProcedure, STORED_PRCEDURE_FULL_NAME + INCORRECT)).isFalse(); + assertThat(PathsHelper.validateResourceFullName(ResourceType.Trigger, TRIGGER_FULL_NAME + INCORRECT)).isFalse(); + + } + + @Test(groups = {"unit"}) + public void generatePathAtDBLevel() { + RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.DatabaseAccount); + String path = PathsHelper.generatePath(ResourceType.DatabaseAccount, rxDocumentServiceRequest, rxDocumentServiceRequest.isFeed); + assertThat(path).isEqualTo(Paths.DATABASE_ACCOUNT_PATH_SEGMENT + "/"); + + rxDocumentServiceRequest = 
RxDocumentServiceRequest.create(OperationType.Create, ResourceType.Database); + path = PathsHelper.generatePath(ResourceType.Database, rxDocumentServiceRequest, rxDocumentServiceRequest.isFeed); + assertThat(path).isEqualTo(Paths.DATABASES_PATH_SEGMENT + "/"); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/RenameCollectionAwareClientRetryPolicyTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/RenameCollectionAwareClientRetryPolicyTest.java new file mode 100644 index 0000000000000..7a03819197c2e --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/RenameCollectionAwareClientRetryPolicyTest.java @@ -0,0 +1,155 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.BadRequestException; +import com.azure.data.cosmos.ConnectionPolicy; +import com.azure.data.cosmos.NotFoundException; +import com.azure.data.cosmos.internal.directconnectivity.WFConstants; +import com.azure.data.cosmos.internal.caches.RxClientCollectionCache; +import io.netty.handler.timeout.ReadTimeoutException; +import org.mockito.Mockito; +import org.testng.annotations.Test; +import reactor.core.publisher.Mono; + +import static com.azure.data.cosmos.internal.ClientRetryPolicyTest.validateSuccess; +import static org.assertj.core.api.Assertions.assertThat; + +public class RenameCollectionAwareClientRetryPolicyTest { + + private final static int TIMEOUT = 10000; + + @Test(groups = "unit", timeOut = TIMEOUT) + public void onBeforeSendRequestNotInvoked() { + GlobalEndpointManager endpointManager = Mockito.mock(GlobalEndpointManager.class); + Mockito.doReturn(Mono.empty()).when(endpointManager).refreshLocationAsync(Mockito.eq(null)); + + IRetryPolicyFactory retryPolicyFactory = new RetryPolicy(endpointManager, ConnectionPolicy.defaultPolicy()); + RxClientCollectionCache rxClientCollectionCache = Mockito.mock(RxClientCollectionCache.class); + + ISessionContainer sessionContainer = Mockito.mock(ISessionContainer.class); + RenameCollectionAwareClientRetryPolicy renameCollectionAwareClientRetryPolicy = new RenameCollectionAwareClientRetryPolicy(sessionContainer + , rxClientCollectionCache + , retryPolicyFactory.getRequestPolicy()); + + Exception exception = ReadTimeoutException.INSTANCE; + + RxDocumentServiceRequest dsr = RxDocumentServiceRequest.createFromName( + OperationType.Create, "/dbs/db/colls/col/docs/docId", ResourceType.Document); + dsr.requestContext = Mockito.mock(DocumentServiceRequestContext.class); + + Mono shouldRetry = + renameCollectionAwareClientRetryPolicy.shouldRetry(exception); + validateSuccess(shouldRetry, ShouldRetryValidator.builder() + .withException(exception) + .shouldRetry(false) + .build()); + + Mockito.verifyZeroInteractions(endpointManager); + } + + @Test(groups = "unit", timeOut = TIMEOUT) + public void shouldRetryWithNotFoundStatusCode() { + GlobalEndpointManager endpointManager = Mockito.mock(GlobalEndpointManager.class); + Mockito.doReturn(Mono.empty()).when(endpointManager).refreshLocationAsync(Mockito.eq(null)); + IRetryPolicyFactory retryPolicyFactory = new RetryPolicy(endpointManager, ConnectionPolicy.defaultPolicy()); + RxClientCollectionCache rxClientCollectionCache = Mockito.mock(RxClientCollectionCache.class); + + ISessionContainer sessionContainer = Mockito.mock(ISessionContainer.class); + RenameCollectionAwareClientRetryPolicy renameCollectionAwareClientRetryPolicy = new RenameCollectionAwareClientRetryPolicy(sessionContainer + , rxClientCollectionCache + , retryPolicyFactory.getRequestPolicy()); + RxDocumentServiceRequest request = RxDocumentServiceRequest.createFromName( + OperationType.Create, "/dbs/db/colls/col/docs/docId", ResourceType.Document); + request.requestContext = Mockito.mock(DocumentServiceRequestContext.class); + renameCollectionAwareClientRetryPolicy.onBeforeSendRequest(request); + + NotFoundException notFoundException = new NotFoundException(); + + Mono singleShouldRetry = renameCollectionAwareClientRetryPolicy + .shouldRetry(notFoundException); + validateSuccess(singleShouldRetry, ShouldRetryValidator.builder() + .withException(notFoundException) + .shouldRetry(false) + .build()); + } + + @Test(groups = "unit", timeOut = TIMEOUT) + public void 
shouldRetryWithNotFoundStatusCodeAndReadSessionNotAvailableSubStatusCode() { + GlobalEndpointManager endpointManager = Mockito.mock(GlobalEndpointManager.class); + Mockito.doReturn(Mono.empty()).when(endpointManager).refreshLocationAsync(Mockito.eq(null)); + IRetryPolicyFactory retryPolicyFactory = new RetryPolicy(endpointManager, ConnectionPolicy.defaultPolicy()); + RxClientCollectionCache rxClientCollectionCache = Mockito.mock(RxClientCollectionCache.class); + + ISessionContainer sessionContainer = Mockito.mock(ISessionContainer.class); + RenameCollectionAwareClientRetryPolicy renameCollectionAwareClientRetryPolicy = new RenameCollectionAwareClientRetryPolicy(sessionContainer + , rxClientCollectionCache + , retryPolicyFactory.getRequestPolicy()); + RxDocumentServiceRequest request = RxDocumentServiceRequest.createFromName( + OperationType.Create, "/dbs/db/colls/col/docs/docId", ResourceType.Document); + request.requestContext = Mockito.mock(DocumentServiceRequestContext.class); + request.requestContext.resolvedCollectionRid = "rid_0"; + renameCollectionAwareClientRetryPolicy.onBeforeSendRequest(request); + + NotFoundException notFoundException = new NotFoundException(); + notFoundException.responseHeaders().put(WFConstants.BackendHeaders.SUB_STATUS, + Integer.toString(HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)); + + DocumentCollection documentCollection = new DocumentCollection(); + documentCollection.resourceId("rid_1"); + + Mockito.when(rxClientCollectionCache.resolveCollectionAsync(request)).thenReturn(Mono.just(documentCollection)); + + Mono singleShouldRetry = renameCollectionAwareClientRetryPolicy + .shouldRetry(notFoundException); + validateSuccess(singleShouldRetry, ShouldRetryValidator.builder() + .nullException() + .shouldRetry(true) + .build()); + } + + /** + * No retry on bad request exception + */ + @Test(groups = "unit", timeOut = TIMEOUT) + public void shouldRetryWithGenericException() { + GlobalEndpointManager endpointManager = Mockito.mock(GlobalEndpointManager.class); + Mockito.doReturn(Mono.empty()).when(endpointManager).refreshLocationAsync(Mockito.eq(null)); + IRetryPolicyFactory retryPolicyFactory = new RetryPolicy(endpointManager, ConnectionPolicy.defaultPolicy()); + RxClientCollectionCache rxClientCollectionCache = Mockito.mock(RxClientCollectionCache.class); + + ISessionContainer sessionContainer = Mockito.mock(ISessionContainer.class); + RenameCollectionAwareClientRetryPolicy renameCollectionAwareClientRetryPolicy = new RenameCollectionAwareClientRetryPolicy(sessionContainer + , rxClientCollectionCache + , retryPolicyFactory.getRequestPolicy()); + RxDocumentServiceRequest request = RxDocumentServiceRequest.createFromName( + OperationType.Create, "/dbs/db/colls/col/docs/docId", ResourceType.Document); + request.requestContext = Mockito.mock(DocumentServiceRequestContext.class); + renameCollectionAwareClientRetryPolicy.onBeforeSendRequest(request); + + Mono singleShouldRetry = renameCollectionAwareClientRetryPolicy + .shouldRetry(new BadRequestException()); + IRetryPolicy.ShouldRetryResult shouldRetryResult = singleShouldRetry.block(); + assertThat(shouldRetryResult.shouldRetry).isFalse(); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/ResourceResponseValidator.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/ResourceResponseValidator.java new file mode 100644 index 0000000000000..8a2ca2755902f --- /dev/null +++ 
b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/ResourceResponseValidator.java @@ -0,0 +1,359 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.CompositePath; +import com.azure.data.cosmos.IndexingMode; +import com.azure.data.cosmos.PermissionMode; +import com.azure.data.cosmos.Resource; +import com.azure.data.cosmos.SpatialSpec; +import com.azure.data.cosmos.SpatialType; +import com.azure.data.cosmos.TriggerOperation; +import com.azure.data.cosmos.TriggerType; +import org.assertj.core.api.Condition; + +import java.time.Instant; +import java.time.OffsetDateTime; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map.Entry; + +import static org.assertj.core.api.Assertions.assertThat; + +public interface ResourceResponseValidator { + + static Builder builder() { + return new Builder(); + } + + void validate(ResourceResponse resourceResponse); + + class Builder { + private List> validators = new ArrayList<>(); + + public ResourceResponseValidator build() { + return new ResourceResponseValidator() { + + @SuppressWarnings({ "rawtypes", "unchecked" }) + @Override + public void validate(ResourceResponse resourceResponse) { + for (ResourceResponseValidator validator : validators) { + validator.validate(resourceResponse); + } + } + }; + } + + public Builder withId(final String resourceId) { + validators.add(new ResourceResponseValidator() { + + @Override + public void validate(ResourceResponse resourceResponse) { + assertThat(resourceResponse.getResource()).isNotNull(); + assertThat(resourceResponse.getResource().id()).as("check Resource Id").isEqualTo(resourceId); + } + }); + return this; + } + + public Builder nullResource() { + validators.add(new ResourceResponseValidator() { + + @Override + public void validate(ResourceResponse resourceResponse) { + assertThat(resourceResponse.getResource()).isNull(); + } + }); + return this; + } + + public Builder withProperty(String propertyName, Condition validatingCondition) { + validators.add(new ResourceResponseValidator() { + + @Override + public void validate(ResourceResponse resourceResponse) { + assertThat(resourceResponse.getResource()).isNotNull(); + assertThat(resourceResponse.getResource().get(propertyName)).is(validatingCondition); + + } + }); + return 
this; + } + + public Builder withProperty(String propertyName, Object value) { + validators.add(new ResourceResponseValidator() { + + @Override + public void validate(ResourceResponse resourceResponse) { + assertThat(resourceResponse.getResource()).isNotNull(); + assertThat(resourceResponse.getResource().get(propertyName)).isEqualTo(value); + + } + }); + return this; + } + + + public Builder withTimestampIsAfterOrEqualTo(Instant time) { + validators.add(new ResourceResponseValidator() { + + @Override + public void validate(ResourceResponse resourceResponse) { + assertThat(resourceResponse.getResource()).isNotNull(); + assertThat(resourceResponse.getResource().timestamp()).isNotNull(); + OffsetDateTime d = resourceResponse.getResource().timestamp(); + System.out.println(d.toString()); + assertThat(d.toInstant()).isAfterOrEqualTo(time); + } + }); + return this; + } + + public Builder withTimestampIsBeforeOrEqualTo(Instant time) { + validators.add(new ResourceResponseValidator() { + + @Override + public void validate(ResourceResponse resourceResponse) { + assertThat(resourceResponse.getResource()).isNotNull(); + assertThat(resourceResponse.getResource().timestamp()).isNotNull(); + OffsetDateTime d = resourceResponse.getResource().timestamp(); + assertThat(d.toInstant()).isBeforeOrEqualTo(time); + } + }); + return this; + } + + public Builder withPermissionMode(PermissionMode mode) { + validators.add(new ResourceResponseValidator() { + + @Override + public void validate(ResourceResponse resourceResponse) { + assertThat(resourceResponse.getResource().getPermissionMode()).isEqualTo(mode); + } + }); + return this; + } + + public Builder withPermissionResourceLink(String link) { + validators.add(new ResourceResponseValidator() { + + @Override + public void validate(ResourceResponse resourceResponse) { + assertThat(resourceResponse.getResource().getResourceLink()).isEqualTo(link); + } + }); + return this; + } + + public Builder indexingMode(IndexingMode mode) { + validators.add(new ResourceResponseValidator() { + + @Override + public void validate(ResourceResponse resourceResponse) { + assertThat(resourceResponse.getResource()).isNotNull(); + assertThat(resourceResponse.getResource().getIndexingPolicy()).isNotNull(); + assertThat(resourceResponse.getResource().getIndexingPolicy().indexingMode()).isEqualTo(mode); + } + }); + return this; + } + + public Builder withStoredProcedureBody(String functionBody) { + validators.add(new ResourceResponseValidator() { + + @Override + public void validate(ResourceResponse resourceResponse) { + assertThat(resourceResponse.getResource().getBody()).isEqualTo(functionBody); + } + }); + return this; + } + + public Builder withUserDefinedFunctionBody(String functionBody) { + validators.add(new ResourceResponseValidator() { + + @Override + public void validate(ResourceResponse resourceResponse) { + assertThat(resourceResponse.getResource().getBody()).isEqualTo(functionBody); + } + }); + return this; + } + + + public Builder withTriggerBody(String functionBody) { + validators.add(new ResourceResponseValidator() { + + @Override + public void validate(ResourceResponse resourceResponse) { + assertThat(resourceResponse.getResource().getBody()).isEqualTo(functionBody); + } + }); + return this; + } + + public Builder notNullEtag() { + validators.add(new ResourceResponseValidator() { + + @Override + public void validate(ResourceResponse resourceResponse) { + assertThat(resourceResponse.getResource()).isNotNull(); + assertThat(resourceResponse.getResource().etag()).isNotNull(); 
+ } + }); + return this; + } + + public Builder notEmptySelfLink() { + validators.add(new ResourceResponseValidator() { + + @Override + public void validate(ResourceResponse resourceResponse) { + assertThat(resourceResponse.getResource()).isNotNull(); + assertThat(resourceResponse.getResource().selfLink()).isNotEmpty(); + } + }); + return this; + } + + public Builder withTriggerInternals(TriggerType type, TriggerOperation op) { + validators.add(new ResourceResponseValidator() { + + @Override + public void validate(ResourceResponse resourceResponse) { + assertThat(resourceResponse.getResource().getTriggerType()).isEqualTo(type); + assertThat(resourceResponse.getResource().getTriggerOperation()).isEqualTo(op); + } + }); + return this; + } + + public Builder withOfferThroughput(int throughput) { + validators.add(new ResourceResponseValidator() { + + @Override + public void validate(ResourceResponse resourceResponse) { + assertThat(resourceResponse.getResource().getThroughput()) + .isEqualTo(throughput); + } + }); + return this; + } + + public Builder validatePropertyCondition(String key, Condition condition) { + validators.add(new ResourceResponseValidator() { + + @Override + public void validate(ResourceResponse resourceResponse) { + assertThat(resourceResponse.getResource()).isNotNull(); + assertThat(resourceResponse.getResource().get(key)).is(condition); + + } + }); + return this; + } + + public Builder withCompositeIndexes(Collection> compositeIndexesWritten) { + validators.add(new ResourceResponseValidator() { + + @Override + public void validate(ResourceResponse resourceResponse) { + Iterator> compositeIndexesReadIterator = resourceResponse.getResource() + .getIndexingPolicy().compositeIndexes().iterator(); + Iterator> compositeIndexesWrittenIterator = compositeIndexesWritten.iterator(); + + ArrayList readIndexesStrings = new ArrayList(); + ArrayList writtenIndexesStrings = new ArrayList(); + + while (compositeIndexesReadIterator.hasNext() && compositeIndexesWrittenIterator.hasNext()) { + Iterator compositeIndexReadIterator = compositeIndexesReadIterator.next().iterator(); + Iterator compositeIndexWrittenIterator = compositeIndexesWrittenIterator.next().iterator(); + + StringBuilder readIndexesString = new StringBuilder(); + StringBuilder writtenIndexesString = new StringBuilder(); + + while (compositeIndexReadIterator.hasNext() && compositeIndexWrittenIterator.hasNext()) { + CompositePath compositePathRead = compositeIndexReadIterator.next(); + CompositePath compositePathWritten = compositeIndexWrittenIterator.next(); + + readIndexesString.append(compositePathRead.path() + ":" + compositePathRead.order() + ";"); + writtenIndexesString.append(compositePathWritten.path() + ":" + compositePathRead.order() + ";"); + } + + readIndexesStrings.add(readIndexesString.toString()); + writtenIndexesStrings.add(writtenIndexesString.toString()); + } + + assertThat(readIndexesStrings).containsExactlyInAnyOrderElementsOf(writtenIndexesStrings); + } + }); + return this; + } + + public Builder withSpatialIndexes(Collection spatialIndexes) { + validators.add(new ResourceResponseValidator() { + + @Override + public void validate(ResourceResponse resourceResponse) { + Iterator spatialIndexesReadIterator = resourceResponse.getResource() + .getIndexingPolicy().spatialIndexes().iterator(); + Iterator spatialIndexesWrittenIterator = spatialIndexes.iterator(); + + HashMap> readIndexMap = new HashMap>(); + HashMap> writtenIndexMap = new HashMap>(); + + while (spatialIndexesReadIterator.hasNext() && 
spatialIndexesWrittenIterator.hasNext()) { + SpatialSpec spatialSpecRead = spatialIndexesReadIterator.next(); + SpatialSpec spatialSpecWritten = spatialIndexesWrittenIterator.next(); + + String readPath = spatialSpecRead.path() + ":"; + String writtenPath = spatialSpecWritten.path() + ":"; + + ArrayList readSpatialTypes = new ArrayList(); + ArrayList writtenSpatialTypes = new ArrayList(); + + Iterator spatialTypesReadIterator = spatialSpecRead.spatialTypes().iterator(); + Iterator spatialTypesWrittenIterator = spatialSpecWritten.spatialTypes().iterator(); + + while (spatialTypesReadIterator.hasNext() && spatialTypesWrittenIterator.hasNext()) { + readSpatialTypes.add(spatialTypesReadIterator.next()); + writtenSpatialTypes.add(spatialTypesWrittenIterator.next()); + } + + readIndexMap.put(readPath, readSpatialTypes); + writtenIndexMap.put(writtenPath, writtenSpatialTypes); + } + + for (Entry> entry : readIndexMap.entrySet()) { + assertThat(entry.getValue()) + .containsExactlyInAnyOrderElementsOf(writtenIndexMap.get(entry.getKey())); + } + } + }); + return this; + } + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/ResourceValidator.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/ResourceValidator.java new file mode 100644 index 0000000000000..78a019628e461 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/ResourceValidator.java @@ -0,0 +1,72 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.Resource; + +import java.util.ArrayList; +import java.util.List; + +import static org.assertj.core.api.Assertions.assertThat; + +public interface ResourceValidator { + + void validate(T v); + + class Builder { + private List> validators = new ArrayList<>(); + + public ResourceValidator build() { + return new ResourceValidator() { + + @SuppressWarnings({ "rawtypes", "unchecked" }) + @Override + public void validate(T v) { + for (ResourceValidator validator : validators) { + validator.validate(v); + } + } + }; + } + + public Builder areEqual(T expectedValue) { + validators.add(new ResourceValidator() { + @Override + public void validate(T v) { + + assertThat(v.getMap().keySet()) + .describedAs("number of fields"). 
+ hasSize(expectedValue.getMap().keySet().size()); + for(String key: expectedValue.getMap().keySet()) { + // compare the actual field value against the expected one + assertThat(v.get(key)) + .describedAs("value for " + key) + .isEqualTo(expectedValue.get(key)); + } + } + }); + return this; + } + + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/RetryAnalyzer.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/RetryAnalyzer.java new file mode 100644 index 0000000000000..1289516c55037 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/RetryAnalyzer.java @@ -0,0 +1,53 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE.
+ */ + + +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.internal.TestConfigurations; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testng.ITestResult; +import org.testng.util.RetryAnalyzerCount; + +import java.util.concurrent.TimeUnit; + +public class RetryAnalyzer extends RetryAnalyzerCount { + private final Logger logger = LoggerFactory.getLogger(RetryAnalyzer.class); + private final int waitBetweenRetriesInSeconds = 120; + + public RetryAnalyzer() { + this.setCount(Integer.parseInt(TestConfigurations.MAX_RETRY_LIMIT)); + } + + @Override + public boolean retryMethod(ITestResult result) { + try { + TimeUnit.SECONDS.sleep(waitBetweenRetriesInSeconds); + } catch (InterruptedException e) { + return false; + } + + return true; + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/RetryCreateDocumentTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/RetryCreateDocumentTest.java new file mode 100644 index 0000000000000..061694d03f1b3 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/RetryCreateDocumentTest.java @@ -0,0 +1,194 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.CosmosError; +import com.google.common.collect.ImmutableMap; +import org.mockito.Mockito; +import org.mockito.stubbing.Answer; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; + +import java.lang.reflect.Method; +import java.time.Duration; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.mockito.Matchers.anyObject; +import static org.mockito.Mockito.doAnswer; + +public class RetryCreateDocumentTest extends TestSuiteBase { + + private SpyClientUnderTestFactory.ClientWithGatewaySpy client; + + private Database database; + private DocumentCollection collection; + + @Factory(dataProvider = "clientBuilders") + public RetryCreateDocumentTest(AsyncDocumentClient.Builder clientBuilder) { + super(clientBuilder); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void retryDocumentCreate() throws Exception { + // create a document to ensure collection is cached + client.createDocument(collection.selfLink(), getDocumentDefinition(), null, false).single().block(); + + Document docDefinition = getDocumentDefinition(); + + Flux> createObservable = client + .createDocument(collection.selfLink(), docDefinition, null, false); + AtomicInteger count = new AtomicInteger(); + + doAnswer((Answer>) invocation -> { + RxDocumentServiceRequest req = (RxDocumentServiceRequest) invocation.getArguments()[0]; + if (req.getOperationType() != OperationType.Create) { + return client.getOrigGatewayStoreModel().processMessage(req); + } + + int currentAttempt = count.getAndIncrement(); + if (currentAttempt == 0) { + Map header = ImmutableMap.of( + HttpConstants.HttpHeaders.SUB_STATUS, + Integer.toString(HttpConstants.SubStatusCodes.PARTITION_KEY_MISMATCH)); + + return Flux.error(BridgeInternal.createCosmosClientException(HttpConstants.StatusCodes.BADREQUEST, new CosmosError() , header)); + } else { + return client.getOrigGatewayStoreModel().processMessage(req); + } + }).when(client.getSpyGatewayStoreModel()).processMessage(anyObject()); + + // validate + ResourceResponseValidator validator = new ResourceResponseValidator.Builder() + .withId(docDefinition.id()).build(); + validateSuccess(createObservable, validator); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void createDocument_noRetryOnNonRetriableFailure() throws Exception { + + AtomicInteger count = new AtomicInteger(); + doAnswer((Answer>) invocation -> { + RxDocumentServiceRequest req = (RxDocumentServiceRequest) invocation.getArguments()[0]; + + if (req.getResourceType() != ResourceType.Document) { + return client.getOrigGatewayStoreModel().processMessage(req); + } + + int currentAttempt = count.getAndIncrement(); + if (currentAttempt == 0) { + return client.getOrigGatewayStoreModel().processMessage(req); + } else { + Map header = ImmutableMap.of( + HttpConstants.HttpHeaders.SUB_STATUS, + Integer.toString(2)); + + return Flux.error(BridgeInternal.createCosmosClientException(1, new CosmosError() , header)); + } + }).when(client.getSpyGatewayStoreModel()).processMessage(anyObject()); + + // create a document to ensure collection is cached + client.createDocument(collection.selfLink(), getDocumentDefinition(), null, false) + .single() + .block(); + + 
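+ // The warm-up create above consumed the single pass-through attempt (currentAttempt == 0), so the
+ // create below is answered with the injected non-retriable error (status code 1, sub-status 2).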
Document docDefinition = getDocumentDefinition(); + + Flux> createObservable = client + .createDocument(collection.selfLink(), docDefinition, null, false); + + // validate + FailureValidator validator = new FailureValidator.Builder().statusCode(1).subStatusCode(2).build(); + validateFailure(createObservable, validator, TIMEOUT); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void createDocument_failImmediatelyOnNonRetriable() throws Exception { + // create a document to ensure collection is cached + client.createDocument(collection.selfLink(), getDocumentDefinition(), null, false).single().block(); + AtomicInteger count = new AtomicInteger(); + + doAnswer((Answer>) invocation -> { + RxDocumentServiceRequest req = (RxDocumentServiceRequest) invocation.getArguments()[0]; + if (req.getOperationType() != OperationType.Create) { + return client.getOrigGatewayStoreModel().processMessage(req); + } + int currentAttempt = count.getAndIncrement(); + if (currentAttempt == 0) { + Map header = ImmutableMap.of( + HttpConstants.HttpHeaders.SUB_STATUS, + Integer.toString(2)); + + return Flux.error(BridgeInternal.createCosmosClientException(1, new CosmosError() , header)); + } else { + return client.getOrigGatewayStoreModel().processMessage(req); + } + }).when(client.getSpyGatewayStoreModel()).processMessage(anyObject()); + + Document docDefinition = getDocumentDefinition(); + + Flux> createObservable = client + .createDocument(collection.selfLink(), docDefinition, null, false); + // validate + + FailureValidator validator = new FailureValidator.Builder().statusCode(1).subStatusCode(2).build(); + validateFailure(createObservable.timeout(Duration.ofMillis(100)), validator); + } + + @BeforeMethod(groups = { "simple" }) + public void beforeMethod(Method method) { + Mockito.reset(client.getSpyGatewayStoreModel()); + } + + @BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT) + public void beforeClass() { + // set up the client + client = SpyClientUnderTestFactory.createClientWithGatewaySpy(clientBuilder()); + + database = SHARED_DATABASE; + collection = SHARED_SINGLE_PARTITION_COLLECTION; + } + + private Document getDocumentDefinition() { + String uuid = UUID.randomUUID().toString(); + Document doc = new Document(String.format("{ " + + "\"id\": \"%s\", " + + "\"mypk\": \"%s\", " + + "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]" + + "}" + , uuid, uuid)); + return doc; + } + + @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeClose(client); + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/RetryThrottleTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/RetryThrottleTest.java new file mode 100644 index 0000000000000..1f8e1c2fe381a --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/RetryThrottleTest.java @@ -0,0 +1,158 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission 
notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.ConnectionPolicy; +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.RetryOptions; +import org.mockito.stubbing.Answer; +import org.testng.annotations.AfterClass; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; + +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Matchers.anyObject; +import static org.mockito.Mockito.doAnswer; + +public class RetryThrottleTest extends TestSuiteBase { + private final static int TIMEOUT = 10000; + private final static int TOTAL_DOCS = 500; + private final static int LARGE_TIMEOUT = 30000; + + private SpyClientUnderTestFactory.ClientWithGatewaySpy client; + private Database database; + private DocumentCollection collection; + + @Test(groups = { "long" }, timeOut = LARGE_TIMEOUT, enabled = false) + public void retryCreateDocumentsOnSpike() throws Exception { + ConnectionPolicy policy = new ConnectionPolicy(); + RetryOptions retryOptions = new RetryOptions(); + retryOptions.maxRetryAttemptsOnThrottledRequests(Integer.MAX_VALUE); + retryOptions.maxRetryWaitTimeInSeconds(LARGE_TIMEOUT); + policy.retryOptions(retryOptions); + + AsyncDocumentClient.Builder builder = new AsyncDocumentClient.Builder() + .withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(policy) + .withConsistencyLevel(ConsistencyLevel.EVENTUAL); + + client = SpyClientUnderTestFactory.createClientWithGatewaySpy(builder); + + // create a document to ensure collection is cached + client.createDocument(getCollectionLink(collection), getDocumentDefinition(), null, false).blockFirst(); + + List>> list = new ArrayList<>(); + for(int i = 0; i < TOTAL_DOCS; i++) { + Flux> obs = client.createDocument(getCollectionLink(collection), getDocumentDefinition(), null, false); + list.add(obs); + } + + // registers a spy to count number of invocation + AtomicInteger totalCount = new AtomicInteger(); + AtomicInteger successCount = new AtomicInteger(); + + doAnswer((Answer>) invocation -> { + RxDocumentServiceRequest req = (RxDocumentServiceRequest) invocation.getArguments()[0]; + if (req.getResourceType() == ResourceType.Document && req.getOperationType() == OperationType.Create) { + // increment the counter per Document CREATE operations + totalCount.incrementAndGet(); + } + return client.getOrigGatewayStoreModel().processMessage(req).doOnNext(rsp -> successCount.incrementAndGet()); + }).when(client.getSpyGatewayStoreModel()).processMessage(anyObject()); + + List> rsps = Flux.merge(Flux.fromIterable(list), 100).collectList().single().block(); + 
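+ // With unbounded throttle retries every create should eventually succeed, so successCount must equal
+ // TOTAL_DOCS, while totalCount can be larger because it also counts retried attempts.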
System.out.println("total: " + totalCount.get()); + assertThat(rsps).hasSize(TOTAL_DOCS); + assertThat(successCount.get()).isEqualTo(TOTAL_DOCS); + System.out.println("total count is " + totalCount.get()); + } + + @Test(groups = { "long" }, timeOut = TIMEOUT, enabled = false) + public void retryDocumentCreate() throws Exception { + client = SpyClientUnderTestFactory.createClientWithGatewaySpy(createGatewayRxDocumentClient()); + + // create a document to ensure collection is cached + client.createDocument(getCollectionLink(collection), getDocumentDefinition(), null, false).blockFirst(); + + Document docDefinition = getDocumentDefinition(); + + Flux> createObservable = client + .createDocument(collection.selfLink(), docDefinition, null, false); + AtomicInteger count = new AtomicInteger(); + + doAnswer((Answer>) invocation -> { + RxDocumentServiceRequest req = (RxDocumentServiceRequest) invocation.getArguments()[0]; + if (req.getOperationType() != OperationType.Create) { + return client.getOrigGatewayStoreModel().processMessage(req); + } + int currentAttempt = count.getAndIncrement(); + if (currentAttempt == 0) { + return Flux.error(BridgeInternal.createCosmosClientException(HttpConstants.StatusCodes.TOO_MANY_REQUESTS)); + } else { + return client.getOrigGatewayStoreModel().processMessage(req); + } + }).when(client.getSpyGatewayStoreModel()).processMessage(anyObject()); + + // validate + ResourceResponseValidator validator = new ResourceResponseValidator.Builder() + .withId(docDefinition.id()).build(); + validateSuccess(createObservable, validator, TIMEOUT); + } + + @AfterMethod(groups = { "long" }, enabled = false) + private void afterMethod() { + safeClose(client); + } + + @BeforeClass(groups = { "long" }, timeOut = SETUP_TIMEOUT, enabled = false) + public void beforeClass() { + // set up the client + database = SHARED_DATABASE; + collection = SHARED_SINGLE_PARTITION_COLLECTION; + } + + private Document getDocumentDefinition() { + String uuid = UUID.randomUUID().toString(); + Document doc = new Document(String.format("{ " + + "\"id\": \"%s\", " + + "\"mypk\": \"%s\", " + + "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]" + + "}" + , uuid, uuid)); + return doc; + } + + @AfterClass(groups = { "long" }, timeOut = SHUTDOWN_TIMEOUT, enabled = false) + public void afterClass() { + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/RetryUtilsTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/RetryUtilsTest.java new file mode 100644 index 0000000000000..9d4a156948f42 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/RetryUtilsTest.java @@ -0,0 +1,158 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.GoneException; +import com.azure.data.cosmos.internal.directconnectivity.StoreResponse; +import com.azure.data.cosmos.internal.directconnectivity.StoreResponseValidator; +import com.azure.data.cosmos.internal.IRetryPolicy.ShouldRetryResult; +import io.reactivex.subscribers.TestSubscriber; +import org.mockito.Matchers; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; +import reactor.core.publisher.Mono; + +import java.time.Duration; +import java.util.concurrent.TimeUnit; +import java.util.function.Function; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; + +public class RetryUtilsTest { + IRetryPolicy retryPolicy; + Function, Mono> callbackMethod; + Function, Mono> inBackoffAlternateCallbackMethod; + private static final Duration minBackoffForInBackoffCallback = Duration.ofMillis(10); + private static final int TIMEOUT = 30000; + private static final Duration BACK_OFF_DURATION = Duration.ofMillis(20); + private StoreResponse storeResponse; + + @BeforeClass(groups = { "unit" }) + public void beforeClass() throws Exception { + retryPolicy = Mockito.mock(IRetryPolicy.class); + callbackMethod = Mockito.mock(Function.class); + inBackoffAlternateCallbackMethod = Mockito.mock(Function.class); + storeResponse = getStoreResponse(); + } + + /** + * This method will make sure we are throwing original exception in case of + * ShouldRetryResult.noRetry() instead of Single.error(null). + */ + @Test(groups = { "unit" }, timeOut = TIMEOUT) + public void toRetryWithAlternateFuncWithNoRetry() { + Function> onErrorFunc = RetryUtils.toRetryWithAlternateFunc(callbackMethod, + retryPolicy, inBackoffAlternateCallbackMethod, minBackoffForInBackoffCallback); + Mockito.when(retryPolicy.shouldRetry(Matchers.any())).thenReturn(Mono.just(ShouldRetryResult.noRetry())); + Mono response = onErrorFunc.apply(new GoneException()); + validateFailure(response, TIMEOUT, GoneException.class); + } + + /** + * This method will test retries on callbackMethod, eventually returning success + * response after some failures and making sure it failed for at least specific + * number before passing. 
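+ * The callback is stubbed to fail three times with GoneException before succeeding, so exactly four
+ * invocations are expected.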
+ */ + @Test(groups = { "unit" }, timeOut = TIMEOUT) + public void toRetryWithAlternateFuncTestingMethodOne() { + Function> onErrorFunc = RetryUtils.toRetryWithAlternateFunc(callbackMethod, + retryPolicy, null, minBackoffForInBackoffCallback); + + toggleMockFuncBtwFailureSuccess(callbackMethod); + Mockito.when(retryPolicy.shouldRetry(Matchers.any())) + .thenReturn(Mono.just(ShouldRetryResult.retryAfter(BACK_OFF_DURATION))); + Mono response = onErrorFunc.apply(new GoneException()); + StoreResponseValidator validator = StoreResponseValidator.create().withStatus(storeResponse.getStatus()) + .withContent(storeResponse.getResponseBody()).build(); + validateSuccess(response, validator, TIMEOUT); + Mockito.verify(callbackMethod, Mockito.times(4)).apply(Matchers.any()); + } + + /** + * This method will test retries on inBackoffAlternateCallbackMethod, eventually + * returning success response after some failures and making sure it failed for + * at least specific number before passing. + */ + @Test(groups = { "unit" }, timeOut = TIMEOUT) + public void toRetryWithAlternateFuncTestingMethodTwo() { + Function> onErrorFunc = RetryUtils.toRetryWithAlternateFunc(callbackMethod, + retryPolicy, inBackoffAlternateCallbackMethod, minBackoffForInBackoffCallback); + Mockito.when(callbackMethod.apply(Matchers.any())).thenReturn(Mono.error(new GoneException())); + toggleMockFuncBtwFailureSuccess(inBackoffAlternateCallbackMethod); + Mockito.when(retryPolicy.shouldRetry(Matchers.any())) + .thenReturn(Mono.just(ShouldRetryResult.retryAfter(BACK_OFF_DURATION))); + Mono response = onErrorFunc.apply(new GoneException()); + StoreResponseValidator validator = StoreResponseValidator.create().withStatus(storeResponse.getStatus()) + .withContent(storeResponse.getResponseBody()).build(); + validateSuccess(response, validator, TIMEOUT); + Mockito.verify(inBackoffAlternateCallbackMethod, Mockito.times(4)).apply(Matchers.any()); + } + + private void validateFailure(Mono single, long timeout, Class class1) { + + TestSubscriber testSubscriber = new TestSubscriber<>(); + single.subscribe(testSubscriber); + testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS); + testSubscriber.assertNotComplete(); + testSubscriber.assertTerminated(); + assertThat(testSubscriber.errorCount()).isEqualTo(1); + if (!(testSubscriber.getEvents().get(1).get(0).getClass().equals(class1))) { + fail("Not expecting " + testSubscriber.getEvents().get(1).get(0)); + } + } + + private void validateSuccess(Mono single, StoreResponseValidator validator, long timeout) { + + TestSubscriber testSubscriber = new TestSubscriber<>(); + single.subscribe(testSubscriber); + testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS); + assertThat(testSubscriber.valueCount()).isEqualTo(1); + validator.validate(testSubscriber.values().get(0)); + } + + private void toggleMockFuncBtwFailureSuccess( + Function, Mono> method) { + Mockito.when(method.apply(Matchers.any())).thenAnswer(new Answer>() { + + private int count = 0; + + @Override + public Mono answer(InvocationOnMock invocation) throws Throwable { + if (count++ < 3) { + return Mono.error(new GoneException()); + } + return Mono.just(storeResponse); + } + }); + } + + private StoreResponse getStoreResponse() { + StoreResponseBuilder storeResponseBuilder = new StoreResponseBuilder().withContent("Test content") + .withStatus(200); + return storeResponseBuilder.build(); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/RxDocumentClientUnderTest.java 
b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/RxDocumentClientUnderTest.java new file mode 100644 index 0000000000000..f7415164cfd3e --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/RxDocumentClientUnderTest.java @@ -0,0 +1,86 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.ClientUnderTestBuilder; +import com.azure.data.cosmos.ConnectionPolicy; +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.internal.http.HttpClient; +import com.azure.data.cosmos.internal.http.HttpRequest; +import com.azure.data.cosmos.internal.http.HttpResponse; +import org.mockito.Mockito; +import org.mockito.stubbing.Answer; +import reactor.core.publisher.Mono; + +import java.net.URI; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import static org.mockito.Mockito.doAnswer; + +/** + * This class in conjunction with {@link ClientUnderTestBuilder} + * provides the functionality for spying the client behavior and the http requests sent. 
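+ * Every outgoing {@link HttpRequest} is recorded in the {@code httpRequests} list before being forwarded
+ * to the original, non-spied {@link HttpClient}.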
+ */ +public class RxDocumentClientUnderTest extends RxDocumentClientImpl { + + public HttpClient spyHttpClient; + public HttpClient origHttpClient; + + public List httpRequests = Collections.synchronizedList(new ArrayList<>()); + + public RxDocumentClientUnderTest(URI serviceEndpoint, + String masterKey, + ConnectionPolicy connectionPolicy, + ConsistencyLevel consistencyLevel, + Configs configs) { + super(serviceEndpoint, masterKey, connectionPolicy, consistencyLevel, configs); + init(); + } + + RxGatewayStoreModel createRxGatewayProxy( + ISessionContainer sessionContainer, + ConsistencyLevel consistencyLevel, + QueryCompatibilityMode queryCompatibilityMode, + UserAgentContainer userAgentContainer, + GlobalEndpointManager globalEndpointManager, + HttpClient rxOrigClient) { + + origHttpClient = rxOrigClient; + spyHttpClient = Mockito.spy(rxOrigClient); + + doAnswer((Answer>) invocationOnMock -> { + HttpRequest httpRequest = invocationOnMock.getArgumentAt(0, HttpRequest.class); + httpRequests.add(httpRequest); + return origHttpClient.send(httpRequest); + }).when(spyHttpClient).send(Mockito.any(HttpRequest.class)); + + return super.createRxGatewayProxy(sessionContainer, + consistencyLevel, + queryCompatibilityMode, + userAgentContainer, + globalEndpointManager, + spyHttpClient); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/RxDocumentServiceRequestTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/RxDocumentServiceRequestTest.java new file mode 100644 index 0000000000000..f6463cd8530cd --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/RxDocumentServiceRequestTest.java @@ -0,0 +1,450 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal; + +import org.apache.commons.collections4.map.HashedMap; +import org.apache.commons.lang3.StringUtils; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; + +import java.nio.charset.StandardCharsets; +import java.util.UUID; + +import static org.assertj.core.api.Assertions.assertThat; + +public class RxDocumentServiceRequestTest { + + private final static String DOCUMENT_DEFINITION = "{ " + "\"id\": \"%s\", " + "\"mypk\": \"%s\", " + + "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]" + "}"; + private static final String PARTITION_KEY_VALUE = "1"; + + private final String DOCUMENT_URL_WITH_ID = "/dbs/IXYFAA==/colls/IXYFAOHEBPM=/docs/IXYFAOHEBPMBAAAAAAAAAA==/"; + private final String DOCUMENT_URL_WITH_NAME = "/dbs/testDB/colls/testColl/docs/testDoc/"; + private final String DOCUMENT_URL_WITH_ID_WITHOUT_SLASH = "dbs/IXYFAA==/colls/IXYFAOHEBPM=/docs/IXYFAOHEBPMBAAAAAAAAAA==/"; + private final String DOCUMENT_URL_WITH_NAME_WITHOUT_SLASH = "dbs/testDB/colls/testColl/docs/testDoc/"; + + private static final String DATABASE_URL = "/dbs/IXYFAA==/"; + private static final String DOCUMENT_COLLECTION_URL = "/dbs/IXYFAA==/colls/IXYFAOHEBPM=/"; + private static final String STORED_PRCEDURE_URL = "/dbs/IXYFAA==/colls/IXYFAOHEBPM=/sprocs/IXYFAOHEBPMCAAAAAAAAgA==/"; + private static final String USER_DEFINED_FUNCTION_URL = "/dbs/IXYFAA==/colls/IXYFAOHEBPM=/udfs/IXYFAOHEBPMBAAAAAAAAYA==/"; + private static final String USER_URL = "/dbs/IXYFAA==/users/IXYFAE9ZOwA=/"; + private static final String PERMISSION_URL = "/dbs/IXYFAA==/users/IXYFAE9ZOwA=/permissions/IXYFAE9ZOwBGkyqWIsNKAA==/"; + private static final String ATTACHMENT_URL = "/dbs/IXYFAA==/colls/IXYFAOHEBPM=/docs/IXYFAOHEBPMBAAAAAAAAAA==/attachments/IXYFAOHEBPMBAAAAAAAAABJYSJk=/"; + private static final String TRIGGER_URL = "/dbs/IXYFAA==/colls/IXYFAOHEBPM=/triggers/IXYFAOHEBPMCAAAAAAAAcA==/"; + private static final String CONFLICT_URL = "/dbs/k6d9AA==/colls/k6d9ALgBmD8=/conflicts/k6d9ALgBmD8BAAAAAAAAQA==/"; + + @DataProvider(name = "documentUrl") + public Object[][] documentUrlWithId() { + return new Object[][] { { DOCUMENT_URL_WITH_ID, DOCUMENT_URL_WITH_NAME, OperationType.Read }, + { DOCUMENT_URL_WITH_ID, DOCUMENT_URL_WITH_NAME, OperationType.Delete }, + { DOCUMENT_URL_WITH_ID, DOCUMENT_URL_WITH_NAME, OperationType.Replace }, + { DOCUMENT_URL_WITH_ID_WITHOUT_SLASH, DOCUMENT_URL_WITH_NAME_WITHOUT_SLASH, OperationType.Read }, + { DOCUMENT_URL_WITH_ID_WITHOUT_SLASH, DOCUMENT_URL_WITH_NAME_WITHOUT_SLASH, OperationType.Delete }, + { DOCUMENT_URL_WITH_ID_WITHOUT_SLASH, DOCUMENT_URL_WITH_NAME_WITHOUT_SLASH, OperationType.Replace }, }; + } + + @DataProvider(name = "resourceUrlWithOperationType") + public Object[][] resourceOperation() { + return new Object[][] { { DATABASE_URL, ResourceType.Database, OperationType.Read }, + { DOCUMENT_COLLECTION_URL, ResourceType.DocumentCollection, OperationType.Read }, + { STORED_PRCEDURE_URL, ResourceType.StoredProcedure, OperationType.Read }, + { USER_DEFINED_FUNCTION_URL, ResourceType.UserDefinedFunction, OperationType.Read }, + { USER_URL, ResourceType.User, OperationType.Read }, + { PERMISSION_URL, ResourceType.Permission, OperationType.Read }, + { ATTACHMENT_URL, ResourceType.Attachment, OperationType.Read }, + { TRIGGER_URL, ResourceType.Trigger, OperationType.Read }, + { CONFLICT_URL, ResourceType.Conflict, OperationType.Read }, + + { DATABASE_URL, ResourceType.Database, 
OperationType.Create }, + { DOCUMENT_COLLECTION_URL, ResourceType.DocumentCollection, OperationType.Create }, + { STORED_PRCEDURE_URL, ResourceType.StoredProcedure, OperationType.Create }, + { USER_DEFINED_FUNCTION_URL, ResourceType.UserDefinedFunction, OperationType.Create }, + { USER_URL, ResourceType.User, OperationType.Create }, + { PERMISSION_URL, ResourceType.Permission, OperationType.Create }, + { ATTACHMENT_URL, ResourceType.Attachment, OperationType.Create }, + { TRIGGER_URL, ResourceType.Trigger, OperationType.Create }, + { CONFLICT_URL, ResourceType.Conflict, OperationType.Create }, + + { DATABASE_URL, ResourceType.Database, OperationType.Delete }, + { DOCUMENT_COLLECTION_URL, ResourceType.DocumentCollection, OperationType.Delete }, + { STORED_PRCEDURE_URL, ResourceType.StoredProcedure, OperationType.Delete }, + { USER_DEFINED_FUNCTION_URL, ResourceType.UserDefinedFunction, OperationType.Delete }, + { USER_URL, ResourceType.User, OperationType.Delete }, + { PERMISSION_URL, ResourceType.Permission, OperationType.Delete }, + { ATTACHMENT_URL, ResourceType.Attachment, OperationType.Delete }, + { TRIGGER_URL, ResourceType.Trigger, OperationType.Delete }, + { CONFLICT_URL, ResourceType.Conflict, OperationType.Delete }, + + { DATABASE_URL, ResourceType.Database, OperationType.Replace }, + { DOCUMENT_COLLECTION_URL, ResourceType.DocumentCollection, OperationType.Replace }, + { STORED_PRCEDURE_URL, ResourceType.StoredProcedure, OperationType.Replace }, + { USER_DEFINED_FUNCTION_URL, ResourceType.UserDefinedFunction, OperationType.Replace }, + { USER_URL, ResourceType.User, OperationType.Replace }, + { PERMISSION_URL, ResourceType.Permission, OperationType.Replace }, + { ATTACHMENT_URL, ResourceType.Attachment, OperationType.Replace }, + { TRIGGER_URL, ResourceType.Trigger, OperationType.Replace }, + { CONFLICT_URL, ResourceType.Conflict, OperationType.Replace }, + + { DATABASE_URL, ResourceType.Database, OperationType.Query }, + { DOCUMENT_COLLECTION_URL, ResourceType.DocumentCollection, OperationType.Query }, + { STORED_PRCEDURE_URL, ResourceType.StoredProcedure, OperationType.Query }, + { USER_DEFINED_FUNCTION_URL, ResourceType.UserDefinedFunction, OperationType.Query }, + { USER_URL, ResourceType.User, OperationType.Query }, + { PERMISSION_URL, ResourceType.Permission, OperationType.Query }, + { ATTACHMENT_URL, ResourceType.Attachment, OperationType.Query }, + { TRIGGER_URL, ResourceType.Trigger, OperationType.Query }, + { CONFLICT_URL, ResourceType.Conflict, OperationType.Query }, + + { DATABASE_URL, ResourceType.Database, OperationType.Update }, + { DOCUMENT_COLLECTION_URL, ResourceType.DocumentCollection, OperationType.Update }, + { STORED_PRCEDURE_URL, ResourceType.StoredProcedure, OperationType.Update }, + { USER_DEFINED_FUNCTION_URL, ResourceType.UserDefinedFunction, OperationType.Update }, + { USER_URL, ResourceType.User, OperationType.Update }, + { PERMISSION_URL, ResourceType.Permission, OperationType.Update }, + { ATTACHMENT_URL, ResourceType.Attachment, OperationType.Update }, + { TRIGGER_URL, ResourceType.Trigger, OperationType.Update }, + { CONFLICT_URL, ResourceType.Conflict, OperationType.Update } }; + } + + @DataProvider(name = "resourceIdOrFullNameRequestAndOperationTypeData") + public Object[][] resourceIdOrFullNameRequestAndOperationTypeData() { + return new Object[][]{ + {"IXYFAA==", "dbs/testDB", ResourceType.Database, OperationType.Read}, + {"IXYFAA==", "dbs/testDB", ResourceType.Database, OperationType.Create}, + + {"IXYFAOHEBPM=", "dbs/testDB/colls/testColl", 
ResourceType.DocumentCollection, OperationType.Read}, + {"IXYFAOHEBPM=", "dbs/testDB/colls/testColl", ResourceType.DocumentCollection, OperationType.Create}, + {"IXYFAOHEBPM=", "dbs/testDB/colls/testColl", ResourceType.DocumentCollection, OperationType.Delete}, + {"IXYFAOHEBPM=", "dbs/testDB/colls/testColl", ResourceType.DocumentCollection, OperationType.Query}, + + {"IXYFAOHEBPMBAAAAAAAAAA==", "dbs/testDB/colls/testColl/docs/testDoc", ResourceType.Document, OperationType.Read}, + {"IXYFAOHEBPMBAAAAAAAAAA==", "dbs/testDB/colls/testColl/docs/testDoc", ResourceType.Document, OperationType.Create}, + {"IXYFAOHEBPMBAAAAAAAAAA==", "dbs/testDB/colls/testColl/docs/testDoc", ResourceType.Document, OperationType.Delete}, + {"IXYFAOHEBPMBAAAAAAAAAA==", "dbs/testDB/colls/testColl/docs/testDoc", ResourceType.Document, OperationType.Query}, + }; + } + + /** + * This test case will cover various create methods through resource url with Id in detail for document resource. + * @param documentUrlWithId Document url with id + * @param documentUrlWithName Document url with name + * @param operationType Operation type + */ + @Test(groups = { "unit" }, dataProvider = "documentUrl") + public void createWithResourceIdURL(String documentUrlWithId, String documentUrlWithName, + OperationType operationType) { + + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, + ResourceType.Document, + documentUrlWithId, + new HashedMap(), AuthorizationTokenType.PrimaryMasterKey); + + assertThat(request.authorizationTokenType).isEqualTo(AuthorizationTokenType.PrimaryMasterKey); + assertThat(request.getResourceAddress()).isEqualTo("IXYFAOHEBPMBAAAAAAAAAA=="); + assertThat(request.getResourceId()).isEqualTo("IXYFAOHEBPMBAAAAAAAAAA=="); + + request = RxDocumentServiceRequest.create(operationType, "IXYFAOHEBPMBAAAAAAAAAA==", ResourceType.Document, + new HashedMap(), AuthorizationTokenType.PrimaryReadonlyMasterKey); + assertThat(request.authorizationTokenType).isEqualTo(AuthorizationTokenType.PrimaryReadonlyMasterKey); + assertThat(request.getResourceAddress()).isEqualTo("IXYFAOHEBPMBAAAAAAAAAA=="); + assertThat(request.getResourceId()).isEqualTo("IXYFAOHEBPMBAAAAAAAAAA=="); + + Document document = getDocumentDefinition(); + request = RxDocumentServiceRequest.create(operationType, document, ResourceType.Document, documentUrlWithId, + new HashedMap(), AuthorizationTokenType.Invalid); + assertThat(request.authorizationTokenType).isEqualTo(AuthorizationTokenType.Invalid); + assertThat(request.getResourceAddress()).isEqualTo("IXYFAOHEBPMBAAAAAAAAAA=="); + assertThat(request.getResourceId()).isEqualTo("IXYFAOHEBPMBAAAAAAAAAA=="); + assertThat(request.getContent()).isEqualTo(document.toJson().getBytes(StandardCharsets.UTF_8)); + + Flux inputStream = Flux.just(document.toJson().getBytes(StandardCharsets.UTF_8)); + request = RxDocumentServiceRequest.create(operationType, ResourceType.Document, documentUrlWithId, inputStream, + new HashedMap(), AuthorizationTokenType.SecondaryMasterKey); + assertThat(request.authorizationTokenType).isEqualTo(AuthorizationTokenType.SecondaryMasterKey); + assertThat(request.getResourceAddress()).isEqualTo("IXYFAOHEBPMBAAAAAAAAAA=="); + assertThat(request.getResourceId()).isEqualTo("IXYFAOHEBPMBAAAAAAAAAA=="); + assertThat(request.getContentObservable()).isEqualTo(inputStream); + + // Creating one request without giving AuthorizationTokenType , it should take + // PrimaryMasterKey by default + request = RxDocumentServiceRequest.create(operationType, + ResourceType.Document, + 
documentUrlWithId, + new HashedMap()); + + assertThat(request.authorizationTokenType).isEqualTo(AuthorizationTokenType.PrimaryMasterKey); + assertThat(request.getResourceAddress()).isEqualTo("IXYFAOHEBPMBAAAAAAAAAA=="); + assertThat(request.getResourceId()).isEqualTo("IXYFAOHEBPMBAAAAAAAAAA=="); + + } + + /** + * This test case will cover various create method through resource url with name in detail for document resource. + * @param documentUrlWithId Document url with id + * @param documentUrlWithName Document url with name + * @param operationType Operation type + */ + @Test(groups = { "unit" }, dataProvider = "documentUrl") + public void createWithResourceNameURL(String documentUrlWithId, String documentUrlWithName, + OperationType operationType) { + + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, + ResourceType.Document, + documentUrlWithName, + new HashedMap(), AuthorizationTokenType.PrimaryMasterKey); + + assertThat(request.authorizationTokenType).isEqualTo(AuthorizationTokenType.PrimaryMasterKey); + assertThat(request.getResourceAddress()) + .isEqualTo(StringUtils.removeEnd(StringUtils.removeStart(documentUrlWithName, Paths.ROOT), Paths.ROOT)); + assertThat(request.getResourceId()).isNull(); + + Document document = getDocumentDefinition(); + Flux inputStream = Flux.just(document.toJson().getBytes(StandardCharsets.UTF_8)); + request = RxDocumentServiceRequest.create(operationType, + ResourceType.Document, + documentUrlWithName, + inputStream, + new HashedMap(), + AuthorizationTokenType.SecondaryMasterKey); + + assertThat(request.authorizationTokenType).isEqualTo(AuthorizationTokenType.SecondaryMasterKey); + assertThat(request.getResourceAddress()) + .isEqualTo(StringUtils.removeEnd(StringUtils.removeStart(documentUrlWithName, Paths.ROOT), Paths.ROOT)); + assertThat(request.getResourceId()).isNull(); + assertThat(request.getContentObservable()).isEqualTo(inputStream); + + // Creating one request without giving AuthorizationTokenType , it should take + // PrimaryMasterKey by default + request = RxDocumentServiceRequest.create(operationType, + ResourceType.Document, + documentUrlWithName, + new HashedMap()); + + assertThat(request.authorizationTokenType).isEqualTo(AuthorizationTokenType.PrimaryMasterKey); + assertThat(request.getResourceAddress()) + .isEqualTo(StringUtils.removeEnd(StringUtils.removeStart(documentUrlWithName, Paths.ROOT), Paths.ROOT)); + assertThat(request.getResourceId()).isNull(); + } + + + /** + * This will cover sanity for most of the combination of different source with various + * operation. + * @param resourceUrl Resource Url + * @param resourceType Resource Type + * @param operationType Operation type + */ + @Test(groups = { "unit" }, dataProvider = "resourceUrlWithOperationType") + public void createDifferentResourceRequestWithDiffOperation(String resourceUrl, ResourceType resourceType, + OperationType operationType) { + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, resourceType, resourceUrl, + new HashedMap(), AuthorizationTokenType.PrimaryMasterKey); + assertThat(resourceUrl.contains(request.getResourceAddress())).isTrue(); + assertThat(resourceUrl.contains(request.getResourceId())).isTrue(); + assertThat(request.getResourceType()).isEqualTo(resourceType); + assertThat(request.getOperationType()).isEqualTo(operationType); + assertThat(request.getHeaders()).isNotNull(); + } + + /** + * This will test all the create method without request path. 
+ * + * @param resourceId Resource id + * @param resourceType Resource Type + * @param operationType Operation type + */ + @Test(groups = {"unit"}, dataProvider = "resourceIdOrFullNameRequestAndOperationTypeData") + public void createRequestWithoutPath(String resourceId, String resourceFullName, ResourceType resourceType, + OperationType operationType) { + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, resourceId, resourceType, null); + assertThat(request.getHeaders()).isNotNull(); + assertThat(request.getResourceAddress()).isEqualTo(resourceId); + assertThat(request.getResourceId()).isEqualTo(resourceId); + assertThat(request.getResourceType()).isEqualTo(resourceType); + assertThat(request.getOperationType()).isEqualTo(operationType); + assertThat(request.authorizationTokenType).isEqualTo(AuthorizationTokenType.PrimaryMasterKey); + + + request = RxDocumentServiceRequest.create(operationType, resourceId, resourceType, null, AuthorizationTokenType.ResourceToken); + assertThat(request.getHeaders()).isNotNull(); + assertThat(request.getResourceAddress()).isEqualTo(resourceId); + assertThat(request.getResourceId()).isEqualTo(resourceId); + assertThat(request.getResourceType()).isEqualTo(resourceType); + assertThat(request.getOperationType()).isEqualTo(operationType); + assertThat(request.authorizationTokenType).isEqualTo(AuthorizationTokenType.ResourceToken); + + Document document = getDocumentDefinition(); + request = RxDocumentServiceRequest.create(operationType, resourceId, resourceType, document, null); + assertThat(request.getHeaders()).isNotNull(); + assertThat(request.getResourceAddress()).isEqualTo(resourceId); + assertThat(request.getResourceId()).isEqualTo(resourceId); + assertThat(request.getResourceType()).isEqualTo(resourceType); + assertThat(request.getOperationType()).isEqualTo(operationType); + assertThat(request.authorizationTokenType).isEqualTo(AuthorizationTokenType.PrimaryMasterKey); + assertThat(request.getContent()).isEqualTo(document.toJson().getBytes(StandardCharsets.UTF_8)); + + request = RxDocumentServiceRequest.create(operationType, resourceId, resourceType, document, null, AuthorizationTokenType.ResourceToken); + assertThat(request.getHeaders()).isNotNull(); + assertThat(request.getResourceAddress()).isEqualTo(resourceId); + assertThat(request.getResourceId()).isEqualTo(resourceId); + assertThat(request.getResourceType()).isEqualTo(resourceType); + assertThat(request.getOperationType()).isEqualTo(operationType); + assertThat(request.authorizationTokenType).isEqualTo(AuthorizationTokenType.ResourceToken); + assertThat(request.getContent()).isEqualTo(document.toJson().getBytes(StandardCharsets.UTF_8)); + + request = RxDocumentServiceRequest.createFromName(operationType, resourceFullName, resourceType); + assertThat(request.getHeaders()).isNotNull(); + assertThat(request.getResourceAddress()).isEqualTo(resourceFullName); + assertThat(request.getResourceId()).isNull(); + assertThat(request.getIsNameBased()).isTrue(); + assertThat(request.getResourceType()).isEqualTo(resourceType); + assertThat(request.getOperationType()).isEqualTo(operationType); + assertThat(request.authorizationTokenType).isEqualTo(AuthorizationTokenType.PrimaryMasterKey); + + request = RxDocumentServiceRequest.createFromName(operationType, resourceFullName, resourceType, AuthorizationTokenType.ResourceToken); + assertThat(request.getHeaders()).isNotNull(); + assertThat(request.getResourceAddress()).isEqualTo(resourceFullName); + 
assertThat(request.getResourceId()).isNull(); + assertThat(request.getIsNameBased()).isTrue(); + assertThat(request.getResourceType()).isEqualTo(resourceType); + assertThat(request.getOperationType()).isEqualTo(operationType); + assertThat(request.authorizationTokenType).isEqualTo(AuthorizationTokenType.ResourceToken); + + request = RxDocumentServiceRequest.createFromName(operationType, document, resourceFullName, resourceType); + assertThat(request.getHeaders()).isNotNull(); + assertThat(request.getResourceAddress()).isEqualTo(resourceFullName); + assertThat(request.getResourceId()).isNull(); + assertThat(request.getIsNameBased()).isTrue(); + assertThat(request.getResourceType()).isEqualTo(resourceType); + assertThat(request.getOperationType()).isEqualTo(operationType); + assertThat(request.authorizationTokenType).isEqualTo(AuthorizationTokenType.PrimaryMasterKey); + assertThat(request.getContent()).isEqualTo(document.toJson().getBytes(StandardCharsets.UTF_8)); + + request = RxDocumentServiceRequest.createFromName(operationType, document, resourceFullName, resourceType, AuthorizationTokenType.ResourceToken); + assertThat(request.getHeaders()).isNotNull(); + assertThat(request.getResourceAddress()).isEqualTo(resourceFullName); + assertThat(request.getResourceId()).isNull(); + assertThat(request.getIsNameBased()).isTrue(); + assertThat(request.getResourceType()).isEqualTo(resourceType); + assertThat(request.getOperationType()).isEqualTo(operationType); + assertThat(request.authorizationTokenType).isEqualTo(AuthorizationTokenType.ResourceToken); + assertThat(request.getContent()).isEqualTo(document.toJson().getBytes(StandardCharsets.UTF_8)); + } + + @Test(groups = { "unit" }, dataProvider = "documentUrl") + public void isValidAddress(String documentUrlWithId, String documentUrlWithName, OperationType operationType) { + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, + ResourceType.Document, + documentUrlWithId, + new HashedMap()); + + assertThat(request.isValidAddress(ResourceType.Database)).isTrue(); + assertThat(request.isValidAddress(ResourceType.DocumentCollection)).isTrue(); + assertThat(request.isValidAddress(ResourceType.Document)).isTrue(); + assertThat(request.isValidAddress(ResourceType.Unknown)).isTrue(); + assertThat(request.isValidAddress(ResourceType.User)).isFalse(); + assertThat(request.isValidAddress(ResourceType.Trigger)).isFalse(); + assertThat(request.isValidAddress(ResourceType.Offer)).isFalse(); + assertThat(request.isValidAddress(ResourceType.Permission)).isFalse(); + assertThat(request.isValidAddress(ResourceType.Attachment)).isFalse(); + assertThat(request.isValidAddress(ResourceType.StoredProcedure)).isFalse(); + assertThat(request.isValidAddress(ResourceType.Conflict)).isFalse(); + assertThat(request.isValidAddress(ResourceType.PartitionKeyRange)).isFalse(); + + request = RxDocumentServiceRequest.create(operationType, + ResourceType.Document, + documentUrlWithName, + new HashedMap()); + + assertThat(request.isValidAddress(ResourceType.Document)).isTrue(); + assertThat(request.isValidAddress(ResourceType.Unknown)).isTrue(); + String collectionFullName = "/dbs/testDB/colls/testColl/"; + request = RxDocumentServiceRequest.create(operationType, ResourceType.DocumentCollection, collectionFullName, + new HashedMap()); + + assertThat(request.isValidAddress(ResourceType.DocumentCollection)).isTrue(); + assertThat(request.isValidAddress(ResourceType.Unknown)).isTrue(); + + String databaseFullName = "/dbs/testDB"; + request = 
RxDocumentServiceRequest.create(operationType, + ResourceType.Database, + databaseFullName, + new HashedMap()); + + assertThat(request.isValidAddress(ResourceType.Database)).isTrue(); + assertThat(request.isValidAddress(ResourceType.Unknown)).isTrue(); + + String permissionFullName = "/dbs/testDB/users/testUser/permissions/testPermission"; + request = RxDocumentServiceRequest.create(operationType, + ResourceType.Permission, + permissionFullName, + new HashedMap()); + + assertThat(request.isValidAddress(ResourceType.Permission)).isTrue(); + assertThat(request.isValidAddress(ResourceType.Unknown)).isTrue(); + + String triggerFullName = "/dbs/testDB/colls/testUser/triggers/testTrigger"; + request = RxDocumentServiceRequest.create(operationType, + ResourceType.Trigger, + triggerFullName, + new HashedMap()); + + assertThat(request.isValidAddress(ResourceType.Trigger)).isTrue(); + assertThat(request.isValidAddress(ResourceType.Unknown)).isTrue(); + + String attachmentFullName = "/dbs/testDB/colls/testUser/docs/testDoc/attachments/testAttachment"; + request = RxDocumentServiceRequest.create(operationType, + ResourceType.Attachment, + attachmentFullName, + new HashedMap()); + + assertThat(request.isValidAddress(ResourceType.Attachment)).isTrue(); + assertThat(request.isValidAddress(ResourceType.Unknown)).isTrue(); + } + + @Test(groups = { "unit" }, dataProvider = "documentUrl") + public void addPreferHeader(String documentUrlWithId, String documentUrlWithName, OperationType operationType) { + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, + ResourceType.Document, + documentUrlWithId, + new HashedMap()); + + request.addPreferHeader("preferHeaderName1", "preferHeaderValue1"); + assertThat(request.getHeaders().size()).isEqualTo(1); + assertThat(request.getHeaders().get(HttpConstants.HttpHeaders.PREFER)) + .isEqualTo("preferHeaderName1=preferHeaderValue1"); + + request.addPreferHeader("preferHeaderName2", "preferHeaderValue2"); + assertThat(request.getHeaders().size()).isEqualTo(1); + assertThat(request.getHeaders().get(HttpConstants.HttpHeaders.PREFER)) + .isEqualTo("preferHeaderName1=preferHeaderValue1;" + "preferHeaderName2=preferHeaderValue2"); + } + + private Document getDocumentDefinition() { + String uuid = UUID.randomUUID().toString(); + Document doc = new Document(String.format(DOCUMENT_DEFINITION, uuid, PARTITION_KEY_VALUE)); + return doc; + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/RxGatewayStoreModelTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/RxGatewayStoreModelTest.java new file mode 100644 index 0000000000000..41579731c3f5d --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/RxGatewayStoreModelTest.java @@ -0,0 +1,96 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.internal.http.HttpClient; +import com.azure.data.cosmos.internal.http.HttpRequest; +import io.netty.handler.timeout.ReadTimeoutException; +import io.reactivex.subscribers.TestSubscriber; +import org.mockito.Mockito; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.net.URL; +import java.util.concurrent.TimeUnit; + +import static org.assertj.core.api.Assertions.assertThat; + +public class RxGatewayStoreModelTest { + private final static int TIMEOUT = 10000; + + @Test(groups = "unit") + public void readTimeout() throws Exception { + ISessionContainer sessionContainer = Mockito.mock(ISessionContainer.class); + QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default; + UserAgentContainer userAgentContainer = new UserAgentContainer(); + GlobalEndpointManager globalEndpointManager = Mockito.mock(GlobalEndpointManager.class); + Mockito.doReturn(new URL("https://localhost")) + .when(globalEndpointManager).resolveServiceEndpoint(Mockito.any()); + HttpClient httpClient = Mockito.mock(HttpClient.class); + Mockito.doReturn(Mono.error(ReadTimeoutException.INSTANCE)) + .when(httpClient).send(Mockito.any(HttpRequest.class)); + + RxGatewayStoreModel storeModel = new RxGatewayStoreModel( + sessionContainer, + ConsistencyLevel.SESSION, + queryCompatibilityMode, + userAgentContainer, + globalEndpointManager, + httpClient); + + RxDocumentServiceRequest dsr = RxDocumentServiceRequest.createFromName( + OperationType.Read, "/dbs/db/colls/col/docs/docId", ResourceType.Document); + dsr.getHeaders().put("key", "value"); + dsr.requestContext = Mockito.mock(DocumentServiceRequestContext.class); + + Flux<RxDocumentServiceResponse> resp = storeModel.processMessage(dsr); + validateFailure(resp, FailureValidator.builder() + .instanceOf(CosmosClientException.class) + .causeInstanceOf(ReadTimeoutException.class) + .documentClientExceptionHeaderRequestContainsEntry("key", "value") + .statusCode(0).build()); + } + + public void validateFailure(Flux<RxDocumentServiceResponse> observable, + FailureValidator validator) { + validateFailure(observable, validator, TIMEOUT); + } + + public static void validateFailure(Flux<RxDocumentServiceResponse> observable, + FailureValidator validator, + long timeout) { + TestSubscriber<RxDocumentServiceResponse> testSubscriber = new TestSubscriber<>(); + observable.subscribe(testSubscriber); + testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS); + testSubscriber.assertNotComplete(); + testSubscriber.assertTerminated(); + assertThat(testSubscriber.errorCount()).isEqualTo(1); + validator.validate(testSubscriber.errors().get(0)); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/SessionContainerTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/SessionContainerTest.java new file mode 100644 index 0000000000000..170de82b96921 --- /dev/null +++
b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/SessionContainerTest.java @@ -0,0 +1,640 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.GatewayTestUtils; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import org.apache.commons.io.IOUtils; +import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.reflect.FieldUtils; +import org.mockito.Mockito; +import org.mockito.internal.util.collections.Sets; +import org.testng.annotations.Test; + +import java.util.HashMap; +import java.util.Map; +import java.util.Random; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; + +import static org.assertj.core.api.Assertions.assertThat; + +/** + * Tests for {@link SessionContainer} + */ +public class SessionContainerTest { + + private final static Random random = new Random(); + + @Test(groups = "unit") + public void sessionContainer() throws Exception { + SessionContainer sessionContainer = new SessionContainer("127.0.0.1"); + + int numCollections = 2; + int numPartitionKeyRangeIds = 5; + + for (int i = 0; i < numCollections; i++) { + String collectionResourceId = ResourceId.newDocumentCollectionId(getRandomDbId(), getRandomCollectionId() + i).getDocumentCollectionId().toString(); + String collectionFullName = "dbs/db1/colls/collName_" + i; + + for (int j = 0; j < numPartitionKeyRangeIds; j++) { + + String partitionKeyRangeId = "range_" + j; + String lsn = "1#" + j + "#4=90#5=2"; + + sessionContainer.setSessionToken( + collectionResourceId, + collectionFullName, + ImmutableMap.of(HttpConstants.HttpHeaders.SESSION_TOKEN, partitionKeyRangeId + ":" + lsn)); + } + } + + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.ReadFeed, ResourceType.DocumentCollection, + "dbs/db1/colls/collName_1", IOUtils.toInputStream("content1", "UTF-8"), new HashMap<>()); + + ISessionToken sessionToken = sessionContainer.resolvePartitionLocalSessionToken(request, "range_1"); + assertThat(sessionToken.getLSN()).isEqualTo(1); + + DocumentServiceRequestContext dsrContext = new DocumentServiceRequestContext(); + PartitionKeyRange resolvedPKRange = new PartitionKeyRange(); + resolvedPKRange.id("range_" + (numPartitionKeyRangeIds + 10)); + GatewayTestUtils.setParent(resolvedPKRange, ImmutableList.of("range_2", "range_x")); 
+ dsrContext.resolvedPartitionKeyRange = resolvedPKRange; + request.requestContext = dsrContext; + + sessionToken = sessionContainer.resolvePartitionLocalSessionToken(request, resolvedPKRange.id()); + assertThat(sessionToken.getLSN()).isEqualTo(2); + } + + @Test(groups = "unit") + public void setSessionToken_NoSessionTokenForPartitionKeyRangeId() throws Exception { + String collectionRid = "uf4PAK6T-Cw="; + long collectionRidAsLong = ResourceId.parse(collectionRid).getUniqueDocumentCollectionId(); + String partitionKeyRangeId = "test_range_id"; + String sessionToken = "1#100#1=20#2=5#3=30"; + String collectionName = "dbs/db1/colls/collName_1"; + + SessionContainer sessionContainer = new SessionContainer("127.0.0.1"); + + RxDocumentServiceRequest request1 = RxDocumentServiceRequest.create(OperationType.Create, ResourceType.Document, + collectionName + "/docs", IOUtils.toInputStream("content1", "UTF-8"), new HashMap<>()); + + Map<String, String> respHeaders = new HashMap<>(); + RxDocumentServiceResponse resp = Mockito.mock(RxDocumentServiceResponse.class); + Mockito.doReturn(respHeaders).when(resp).getResponseHeaders(); + respHeaders.put(HttpConstants.HttpHeaders.SESSION_TOKEN, partitionKeyRangeId + ":" + sessionToken); + respHeaders.put(HttpConstants.HttpHeaders.OWNER_FULL_NAME, collectionName); + respHeaders.put(HttpConstants.HttpHeaders.OWNER_ID, collectionRid); + sessionContainer.setSessionToken(request1, resp.getResponseHeaders()); + + ConcurrentHashMap<String, Long> collectionNameToCollectionResourceId = (ConcurrentHashMap<String, Long>) FieldUtils.readField(sessionContainer, "collectionNameToCollectionResourceId", true); + ConcurrentHashMap<Long, ConcurrentHashMap<String, ISessionToken>> collectionResourceIdToSessionTokens = (ConcurrentHashMap<Long, ConcurrentHashMap<String, ISessionToken>>) FieldUtils.readField(sessionContainer, "collectionResourceIdToSessionTokens", true); + assertThat(collectionNameToCollectionResourceId).hasSize(1); + assertThat(collectionResourceIdToSessionTokens).hasSize(1); + assertThat(collectionNameToCollectionResourceId.get(collectionName)).isEqualTo(collectionRidAsLong); + assertThat(collectionResourceIdToSessionTokens.get(collectionRidAsLong)).isNotNull(); + assertThat(collectionResourceIdToSessionTokens.get(collectionRidAsLong)).hasSize(1); + assertThat(collectionResourceIdToSessionTokens.get(collectionRidAsLong).get(partitionKeyRangeId).convertToString()).isEqualTo(sessionToken); + + RxDocumentServiceRequest request2 = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.Document, + collectionName + "/docs", IOUtils.toInputStream("", "UTF-8"), new HashMap<>()); + + ISessionToken resolvedSessionToken = sessionContainer.resolvePartitionLocalSessionToken(request2, partitionKeyRangeId); + assertThat(resolvedSessionToken.convertToString()).isEqualTo(sessionToken); + } + + @Test(groups = "unit") + public void setSessionToken_MergeOldWithNew() throws Exception { + String collectionRid = "uf4PAK6T-Cw="; + String collectionName = "dbs/db1/colls/collName_1"; + String initialSessionToken = "1#100#1=20#2=5#3=30"; + String newSessionTokenInServerResponse = "1#100#1=31#2=5#3=21"; + String partitionKeyRangeId = "test_range_id"; + String expectedMergedSessionToken = "1#100#1=31#2=5#3=30"; + + Map<String, String> respHeaders = new HashMap<>(); + + SessionContainer sessionContainer = new SessionContainer("127.0.0.1"); + + RxDocumentServiceRequest request1 = RxDocumentServiceRequest.create(OperationType.Create, ResourceType.Document, + collectionName + "/docs", IOUtils.toInputStream("content1", "UTF-8"), new HashMap<>()); + + RxDocumentServiceResponse resp = Mockito.mock(RxDocumentServiceResponse.class); +
Mockito.doReturn(respHeaders).when(resp).getResponseHeaders(); + respHeaders.put(HttpConstants.HttpHeaders.SESSION_TOKEN, partitionKeyRangeId + ":" + initialSessionToken); + respHeaders.put(HttpConstants.HttpHeaders.OWNER_FULL_NAME, collectionName); + respHeaders.put(HttpConstants.HttpHeaders.OWNER_ID, collectionRid); + sessionContainer.setSessionToken(request1, resp.getResponseHeaders()); + + resp = Mockito.mock(RxDocumentServiceResponse.class); + Mockito.doReturn(respHeaders).when(resp).getResponseHeaders(); + respHeaders.put(HttpConstants.HttpHeaders.SESSION_TOKEN, partitionKeyRangeId + ":" + newSessionTokenInServerResponse); + respHeaders.put(HttpConstants.HttpHeaders.OWNER_FULL_NAME, collectionName); + respHeaders.put(HttpConstants.HttpHeaders.OWNER_ID, collectionRid); + sessionContainer.setSessionToken(request1, resp.getResponseHeaders()); + + RxDocumentServiceRequest request2 = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.Document, + collectionName + "/docs", IOUtils.toInputStream("", "UTF-8"), new HashMap<>()); + + ISessionToken resolvedSessionToken = sessionContainer.resolvePartitionLocalSessionToken(request2, partitionKeyRangeId); + assertThat(resolvedSessionToken.convertToString()).isEqualTo(expectedMergedSessionToken); + } + + + @Test(groups = "unit") + public void resolveGlobalSessionTokenReturnsEmptyStringOnEmptyCache() { + SessionContainer sessionContainer = new SessionContainer("127.0.0.1"); + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.Document, + "dbs/db1/colls/collName/docs/doc1", new HashMap<>()); + assertThat(StringUtils.EMPTY).isEqualTo(sessionContainer.resolveGlobalSessionToken(request)); + } + + @Test(groups = "unit") + public void resolveGlobalSessionTokenReturnsEmptyStringOnCacheMiss() { + SessionContainer sessionContainer = new SessionContainer("127.0.0.1"); + String partitionKeyRangeId = "range_0"; + String documentCollectionId = ResourceId.newDocumentCollectionId(getRandomDbId(), getRandomCollectionId()).getDocumentCollectionId().toString(); + String initialSessionToken = "1#100#1=20#2=5#3=30"; + sessionContainer.setSessionToken(documentCollectionId, "dbs/db1/colls1/collName", + ImmutableMap.of(HttpConstants.HttpHeaders.SESSION_TOKEN, partitionKeyRangeId + ":" + initialSessionToken)); + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.Document, + "dbs/db1/colls1/collName2/docs/doc1", new HashMap<>()); + assertThat(StringUtils.EMPTY).isEqualTo(sessionContainer.resolveGlobalSessionToken(request)); + } + + @Test(groups = "unit") + public void resolveGlobalSessionTokenReturnsTokenMapUsingName() { + SessionContainer sessionContainer = new SessionContainer("127.0.0.1"); + String documentCollectionId = ResourceId.newDocumentCollectionId(getRandomDbId(), getRandomCollectionId()).getDocumentCollectionId().toString(); + String collectionFullName = "dbs/db1/colls1/collName"; + + sessionContainer.setSessionToken(documentCollectionId, collectionFullName, + ImmutableMap.of(HttpConstants.HttpHeaders.SESSION_TOKEN, "range_0:1#100#1=20#2=5#3=30")); + sessionContainer.setSessionToken(documentCollectionId, collectionFullName, + ImmutableMap.of(HttpConstants.HttpHeaders.SESSION_TOKEN, "range_1:1#101#1=20#2=5#3=30")); + + RxDocumentServiceRequest request = RxDocumentServiceRequest.createFromName(OperationType.Read, + collectionFullName + "/docs/doc1", ResourceType.Document); + String sessionToken = sessionContainer.resolveGlobalSessionToken(request); + Set 
tokens = Sets.newSet(sessionToken.split(",")); + + assertThat(tokens.size()).isEqualTo(2); + assertThat(tokens.contains("range_0:1#100#1=20#2=5#3=30")).isTrue(); + assertThat(tokens.contains("range_1:1#101#1=20#2=5#3=30")).isTrue(); + } + + @Test(groups = "unit") + public void resolveGlobalSessionTokenReturnsTokenMapUsingResourceId() { + SessionContainer sessionContainer = new SessionContainer("127.0.0.1"); + String documentCollectionId = ResourceId.newDocumentCollectionId(getRandomDbId(), getRandomCollectionId()).getDocumentCollectionId().toString(); + String collectionFullName = "dbs/db1/colls1/collName"; + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, + documentCollectionId, ResourceType.Document, new HashMap<>()); + + sessionContainer.setSessionToken(documentCollectionId, collectionFullName, + ImmutableMap.of(HttpConstants.HttpHeaders.SESSION_TOKEN, "range_0:1#100#1=20#2=5#3=30")); + sessionContainer.setSessionToken(documentCollectionId, collectionFullName, + ImmutableMap.of(HttpConstants.HttpHeaders.SESSION_TOKEN, "range_1:1#101#1=20#2=5#3=30")); + String sessionToken = sessionContainer.resolveGlobalSessionToken(request); + + Set tokens = Sets.newSet(sessionToken.split(",")); + assertThat(tokens.size()).isEqualTo(2); + assertThat(tokens.contains("range_0:1#100#1=20#2=5#3=30")).isTrue(); + assertThat(tokens.contains("range_1:1#101#1=20#2=5#3=30")).isTrue(); + } + + + @Test(groups = "unit") + public void resolveLocalSessionTokenReturnsTokenMapUsingName() { + SessionContainer sessionContainer = new SessionContainer("127.0.0.1"); + String documentCollectionId = ResourceId.newDocumentCollectionId(getRandomDbId(), getRandomCollectionId()).getDocumentCollectionId().toString(); + String collectionFullName = "dbs/db1/colls1/collName"; + + sessionContainer.setSessionToken(documentCollectionId, collectionFullName, + ImmutableMap.of(HttpConstants.HttpHeaders.SESSION_TOKEN, "range_0:1#100#1=20#2=5#3=30")); + sessionContainer.setSessionToken(documentCollectionId, collectionFullName, + ImmutableMap.of(HttpConstants.HttpHeaders.SESSION_TOKEN, "range_1:1#101#1=20#2=5#3=30")); + + RxDocumentServiceRequest request = RxDocumentServiceRequest.createFromName(OperationType.Read, + collectionFullName + "/docs/doc1", ResourceType.Document); + ISessionToken sessionToken = sessionContainer.resolvePartitionLocalSessionToken(request, "range_0"); + assertThat(sessionToken.getLSN()).isEqualTo(100); + sessionToken = sessionContainer.resolvePartitionLocalSessionToken(request, "range_1"); + assertThat(sessionToken.getLSN()).isEqualTo(101); + } + + @Test(groups = "unit") + public void resolveLocalSessionTokenReturnsTokenMapUsingResourceId() { + SessionContainer sessionContainer = new SessionContainer("127.0.0.1"); + String documentCollectionId = ResourceId.newDocumentCollectionId(getRandomDbId(), getRandomCollectionId()).getDocumentCollectionId().toString(); + String collectionFullName = "dbs/db1/colls1/collName"; + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, + documentCollectionId, ResourceType.Document, new HashMap<>()); + + sessionContainer.setSessionToken(documentCollectionId, collectionFullName, + ImmutableMap.of(HttpConstants.HttpHeaders.SESSION_TOKEN, "range_0:1#100#1=20#2=5#3=30")); + sessionContainer.setSessionToken(documentCollectionId, collectionFullName, + ImmutableMap.of(HttpConstants.HttpHeaders.SESSION_TOKEN, "range_1:1#101#1=20#2=5#3=30")); + + ISessionToken sessionToken = 
sessionContainer.resolvePartitionLocalSessionToken(request, "range_0"); + assertThat(sessionToken.getLSN()).isEqualTo(100); + sessionToken = sessionContainer.resolvePartitionLocalSessionToken(request, "range_1"); + assertThat(sessionToken.getLSN()).isEqualTo(101); + } + + @Test(groups = "unit") + public void resolveLocalSessionTokenReturnsNullOnPartitionMiss() { + SessionContainer sessionContainer = new SessionContainer("127.0.0.1"); + String documentCollectionId = ResourceId.newDocumentCollectionId(getRandomDbId(), getRandomCollectionId()).getDocumentCollectionId().toString(); + String collectionFullName = "dbs/db1/colls1/collName"; + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, + documentCollectionId, ResourceType.Document, new HashMap<>()); + + sessionContainer.setSessionToken(documentCollectionId, collectionFullName, + ImmutableMap.of(HttpConstants.HttpHeaders.SESSION_TOKEN, "range_0:1#100#1=20#2=5#3=30")); + sessionContainer.setSessionToken(documentCollectionId, collectionFullName, + ImmutableMap.of(HttpConstants.HttpHeaders.SESSION_TOKEN, "range_1:1#101#1=20#2=5#3=30")); + request.requestContext.resolvedPartitionKeyRange = new PartitionKeyRange(); + ISessionToken sessionToken = sessionContainer.resolvePartitionLocalSessionToken(request, "range_2"); + assertThat(sessionToken).isNull(); + } + + @Test(groups = "unit") + public void resolveLocalSessionTokenReturnsNullOnCollectionMiss() { + SessionContainer sessionContainer = new SessionContainer("127.0.0.1"); + int randomCollectionId = getRandomCollectionId(); + String documentCollectionId = ResourceId.newDocumentCollectionId(getRandomDbId(), randomCollectionId).getDocumentCollectionId().toString(); + String collectionFullName = "dbs/db1/colls1/collName"; + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, + ResourceId.newDocumentCollectionId(getRandomDbId(), randomCollectionId - 1).getDocumentCollectionId().toString(), + ResourceType.Document, new HashMap<>()); + + sessionContainer.setSessionToken(documentCollectionId, collectionFullName, + ImmutableMap.of(HttpConstants.HttpHeaders.SESSION_TOKEN, "range_0:1#100#1=20#2=5#3=30")); + sessionContainer.setSessionToken(documentCollectionId, collectionFullName, + ImmutableMap.of(HttpConstants.HttpHeaders.SESSION_TOKEN, "range_1:1#101#1=20#2=5#3=30")); + request.requestContext.resolvedPartitionKeyRange = new PartitionKeyRange(); + ISessionToken sessionToken = sessionContainer.resolvePartitionLocalSessionToken(request, "range_1"); + assertThat(sessionToken).isNull(); + } + + @Test(groups = "unit") + public void resolvePartitionLocalSessionTokenReturnsTokenOnParentMatch() { + SessionContainer sessionContainer = new SessionContainer("127.0.0.1"); + String documentCollectionId = ResourceId.newDocumentCollectionId(getRandomDbId(), getRandomCollectionId()).getDocumentCollectionId().toString(); + String collectionFullName = "dbs/db1/colls1/collName"; + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, + documentCollectionId, ResourceType.Document, new HashMap<>()); + + sessionContainer.setSessionToken(documentCollectionId, collectionFullName, + ImmutableMap.of(HttpConstants.HttpHeaders.SESSION_TOKEN, "range_0:1#100#1=20#2=5#3=30")); + sessionContainer.setSessionToken(documentCollectionId, collectionFullName, + ImmutableMap.of(HttpConstants.HttpHeaders.SESSION_TOKEN, "range_1:1#101#1=20#2=5#3=30")); + request.requestContext.resolvedPartitionKeyRange = new PartitionKeyRange(); + 
GatewayTestUtils.setParent(request.requestContext.resolvedPartitionKeyRange, ImmutableList.of("range_1")); + ISessionToken sessionToken = sessionContainer.resolvePartitionLocalSessionToken(request, "range_2"); + assertThat(sessionToken.getLSN()).isEqualTo(101); + } + + @Test(groups = "unit") + public void clearTokenByCollectionFullNameRemovesToken() { + SessionContainer sessionContainer = new SessionContainer("127.0.0.1"); + String documentCollectionId = ResourceId.newDocumentCollectionId(getRandomDbId(), getRandomCollectionId()).getDocumentCollectionId().toString(); + String collectionFullName = "dbs/db1/colls1/collName"; + + sessionContainer.setSessionToken(documentCollectionId, collectionFullName, + ImmutableMap.of(HttpConstants.HttpHeaders.SESSION_TOKEN, "range_0:1#100#1=20#2=5#3=30")); + + // Test resourceId based + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, + documentCollectionId, ResourceType.Document, new HashMap<>()); + ISessionToken sessionToken = sessionContainer.resolvePartitionLocalSessionToken(request, "range_0"); + assertThat(sessionToken.getLSN()).isEqualTo(100); + + // Test names based + request = RxDocumentServiceRequest.createFromName(OperationType.Read, + collectionFullName + "/docs/doc1", ResourceType.Document); + sessionToken = sessionContainer.resolvePartitionLocalSessionToken(request, "range_0"); + assertThat(sessionToken.getLSN()).isEqualTo(100); + + sessionContainer.clearTokenByCollectionFullName(collectionFullName); + + // Test resourceId based + request = RxDocumentServiceRequest.create(OperationType.Read, + documentCollectionId, ResourceType.Document, new HashMap<>()); + sessionToken = sessionContainer.resolvePartitionLocalSessionToken(request, "range_0"); + assertThat(sessionToken).isNull(); + + // Test names based + request = RxDocumentServiceRequest.createFromName(OperationType.Read, + collectionFullName + "/docs/doc1", ResourceType.Document); + sessionToken = sessionContainer.resolvePartitionLocalSessionToken(request, "range_0"); + assertThat(sessionToken).isNull(); + } + + @Test(groups = "unit") + public void clearTokenByResourceIdRemovesToken() { + SessionContainer sessionContainer = new SessionContainer("127.0.0.1"); + String documentCollectionId = ResourceId.newDocumentCollectionId(getRandomDbId(), getRandomCollectionId()).getDocumentCollectionId().toString(); + String collectionFullName = "dbs/db1/colls1/collName"; + + sessionContainer.setSessionToken(documentCollectionId, collectionFullName, + ImmutableMap.of(HttpConstants.HttpHeaders.SESSION_TOKEN, "range_0:1#100#1=20#2=5#3=30")); + + // Test resourceId based + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, + documentCollectionId, ResourceType.Document, new HashMap<>()); + ISessionToken sessionToken = sessionContainer.resolvePartitionLocalSessionToken(request, "range_0"); + assertThat(sessionToken.getLSN()).isEqualTo(100); + + // Test names based + request = RxDocumentServiceRequest.createFromName(OperationType.Read, + collectionFullName + "/docs/doc1", ResourceType.Document); + sessionToken = sessionContainer.resolvePartitionLocalSessionToken(request, "range_0"); + assertThat(sessionToken.getLSN()).isEqualTo(100); + + sessionContainer.clearTokenByResourceId(documentCollectionId); + + // Test resourceId based + request = RxDocumentServiceRequest.create(OperationType.Read, + documentCollectionId, ResourceType.Document, new HashMap<>()); + sessionToken = sessionContainer.resolvePartitionLocalSessionToken(request, 
"range_0"); + assertThat(sessionToken).isNull(); + + // Test names based + request = RxDocumentServiceRequest.createFromName(OperationType.Read, + collectionFullName + "/docs/doc1", ResourceType.Document); + sessionToken = sessionContainer.resolvePartitionLocalSessionToken(request, "range_0"); + assertThat(sessionToken).isNull(); + } + + @Test(groups = "unit") + public void clearTokenKeepsUnmatchedCollection() { + SessionContainer sessionContainer = new SessionContainer("127.0.0.1"); + int randomCollectionId = getRandomCollectionId(); + String documentCollectionId1 = ResourceId.newDocumentCollectionId(getRandomDbId(), randomCollectionId).getDocumentCollectionId().toString(); + String collectionFullName1 = "dbs/db1/colls1/collName1"; + + sessionContainer.setSessionToken(documentCollectionId1, collectionFullName1, + ImmutableMap.of(HttpConstants.HttpHeaders.SESSION_TOKEN, "range_0:1#100#1=20#2=5#3=30")); + + // Test resourceId based + RxDocumentServiceRequest request1 = RxDocumentServiceRequest.create(OperationType.Read, + documentCollectionId1, ResourceType.Document, new HashMap<>()); + String documentCollectionId2 = ResourceId.newDocumentCollectionId(getRandomDbId(), randomCollectionId - 1).getDocumentCollectionId().toString(); + String collectionFullName2 = "dbs/db1/colls1/collName2"; + + // Test resourceId based + RxDocumentServiceRequest request2 = RxDocumentServiceRequest.create(OperationType.Read, + documentCollectionId2, ResourceType.Document, new HashMap<>()); + + sessionContainer.setSessionToken(documentCollectionId2, collectionFullName2, + ImmutableMap.of(HttpConstants.HttpHeaders.SESSION_TOKEN, "range_0:1#100#1=20#2=5#3=30")); + + ISessionToken sessionToken = sessionContainer.resolvePartitionLocalSessionToken(request1, "range_0"); + assertThat(sessionToken.getLSN()).isEqualTo(100); + sessionToken = sessionContainer.resolvePartitionLocalSessionToken(request2, "range_0"); + assertThat(sessionToken.getLSN()).isEqualTo(100); + + sessionContainer.clearTokenByResourceId(documentCollectionId2); + + sessionToken = sessionContainer.resolvePartitionLocalSessionToken(request1, "range_0"); + assertThat(sessionToken.getLSN()).isEqualTo(100); + sessionToken = sessionContainer.resolvePartitionLocalSessionToken(request2, "range_0"); + assertThat(sessionToken).isNull(); + } + + @Test(groups = "unit") + public void setSessionTokenDoesntFailOnEmptySessionTokenHeader() { + SessionContainer sessionContainer = new SessionContainer("127.0.0.1"); + sessionContainer.setSessionToken(null, new HashMap<>()); + } + + @Test(groups = "unit") + public void setSessionTokenSetsTokenWhenRequestIsntNameBased() { + SessionContainer sessionContainer = new SessionContainer("127.0.0.1"); + String documentCollectionId = ResourceId.newDocumentCollectionId(getRandomDbId(), getRandomCollectionId()).getDocumentCollectionId().toString(); + String collectionFullName = "dbs/db1/colls1/collName"; + + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, + collectionFullName + "/docs/doc1", ResourceType.Document, new HashMap<>()); + request.setResourceId(documentCollectionId); + + assertThat(request.getIsNameBased()).isFalse(); + sessionContainer.setSessionToken(request, ImmutableMap.of(HttpConstants.HttpHeaders.SESSION_TOKEN, "range_0:1#100#4=90#5=1")); + request = RxDocumentServiceRequest.create(OperationType.Read, documentCollectionId, ResourceType.Document, new HashMap<>()); + ISessionToken sessionToken = sessionContainer.resolvePartitionLocalSessionToken(request, "range_0"); + 
assertThat(sessionToken.getLSN()).isEqualTo(100); + + request = RxDocumentServiceRequest.createFromName(OperationType.Read, collectionFullName + "/docs/doc1", ResourceType.Document); + sessionToken = sessionContainer.resolvePartitionLocalSessionToken(request, "range_0"); + assertThat(sessionToken.getLSN()).isEqualTo(100); + } + + @Test(groups = "unit") + public void setSessionTokenGivesPriorityToOwnerFullNameOverResourceAddress() { + SessionContainer sessionContainer = new SessionContainer("127.0.0.1"); + String documentCollectionId = ResourceId.newDocumentCollectionId(getRandomDbId(), getRandomCollectionId()).getDocumentCollectionId().toString(); + String collectionFullName1 = "dbs/db1/colls1/collName1"; + String collectionFullName2 = "dbs/db1/colls1/collName2"; + + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, + collectionFullName1 + "/docs/doc1", ResourceType.Document, new HashMap<>()); + request.setResourceId(documentCollectionId); + sessionContainer.setSessionToken(request, + ImmutableMap.of(HttpConstants.HttpHeaders.SESSION_TOKEN, "range_0:1#100#4=90#5=1", + HttpConstants.HttpHeaders.OWNER_FULL_NAME, collectionFullName2)); + + request = RxDocumentServiceRequest.createFromName(OperationType.Read, collectionFullName1 + "/docs/doc1", ResourceType.Document); + ISessionToken sessionToken = sessionContainer.resolvePartitionLocalSessionToken(request, "range_0"); + assertThat(sessionToken).isNull(); + + request = RxDocumentServiceRequest.createFromName(OperationType.Read, collectionFullName2 + "/docs/doc1", ResourceType.Document); + sessionToken = sessionContainer.resolvePartitionLocalSessionToken(request, "range_0"); + assertThat(sessionToken.getLSN()).isEqualTo(100); + } + + @Test(groups = "unit") + public void setSessionTokenIgnoresOwnerIdWhenRequestIsntNameBased() { + SessionContainer sessionContainer = new SessionContainer("127.0.0.1"); + int randomCollectionId = getRandomCollectionId(); + int randomDbId = getRandomDbId(); + String documentCollectionId1 = ResourceId.newDocumentCollectionId(randomDbId, randomCollectionId).getDocumentCollectionId().toString(); + String documentCollectionId2 = ResourceId.newDocumentCollectionId(randomDbId, randomCollectionId - 1).getDocumentCollectionId().toString(); + String collectionFullName = "dbs/db1/colls1/collName1"; + + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, + collectionFullName + "/docs/doc1", ResourceType.Document, new HashMap<>()); + request.setResourceId(documentCollectionId1); + assertThat(request.getIsNameBased()).isFalse(); + + sessionContainer.setSessionToken(request, + ImmutableMap.of(HttpConstants.HttpHeaders.SESSION_TOKEN, "range_0:1#100#4=90#5=1", + HttpConstants.HttpHeaders.OWNER_ID, documentCollectionId2)); + + request = RxDocumentServiceRequest.create(OperationType.Read, + documentCollectionId1, ResourceType.Document, new HashMap<>()); + ISessionToken sessionToken = sessionContainer.resolvePartitionLocalSessionToken(request, "range_0"); + assertThat(sessionToken.getLSN()).isEqualTo(100); + + + request = RxDocumentServiceRequest.create(OperationType.Read, + documentCollectionId2, ResourceType.Document, new HashMap<>()); + sessionToken = sessionContainer.resolvePartitionLocalSessionToken(request, "range_0"); + assertThat(sessionToken).isNull(); + } + + @Test(groups = "unit") + public void setSessionTokenGivesPriorityToOwnerIdOverResourceIdWhenRequestIsNameBased() { + SessionContainer sessionContainer = new SessionContainer("127.0.0.1"); + int 
randomCollectionId = getRandomCollectionId(); + int randomDbId = getRandomDbId(); + String documentCollectionId1 = ResourceId.newDocumentCollectionId(randomDbId, randomCollectionId).getDocumentCollectionId().toString(); + String documentCollectionId2 = ResourceId.newDocumentCollectionId(randomDbId, randomCollectionId - 1).getDocumentCollectionId().toString(); + + String collectionFullName = "dbs/db1/colls1/collName1"; + + RxDocumentServiceRequest request = RxDocumentServiceRequest.createFromName(OperationType.Read, + collectionFullName + "/docs/doc1", ResourceType.Document); + request.setResourceId(documentCollectionId1); + assertThat(request.getIsNameBased()).isTrue(); + + sessionContainer.setSessionToken(request, + ImmutableMap.of(HttpConstants.HttpHeaders.SESSION_TOKEN, "range_0:1#100#4=90#5=1", + HttpConstants.HttpHeaders.OWNER_ID, documentCollectionId2)); + + request = RxDocumentServiceRequest.create(OperationType.Read, + documentCollectionId1, ResourceType.Document, new HashMap<>()); + ISessionToken sessionToken = sessionContainer.resolvePartitionLocalSessionToken(request, "range_0"); + assertThat(sessionToken).isNull(); + + + request = RxDocumentServiceRequest.create(OperationType.Read, + documentCollectionId2, ResourceType.Document, new HashMap<>()); + sessionToken = sessionContainer.resolvePartitionLocalSessionToken(request, "range_0"); + assertThat(sessionToken.getLSN()).isEqualTo(100); + } + + @Test(groups = "unit") + public void setSessionTokenDoesntWorkForMasterQueries() { + SessionContainer sessionContainer = new SessionContainer("127.0.0.1"); + String documentCollectionId = ResourceId.newDocumentCollectionId(getRandomDbId(), getRandomCollectionId()).getDocumentCollectionId().toString(); + String collectionFullName = "dbs/db1/colls1/collName"; + + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.ReadFeed, + collectionFullName + "/docs/doc1", ResourceType.DocumentCollection, new HashMap<>()); + request.setResourceId(documentCollectionId); + sessionContainer.setSessionToken(request, ImmutableMap.of(HttpConstants.HttpHeaders.SESSION_TOKEN, "range_0:1")); + + request = RxDocumentServiceRequest.create(OperationType.Read, + documentCollectionId, ResourceType.Document, new HashMap<>()); + ISessionToken sessionToken = sessionContainer.resolvePartitionLocalSessionToken(request, "range_0"); + assertThat(sessionToken).isNull(); + + request = RxDocumentServiceRequest.createFromName(OperationType.Read, collectionFullName + "/docs/doc1", ResourceType.Document); + sessionToken = sessionContainer.resolvePartitionLocalSessionToken(request, "range_0"); + assertThat(sessionToken).isNull(); + } + + @Test(groups = "unit") + public void setSessionTokenDoesntOverwriteHigherLSN() { + SessionContainer sessionContainer = new SessionContainer("127.0.0.1"); + String documentCollectionId = ResourceId.newDocumentCollectionId(getRandomDbId(), getRandomCollectionId()).getDocumentCollectionId().toString(); + String collectionFullName = "dbs/db1/colls1/collName"; + + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, + collectionFullName + "/docs/doc1", ResourceType.Document, new HashMap<>()); + request.setResourceId(documentCollectionId); + sessionContainer.setSessionToken(request, ImmutableMap.of(HttpConstants.HttpHeaders.SESSION_TOKEN, "range_0:1#105#4=90#5=1")); + + + request = RxDocumentServiceRequest.create(OperationType.Read, + collectionFullName + "/docs/doc1", ResourceType.Document, new HashMap<>()); + 
request.setResourceId(documentCollectionId); + sessionContainer.setSessionToken(request, ImmutableMap.of(HttpConstants.HttpHeaders.SESSION_TOKEN, "range_0:1#100#4=90#5=1")); + + request = RxDocumentServiceRequest.create(OperationType.Read, + documentCollectionId, ResourceType.Document, new HashMap<>()); + request.setResourceId(documentCollectionId); + ISessionToken sessionToken = sessionContainer.resolvePartitionLocalSessionToken(request, "range_0"); + assertThat(sessionToken.getLSN()).isEqualTo(105); + } + + @Test(groups = "unit") + public void setSessionTokenOverwriteLowerLSN() { + SessionContainer sessionContainer = new SessionContainer("127.0.0.1"); + String documentCollectionId = ResourceId.newDocumentCollectionId(getRandomDbId(), getRandomCollectionId()).getDocumentCollectionId().toString(); + String collectionFullName = "dbs/db1/colls1/collName"; + + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, + collectionFullName + "/docs/doc1", ResourceType.Document, new HashMap<>()); + request.setResourceId(documentCollectionId); + sessionContainer.setSessionToken(request, ImmutableMap.of(HttpConstants.HttpHeaders.SESSION_TOKEN, "range_0:1#100#4=90#5=1")); + + + request = RxDocumentServiceRequest.create(OperationType.Read, + collectionFullName + "/docs/doc1", ResourceType.Document, new HashMap<>()); + request.setResourceId(documentCollectionId); + sessionContainer.setSessionToken(request, ImmutableMap.of(HttpConstants.HttpHeaders.SESSION_TOKEN, "range_0:1#105#4=90#5=1")); + + request = RxDocumentServiceRequest.create(OperationType.Read, + documentCollectionId, ResourceType.Document, new HashMap<>()); + request.setResourceId(documentCollectionId); + ISessionToken sessionToken = sessionContainer.resolvePartitionLocalSessionToken(request, "range_0"); + assertThat(sessionToken.getLSN()).isEqualTo(105); + } + + @Test(groups = "unit") + public void setSessionTokenDoesNothingOnEmptySessionTokenHeader() { + SessionContainer sessionContainer = new SessionContainer("127.0.0.1"); + String documentCollectionId = ResourceId.newDocumentCollectionId(getRandomDbId(), getRandomCollectionId()).getDocumentCollectionId().toString(); + String collectionFullName = "dbs/db1/colls1/collName"; + + sessionContainer.setSessionToken(documentCollectionId, collectionFullName + "/docs/doc1", + ImmutableMap.of(HttpConstants.HttpHeaders.SESSION_TOKEN, "range_0:1#100#4=90#5=1")); + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, + documentCollectionId, ResourceType.Document, new HashMap<>()); + String sessionToken = sessionContainer.resolveGlobalSessionToken(request); + Set tokens = Sets.newSet(sessionToken.split(",")); + assertThat(tokens.size()).isEqualTo(1); + assertThat(tokens.contains("range_0:1#100#4=90#5=1")).isTrue(); + + sessionContainer.setSessionToken(documentCollectionId, collectionFullName, new HashMap<>()); + request = RxDocumentServiceRequest.create(OperationType.Read, + documentCollectionId, ResourceType.Document, new HashMap<>()); + sessionToken = sessionContainer.resolveGlobalSessionToken(request); + tokens = Sets.newSet(sessionToken.split(",")); + assertThat(tokens.size()).isEqualTo(1); + assertThat(tokens.contains("range_0:1#100#4=90#5=1")).isTrue(); + } + + private static int getRandomCollectionId() { + return random.nextInt(Integer.MAX_VALUE / 2) - (Integer.MAX_VALUE / 2); + } + + private static int getRandomDbId() { + return random.nextInt(Integer.MAX_VALUE / 2); + } +} \ No newline at end of file diff --git 
a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/SessionTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/SessionTest.java new file mode 100644 index 0000000000000..3ec00b7246b68 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/SessionTest.java @@ -0,0 +1,211 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.ConnectionMode; +import com.azure.data.cosmos.PartitionKey; +import com.azure.data.cosmos.PartitionKeyDefinition; +import com.azure.data.cosmos.internal.http.HttpRequest; +import io.netty.handler.codec.http.HttpMethod; +import org.apache.commons.lang3.StringUtils; +import org.testng.SkipException; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; + +import java.io.UnsupportedEncodingException; +import java.lang.reflect.Method; +import java.net.URLDecoder; +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.stream.Collectors; + +import static org.assertj.core.api.Assertions.assertThat; + +public class SessionTest extends TestSuiteBase { + protected static final int TIMEOUT = 20000; + + private Database createdDatabase; + private DocumentCollection createdCollection; + private String collectionId = "+ -_,:.|~" + UUID.randomUUID().toString() + " +-_,:.|~"; + private SpyClientUnderTestFactory.SpyBaseClass<HttpRequest> spyClient; + private AsyncDocumentClient houseKeepingClient; + private ConnectionMode connectionMode; + private RequestOptions options; + + @Factory(dataProvider = "clientBuildersWithDirectSession") + public SessionTest(AsyncDocumentClient.Builder clientBuilder) { + super(clientBuilder); + this.subscriberValidationTimeout = TIMEOUT; + } + + @DataProvider(name = "sessionTestArgProvider") + public Object[] sessionTestArgProvider() { + return new Object[] { + // boolean indicating whether requests should be name based or not + true, + false + }; + } + + @BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT) + public void beforeClass() { + createdDatabase = SHARED_DATABASE; + + PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition(); + ArrayList<String>
paths = new ArrayList<String>(); + paths.add("/mypk"); + partitionKeyDef.paths(paths); + + DocumentCollection collection = new DocumentCollection(); + collection.id(collectionId); + collection.setPartitionKey(partitionKeyDef); + + createdCollection = createCollection(createGatewayHouseKeepingDocumentClient().build(), createdDatabase.id(), + collection, null); + houseKeepingClient = clientBuilder().build(); + connectionMode = houseKeepingClient.getConnectionPolicy().connectionMode(); + + if (connectionMode == ConnectionMode.DIRECT) { + spyClient = SpyClientUnderTestFactory.createDirectHttpsClientUnderTest(clientBuilder()); + } else { + spyClient = SpyClientUnderTestFactory.createClientUnderTest(clientBuilder()); + } + options = new RequestOptions(); + options.setPartitionKey(PartitionKey.None); + } + + @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeDeleteCollection(houseKeepingClient, createdCollection); + safeClose(houseKeepingClient); + safeClose(spyClient); + } + + @BeforeMethod(groups = { "simple" }, timeOut = SETUP_TIMEOUT) + public void beforeTest(Method method) { + spyClient.clearCapturedRequests(); + } + + private List<String> getSessionTokensInRequests() { + return spyClient.getCapturedRequests().stream() + .map(r -> r.headers().value(HttpConstants.HttpHeaders.SESSION_TOKEN)).collect(Collectors.toList()); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "sessionTestArgProvider") + public void sessionConsistency_ReadYourWrites(boolean isNameBased) { + spyClient.readCollection(getCollectionLink(isNameBased), null).blockFirst(); + spyClient.createDocument(getCollectionLink(isNameBased), new Document(), null, false).blockFirst(); + + spyClient.clearCapturedRequests(); + + for (int i = 0; i < 10; i++) { + Document documentCreated = spyClient.createDocument(getCollectionLink(isNameBased), new Document(), null, false) + .blockFirst().getResource(); + + // We send session tokens on Writes in GATEWAY mode + if (connectionMode == ConnectionMode.GATEWAY) { + assertThat(getSessionTokensInRequests()).hasSize(3 * i + 1); + assertThat(getSessionTokensInRequests().get(3 * i + 0)).isNotEmpty(); + } + + spyClient.readDocument(getDocumentLink(documentCreated, isNameBased), options).blockFirst(); + + assertThat(getSessionTokensInRequests()).hasSize(3 * i + 2); + assertThat(getSessionTokensInRequests().get(3 * i + 1)).isNotEmpty(); + + spyClient.readDocument(getDocumentLink(documentCreated, isNameBased), options).blockFirst(); + + assertThat(getSessionTokensInRequests()).hasSize(3 * i + 3); + assertThat(getSessionTokensInRequests().get(3 * i + 2)).isNotEmpty(); + } + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "sessionTestArgProvider") + public void sessionTokenInDocumentRead(boolean isNameBased) throws UnsupportedEncodingException { + Document document = new Document(); + document.id(UUID.randomUUID().toString()); + BridgeInternal.setProperty(document, "pk", "pk"); + document = spyClient.createDocument(getCollectionLink(isNameBased), document, null, false) + .blockFirst() + .getResource(); + + final String documentLink = getDocumentLink(document, isNameBased); + spyClient.readDocument(documentLink, options).blockFirst() + .getResource(); + + List<HttpRequest> documentReadHttpRequests = spyClient.getCapturedRequests().stream() + .filter(r -> r.httpMethod() == HttpMethod.GET) + .filter(r -> { + try { + return URLDecoder.decode(r.uri().toString().replaceAll("\\+", "%2b"), "UTF-8").contains(
StringUtils.removeEnd(documentLink, "/")); + } catch (UnsupportedEncodingException e) { + return false; + } + }).collect(Collectors.toList()); + + // DIRECT mode may make more than one call (multiple replicas) + assertThat(documentReadHttpRequests.size() >= 1).isTrue(); + assertThat(documentReadHttpRequests.get(0).headers().value(HttpConstants.HttpHeaders.SESSION_TOKEN)).isNotEmpty(); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "sessionTestArgProvider") + public void sessionTokenRemovedForMasterResource(boolean isNameBased) throws UnsupportedEncodingException { + if (connectionMode == ConnectionMode.DIRECT) { + throw new SkipException("Master resource access is only through gateway"); + } + String collectionLink = getCollectionLink(isNameBased); + spyClient.readCollection(collectionLink, null).blockFirst(); + + List collectionReadHttpRequests = spyClient.getCapturedRequests().stream() + .filter(r -> r.httpMethod() == HttpMethod.GET) + .filter(r -> { + try { + return URLDecoder.decode(r.uri().toString().replaceAll("\\+", "%2b"), "UTF-8").contains( + StringUtils.removeEnd(collectionLink, "/")); + } catch (UnsupportedEncodingException e) { + return false; + } + }) + .collect(Collectors.toList()); + + assertThat(collectionReadHttpRequests).hasSize(1); + assertThat(collectionReadHttpRequests.get(0).headers().value(HttpConstants.HttpHeaders.SESSION_TOKEN)).isNull(); + } + + private String getCollectionLink(boolean isNameBased) { + return isNameBased ? "dbs/" + createdDatabase.id() + "/colls/" + createdCollection.id(): + createdCollection.selfLink(); + } + + private String getDocumentLink(Document doc, boolean isNameBased) { + return isNameBased ? "dbs/" + createdDatabase.id() + "/colls/" + createdCollection.id() + "/docs/" + doc.id() : + "dbs/" + createdDatabase.resourceId() + "/colls/" + createdCollection.resourceId() + "/docs/" + doc.resourceId() + "/"; + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/SessionTokenTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/SessionTokenTest.java new file mode 100644 index 0000000000000..13299080d0978 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/SessionTokenTest.java @@ -0,0 +1,139 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.InternalServerErrorException; +import org.testng.annotations.Test; + +import static com.azure.data.cosmos.internal.Utils.ValueHolder; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; + +public class SessionTokenTest { + + @Test(groups = "unit") + public void validateSuccessfulSessionTokenParsing() { + // valid session token + String sessionToken = "1#100#1=20#2=5#3=30"; + ValueHolder parsedSessionToken = new ValueHolder<>(null); + + assertThat(VectorSessionToken.tryCreate(sessionToken, parsedSessionToken)).isTrue(); + } + + @Test(groups = "unit") + public void validateSessionTokenParsingWithInvalidVersion() { + String sessionToken = "foo#100#1=20#2=5#3=30"; + ValueHolder parsedSessionToken = new ValueHolder<>(null); + assertThat(VectorSessionToken.tryCreate(sessionToken, parsedSessionToken)).isFalse(); + } + + @Test(groups = "unit") + public void validateSessionTokenParsingWithInvalidGlobalLsn() { + String sessionToken = "1#foo#1=20#2=5#3=30"; + ValueHolder parsedSessionToken = new ValueHolder<>(null); + assertThat(VectorSessionToken.tryCreate(sessionToken, parsedSessionToken)).isFalse(); + } + + @Test(groups = "unit") + public void validateSessionTokenParsingWithInvalidRegionProgress() { + String sessionToken = "1#100#1=20#2=x#3=30"; + ValueHolder parsedSessionToken = new ValueHolder<>(null); + assertThat(VectorSessionToken.tryCreate(sessionToken, parsedSessionToken)).isFalse(); + + } + + @Test(groups = "unit") + public void validateSessionTokenParsingWithInvalidFormat() { + String sessionToken = "1;100#1=20#2=40"; + ValueHolder parsedSessionToken = new ValueHolder<>(null); + assertThat(VectorSessionToken.tryCreate(sessionToken, parsedSessionToken)).isFalse(); + } + + @Test(groups = "unit") + public void validateSessionTokenParsingFromEmptyString() { + String sessionToken = ""; + ValueHolder parsedSessionToken = new ValueHolder<>(null); + assertThat(VectorSessionToken.tryCreate(sessionToken, parsedSessionToken)).isFalse(); + } + + @Test(groups = "unit") + public void validateSessionTokenComparison() throws Exception { + // valid session token + ValueHolder sessionToken1 = new ValueHolder<>(null); + ValueHolder sessionToken2 = new ValueHolder<>(null); + ValueHolder sessionTokenMerged = new ValueHolder<>(null); + + assertThat(VectorSessionToken.tryCreate("1#100#1=20#2=5#3=30", sessionToken1)).isTrue(); + assertThat(VectorSessionToken.tryCreate("2#105#4=10#2=5#3=30", sessionToken2)).isTrue(); + + assertThat(sessionToken1.v).isNotEqualTo(sessionToken2.v); + assertThat(sessionToken2.v).isNotEqualTo(sessionToken1.v); + + assertThat(sessionToken1.v.isValid(sessionToken2.v)).isTrue(); + assertThat(sessionToken2.v.isValid(sessionToken1.v)).isFalse(); + + + assertThat(VectorSessionToken.tryCreate("2#105#2=5#3=30#4=10", sessionTokenMerged)).isTrue(); + assertThat(sessionTokenMerged.v).isEqualTo(sessionToken1.v.merge(sessionToken2.v)); + + assertThat(VectorSessionToken.tryCreate("1#100#1=20#2=5#3=30", sessionToken1)).isTrue(); + assertThat(VectorSessionToken.tryCreate("1#100#1=10#2=8#3=30", sessionToken2)).isTrue(); + + assertThat(sessionToken1.v.equals(sessionToken2.v)).isFalse(); + assertThat(sessionToken2.v.equals(sessionToken1.v)).isFalse(); + assertThat(sessionToken1.v.isValid(sessionToken2.v)).isFalse(); + assertThat(sessionToken2.v.isValid(sessionToken1.v)).isFalse(); + + assertThat(VectorSessionToken.tryCreate("1#100#1=20#2=8#3=30", 
sessionTokenMerged)).isTrue(); + assertThat(sessionTokenMerged.v.equals(sessionToken1.v.merge(sessionToken2.v))).isTrue(); + + assertThat(VectorSessionToken.tryCreate("1#100#1=20#2=5#3=30", sessionToken1)).isTrue(); + assertThat(VectorSessionToken.tryCreate("1#102#1=100#2=8#3=30", sessionToken2)).isTrue(); + + assertThat(sessionToken1.v.equals(sessionToken2.v)).isFalse(); + assertThat(sessionToken2.v.equals(sessionToken1.v)).isFalse(); + assertThat(sessionToken1.v.isValid(sessionToken2.v)).isTrue(); + assertThat(sessionToken2.v.isValid(sessionToken1.v)).isFalse(); + + assertThat(VectorSessionToken.tryCreate("1#102#2=8#3=30#1=100", sessionTokenMerged)).isTrue(); + + assertThat(sessionTokenMerged.v.equals(sessionToken1.v.merge(sessionToken2.v))).isTrue(); + + assertThat(VectorSessionToken.tryCreate("1#101#1=20#2=5#3=30", sessionToken1)).isTrue(); + assertThat(VectorSessionToken.tryCreate("1#100#1=20#2=5#3=30#4=40", sessionToken2)).isTrue(); + + + try { + sessionToken1.v.merge(sessionToken2.v); + fail("Region progress can not be different when version is same"); + } catch (InternalServerErrorException e) { + } + + try { + sessionToken2.v.isValid(sessionToken1.v); + fail("Region progress can not be different when version is same"); + } catch (InternalServerErrorException e) { + } + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/ShouldRetryValidator.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/ShouldRetryValidator.java new file mode 100644 index 0000000000000..9416d990abbac --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/ShouldRetryValidator.java @@ -0,0 +1,140 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.internal.FailureValidator; + +import java.time.Duration; +import java.util.ArrayList; +import java.util.List; + +import static org.assertj.core.api.Assertions.assertThat; + +/** + * This is a helper class for validating a partition address for tests. 
+ */ +public interface ShouldRetryValidator { + + void validate(IRetryPolicy.ShouldRetryResult shouldRetryResult); + + static Builder builder() { + return new Builder(); + } + + class Builder { + private List validators = new ArrayList<>(); + + public ShouldRetryValidator build() { + return new ShouldRetryValidator() { + + @Override + public void validate(IRetryPolicy.ShouldRetryResult shouldRetryResult) { + for (ShouldRetryValidator validator : validators) { + validator.validate(shouldRetryResult); + } + } + }; + } + + public Builder nullException() { + validators.add(new ShouldRetryValidator() { + + @Override + public void validate(IRetryPolicy.ShouldRetryResult shouldRetryResult) { + assertThat(shouldRetryResult.exception).isNull(); + } + }); + return this; + } + + public Builder hasException() { + validators.add(new ShouldRetryValidator() { + + @Override + public void validate(IRetryPolicy.ShouldRetryResult shouldRetryResult) { + assertThat(shouldRetryResult.exception).isNotNull(); + } + }); + return this; + } + + public Builder exceptionOfType(Class klass) { + validators.add(new ShouldRetryValidator() { + + @Override + public void validate(IRetryPolicy.ShouldRetryResult shouldRetryResult) { + assertThat(shouldRetryResult.exception).isNotNull(); + assertThat(shouldRetryResult.exception).isInstanceOf(klass); + } + }); + return this; + } + + public Builder withException(FailureValidator failureValidator) { + validators.add(new ShouldRetryValidator() { + + @Override + public void validate(IRetryPolicy.ShouldRetryResult shouldRetryResult) { + assertThat(shouldRetryResult.exception).isNotNull(); + failureValidator.validate(shouldRetryResult.exception); + } + }); + return this; + } + + public Builder withException(Exception exception) { + validators.add(new ShouldRetryValidator() { + + @Override + public void validate(IRetryPolicy.ShouldRetryResult shouldRetryResult) { + assertThat(shouldRetryResult.exception).isNotNull(); + assertThat(shouldRetryResult.exception).isEqualTo(exception); + } + }); + return this; + } + + public Builder shouldRetry(boolean value) { + validators.add(new ShouldRetryValidator() { + + @Override + public void validate(IRetryPolicy.ShouldRetryResult shouldRetryResult) { + assertThat(shouldRetryResult.shouldRetry).isEqualTo(value); + } + }); + return this; + } + + + public Builder backOfTime(Duration backOfTime) { + validators.add(new ShouldRetryValidator() { + + @Override + public void validate(IRetryPolicy.ShouldRetryResult shouldRetryResult) { + assertThat(shouldRetryResult.backOffTime).isEqualTo(backOfTime); + } + }); + return this; + } + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/SpyClientBuilder.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/SpyClientBuilder.java new file mode 100644 index 0000000000000..ea0a005922b12 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/SpyClientBuilder.java @@ -0,0 +1,61 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this 
permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal; + +public class SpyClientBuilder extends AsyncDocumentClient.Builder { + + public SpyClientBuilder(AsyncDocumentClient.Builder builder) { + super(); + super.configs = builder.configs; + super.connectionPolicy = builder.connectionPolicy; + super.desiredConsistencyLevel = builder.desiredConsistencyLevel; + super.masterKeyOrResourceToken = builder.masterKeyOrResourceToken; + super.serviceEndpoint = builder.serviceEndpoint; + } + + public SpyClientUnderTestFactory.ClientUnderTest build() { + return SpyClientUnderTestFactory.createClientUnderTest( + serviceEndpoint, + masterKeyOrResourceToken, + connectionPolicy, + desiredConsistencyLevel, + configs); + } + + public SpyClientUnderTestFactory.ClientWithGatewaySpy buildWithGatewaySpy() { + return SpyClientUnderTestFactory.createClientWithGatewaySpy( + serviceEndpoint, + masterKeyOrResourceToken, + connectionPolicy, + desiredConsistencyLevel, + configs); + } + + public SpyClientUnderTestFactory.DirectHttpsClientUnderTest buildWithDirectHttps() { + return SpyClientUnderTestFactory.createDirectHttpsClientUnderTest( + serviceEndpoint, + masterKeyOrResourceToken, + connectionPolicy, + desiredConsistencyLevel); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/SpyClientUnderTestFactory.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/SpyClientUnderTestFactory.java new file mode 100644 index 0000000000000..5a7c776721ec8 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/SpyClientUnderTestFactory.java @@ -0,0 +1,300 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.ConnectionMode; +import com.azure.data.cosmos.ConnectionPolicy; +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.internal.directconnectivity.Protocol; +import com.azure.data.cosmos.internal.directconnectivity.ReflectionUtils; +import com.azure.data.cosmos.internal.http.HttpClient; +import com.azure.data.cosmos.internal.http.HttpHeaders; +import com.azure.data.cosmos.internal.http.HttpRequest; +import org.apache.commons.lang3.reflect.FieldUtils; +import org.apache.commons.lang3.tuple.Pair; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +import java.net.URI; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.Future; +import java.util.stream.Collectors; + +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.spy; + +public class SpyClientUnderTestFactory { + + public static abstract class SpyBaseClass extends RxDocumentClientImpl { + + public SpyBaseClass(URI serviceEndpoint, String masterKeyOrResourceToken, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs) { + super(serviceEndpoint, masterKeyOrResourceToken, connectionPolicy, consistencyLevel, configs); + } + + public abstract List getCapturedRequests(); + + public abstract void clearCapturedRequests(); + + protected static Configs createConfigsSpy(final Protocol protocol) { + final Configs configs = spy(new Configs()); + doAnswer((Answer) invocation -> protocol).when(configs).getProtocol(); + return configs; + } + } + + public static class ClientWithGatewaySpy extends SpyBaseClass { + + private RxGatewayStoreModel origRxGatewayStoreModel; + private RxGatewayStoreModel spyRxGatewayStoreModel; + + private List requests; + + + ClientWithGatewaySpy(URI serviceEndpoint, String masterKey, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs) { + super(serviceEndpoint, masterKey, connectionPolicy, consistencyLevel, configs); + init(); + } + + @Override + public List getCapturedRequests() { + return requests; + } + + @Override + RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer, + ConsistencyLevel consistencyLevel, + QueryCompatibilityMode queryCompatibilityMode, + UserAgentContainer userAgentContainer, + GlobalEndpointManager globalEndpointManager, + HttpClient rxClient) { + this.origRxGatewayStoreModel = super.createRxGatewayProxy( + sessionContainer, + consistencyLevel, + queryCompatibilityMode, + userAgentContainer, + globalEndpointManager, + rxClient); + this.requests = Collections.synchronizedList(new ArrayList<>()); + this.spyRxGatewayStoreModel = spy(this.origRxGatewayStoreModel); + this.initRequestCapture(); + return this.spyRxGatewayStoreModel; + } + + protected void initRequestCapture() { + doAnswer(new Answer() { + @Override + public Object answer(InvocationOnMock invocationOnMock) { + RxDocumentServiceRequest req = invocationOnMock.getArgumentAt(0, RxDocumentServiceRequest.class); + requests.add(req); + return ClientWithGatewaySpy.this.origRxGatewayStoreModel.processMessage(req); + } + }).when(ClientWithGatewaySpy.this.spyRxGatewayStoreModel).processMessage(Mockito.any(RxDocumentServiceRequest.class)); + } + + @Override + public void clearCapturedRequests() { + requests.clear(); + } + + public RxGatewayStoreModel 
getSpyGatewayStoreModel() {
+            return spyRxGatewayStoreModel;
+        }
+
+        public RxGatewayStoreModel getOrigGatewayStoreModel() {
+            return origRxGatewayStoreModel;
+        }
+    }
+
+    public static class ClientUnderTest extends SpyBaseClass {
+
+        HttpClient origHttpClient;
+        HttpClient spyHttpClient;
+        List<Pair<HttpRequest, Future<HttpHeaders>>> requestsResponsePairs =
+                Collections.synchronizedList(new ArrayList<>());
+
+        ClientUnderTest(URI serviceEndpoint, String masterKey, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs) {
+            super(serviceEndpoint, masterKey, connectionPolicy, consistencyLevel, configs);
+            init();
+        }
+
+        public List<Pair<HttpRequest, Future<HttpHeaders>>> capturedRequestResponseHeaderPairs() {
+            return requestsResponsePairs;
+        }
+
+        @Override
+        public List<HttpRequest> getCapturedRequests() {
+            return requestsResponsePairs.stream().map(Pair::getLeft).collect(Collectors.toList());
+        }
+
+        void initRequestCapture(HttpClient spyClient) {
+            doAnswer(invocationOnMock -> {
+                HttpRequest httpRequest = invocationOnMock.getArgumentAt(0, HttpRequest.class);
+                CompletableFuture<HttpHeaders> f = new CompletableFuture<>();
+                requestsResponsePairs.add(Pair.of(httpRequest, f));
+
+                return origHttpClient
+                        .send(httpRequest)
+                        .doOnNext(httpResponse -> f.complete(httpResponse.headers()))
+                        .doOnError(f::completeExceptionally);
+            }).when(spyClient).send(Mockito.any(HttpRequest.class));
+        }
+
+        @Override
+        public void clearCapturedRequests() {
+            requestsResponsePairs.clear();
+        }
+
+        public ISessionContainer getSessionContainer() {
+            try {
+                return (ISessionContainer) FieldUtils.readField(this, "sessionContainer", true);
+            } catch (Exception e){
+                throw new RuntimeException(e);
+            }
+        }
+
+        public HttpClient getSpyHttpClient() {
+            return spyHttpClient;
+        }
+    }
+
+    public static class DirectHttpsClientUnderTest extends SpyBaseClass {
+
+        HttpClient origHttpClient;
+        HttpClient spyHttpClient;
+        List<Pair<HttpRequest, Future<HttpHeaders>>> requestsResponsePairs =
+                Collections.synchronizedList(new ArrayList<>());
+
+        DirectHttpsClientUnderTest(URI serviceEndpoint, String masterKey, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel) {
+            super(serviceEndpoint, masterKey, connectionPolicy, consistencyLevel, createConfigsSpy(Protocol.HTTPS));
+            assert connectionPolicy.connectionMode() == ConnectionMode.DIRECT;
+            init();
+
+            this.origHttpClient = ReflectionUtils.getDirectHttpsHttpClient(this);
+            this.spyHttpClient = spy(this.origHttpClient);
+            ReflectionUtils.setDirectHttpsHttpClient(this, this.spyHttpClient);
+            this.initRequestCapture(this.spyHttpClient);
+        }
+
+        public List<Pair<HttpRequest, Future<HttpHeaders>>> capturedRequestResponseHeaderPairs() {
+            return requestsResponsePairs;
+        }
+
+        @Override
+        public List<HttpRequest> getCapturedRequests() {
+            return requestsResponsePairs.stream().map(Pair::getLeft).collect(Collectors.toList());
+        }
+
+        void initRequestCapture(HttpClient spyClient) {
+            doAnswer(invocationOnMock -> {
+                HttpRequest httpRequest = invocationOnMock.getArgumentAt(0, HttpRequest.class);
+                CompletableFuture<HttpHeaders> f = new CompletableFuture<>();
+                requestsResponsePairs.add(Pair.of(httpRequest, f));
+
+                return origHttpClient
+                        .send(httpRequest)
+                        .doOnNext(httpResponse -> f.complete(httpResponse.headers()))
+                        .doOnError(f::completeExceptionally);
+
+            }).when(spyClient).send(Mockito.any(HttpRequest.class));
+        }
+
+        @Override
+        public void clearCapturedRequests() {
+            requestsResponsePairs.clear();
+        }
+
+        public ISessionContainer getSessionContainer() {
+            try {
+                return (ISessionContainer) FieldUtils.readField(this, "sessionContainer", true);
+            } catch (Exception e){
+                throw new RuntimeException(e);
+            }
+        }
+
+        public HttpClient 
getSpyHttpClient() { + return spyHttpClient; + } + } + + public static ClientWithGatewaySpy createClientWithGatewaySpy(AsyncDocumentClient.Builder builder) { + return new SpyClientBuilder(builder).buildWithGatewaySpy(); + } + + public static ClientWithGatewaySpy createClientWithGatewaySpy(URI serviceEndpoint, + String masterKey, + ConnectionPolicy connectionPolicy, + ConsistencyLevel consistencyLevel, + Configs configs) { + return new ClientWithGatewaySpy(serviceEndpoint, masterKey, connectionPolicy, consistencyLevel, configs); + } + + public static ClientUnderTest createClientUnderTest(AsyncDocumentClient.Builder builder) { + return new SpyClientBuilder(builder).build(); + } + + public static DirectHttpsClientUnderTest createDirectHttpsClientUnderTest(AsyncDocumentClient.Builder builder) { + return new SpyClientBuilder(builder).buildWithDirectHttps(); + } + + public static ClientUnderTest createClientUnderTest(URI serviceEndpoint, + String masterKey, + ConnectionPolicy connectionPolicy, + ConsistencyLevel consistencyLevel, + Configs configs) { + return new ClientUnderTest(serviceEndpoint, masterKey, connectionPolicy, consistencyLevel, configs) { + + @Override + RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer, + ConsistencyLevel consistencyLevel, + QueryCompatibilityMode queryCompatibilityMode, + UserAgentContainer userAgentContainer, + GlobalEndpointManager globalEndpointManager, + HttpClient rxClient) { + + HttpClient spy = spy(rxClient); + + this.origHttpClient = rxClient; + this.spyHttpClient = spy; + + this.initRequestCapture(spyHttpClient); + + return super.createRxGatewayProxy( + sessionContainer, + consistencyLevel, + queryCompatibilityMode, + userAgentContainer, + globalEndpointManager, + spy); + } + }; + } + + public static DirectHttpsClientUnderTest createDirectHttpsClientUnderTest(URI serviceEndpoint, String masterKey, + ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel) { + return new DirectHttpsClientUnderTest(serviceEndpoint, masterKey, connectionPolicy, consistencyLevel); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/StoreHeaderTests.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/StoreHeaderTests.java new file mode 100644 index 0000000000000..f748a4e325fdb --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/StoreHeaderTests.java @@ -0,0 +1,83 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal; + +import org.testng.Assert; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; + +import java.util.UUID; + +public class StoreHeaderTests extends TestSuiteBase { + + private static Database createdDatabase; + private static DocumentCollection createdCollection; + + private AsyncDocumentClient client; + + @Factory(dataProvider = "clientBuildersWithDirect") + public StoreHeaderTests(AsyncDocumentClient.Builder clientBuilder) { + super(clientBuilder); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void validateStoreHeader() { + Document docDefinition1 = getDocumentDefinition(); + Document responseDoc1 = createDocument(client, createdDatabase.id(), createdCollection.id(), docDefinition1); + Assert.assertNotNull(responseDoc1.selfLink()); + Assert.assertNotNull(responseDoc1.get("_attachments")); + + Document docDefinition2 = getDocumentDefinition(); + RequestOptions requestOptions = new RequestOptions(); + requestOptions.setHeader("x-ms-exclude-system-properties", "true"); + Document responseDoc2 = createDocument(client, createdDatabase.id(), createdCollection.id(), docDefinition2, requestOptions); + Assert.assertNull(responseDoc2.selfLink()); + Assert.assertNull(responseDoc2.get("_attachments")); + } + + @BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT) + public void beforeClass() { + client = clientBuilder().build(); + + createdDatabase = SHARED_DATABASE; + createdCollection = SHARED_MULTI_PARTITION_COLLECTION; + } + + @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeClose(client); + } + + private Document getDocumentDefinition() { + String uuid = UUID.randomUUID().toString(); + Document doc = new Document(String.format("{ " + + "\"id\": \"%s\", " + + "\"mypk\": \"%s\", " + + "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]" + + "}" + , uuid, uuid)); + return doc; + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/StoreResponseBuilder.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/StoreResponseBuilder.java new file mode 100644 index 0000000000000..a42cf98178fcb --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/StoreResponseBuilder.java @@ -0,0 +1,116 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+package com.azure.data.cosmos.internal;
+
+import com.azure.data.cosmos.internal.directconnectivity.StoreResponse;
+import com.azure.data.cosmos.internal.directconnectivity.WFConstants;
+
+import java.math.BigDecimal;
+import java.util.AbstractMap;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+public class StoreResponseBuilder {
+    private int status;
+    private List<Map.Entry<String, String>> headerEntries;
+    private String content;
+
+    public static StoreResponseBuilder create() {
+        return new StoreResponseBuilder();
+    }
+
+    public StoreResponseBuilder() {
+        headerEntries = new ArrayList<>();
+    }
+
+    public StoreResponseBuilder withHeader(String key, String value) {
+        headerEntries.add(new AbstractMap.SimpleEntry(key, value));
+        return this;
+    }
+
+    public StoreResponseBuilder withLSN(long lsn) {
+        headerEntries.add(new AbstractMap.SimpleEntry(WFConstants.BackendHeaders.LSN, Long.toString(lsn)));
+        return this;
+    }
+
+    public StoreResponseBuilder withRequestCharge(BigDecimal requestCharge) {
+        withRequestCharge(requestCharge.doubleValue());
+        return this;
+    }
+
+    public StoreResponseBuilder withRequestCharge(double requestCharge) {
+        headerEntries.add(new AbstractMap.SimpleEntry(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double.toString(requestCharge)));
+        return this;
+    }
+
+    public StoreResponseBuilder withLocalLSN(long localLsn) {
+        headerEntries.add(new AbstractMap.SimpleEntry(WFConstants.BackendHeaders.LOCAL_LSN, Long.toString(localLsn)));
+        return this;
+    }
+
+    public StoreResponseBuilder withPartitionKeyRangeId(String partitionKeyRangeId) {
+        headerEntries.add(new AbstractMap.SimpleEntry(WFConstants.BackendHeaders.PARTITION_KEY_RANGE_ID, partitionKeyRangeId));
+        return this;
+    }
+
+    public StoreResponseBuilder withItemLocalLSN(long itemLocalLsn) {
+        headerEntries.add(new AbstractMap.SimpleEntry(WFConstants.BackendHeaders.ITEM_LOCAL_LSN, Long.toString(itemLocalLsn)));
+        return this;
+    }
+
+    public StoreResponseBuilder withQuorumAckecdLsn(long quorumAckecdLsn) {
+        headerEntries.add(new AbstractMap.SimpleEntry(WFConstants.BackendHeaders.QUORUM_ACKED_LSN, Long.toString(quorumAckecdLsn)));
+        return this;
+    }
+
+    public StoreResponseBuilder withQuorumAckecdLocalLsn(long quorumAckecdLocalLsn) {
+        headerEntries.add(new AbstractMap.SimpleEntry(WFConstants.BackendHeaders.QUORUM_ACKED_LOCAL_LSN, Long.toString(quorumAckecdLocalLsn)));
+        return this;
+    }
+
+    public StoreResponseBuilder withGlobalCommittedLsn(long globalCommittedLsn) {
+        headerEntries.add(new AbstractMap.SimpleEntry(WFConstants.BackendHeaders.GLOBAL_COMMITTED_LSN, Long.toString(globalCommittedLsn)));
+        return this;
+    }
+
+    public StoreResponseBuilder withSessionToken(String sessionToken) {
+        headerEntries.add(new AbstractMap.SimpleEntry(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken));
+        return this;
+    }
+
+    public StoreResponseBuilder withStatus(int status) {
+        this.status = status;
+        return this;
+    }
+
+    public StoreResponseBuilder withContent(String content) {
+        this.content = content;
+        return this;
+    }
+
+    public 
StoreResponse build() { + return new StoreResponse(status, headerEntries, content); + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/TestSuiteBase.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/TestSuiteBase.java new file mode 100644 index 0000000000000..e353afbdbefa8 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/TestSuiteBase.java @@ -0,0 +1,988 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal; + +import com.azure.data.cosmos.Resource; +import com.azure.data.cosmos.internal.AsyncDocumentClient.Builder; +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.CompositePath; +import com.azure.data.cosmos.CompositePathSortOrder; +import com.azure.data.cosmos.ConnectionMode; +import com.azure.data.cosmos.ConnectionPolicy; +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.DataType; +import com.azure.data.cosmos.DocumentClientTest; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.IncludedPath; +import com.azure.data.cosmos.Index; +import com.azure.data.cosmos.IndexingPolicy; +import com.azure.data.cosmos.PartitionKey; +import com.azure.data.cosmos.PartitionKeyDefinition; +import com.azure.data.cosmos.RetryOptions; +import com.azure.data.cosmos.SqlQuerySpec; +import com.azure.data.cosmos.internal.directconnectivity.Protocol; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.DeserializationFeature; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.CaseFormat; +import com.google.common.collect.ImmutableList; +import io.reactivex.subscribers.TestSubscriber; +import org.apache.commons.lang3.ObjectUtils; +import org.apache.commons.lang3.StringUtils; +import org.mockito.stubbing.Answer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testng.annotations.AfterSuite; +import org.testng.annotations.BeforeSuite; +import org.testng.annotations.DataProvider; +import reactor.core.publisher.Flux; +import reactor.core.scheduler.Schedulers; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.UUID; 
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.spy;
+
+public class TestSuiteBase extends DocumentClientTest {
+
+    private static final int DEFAULT_BULK_INSERT_CONCURRENCY_LEVEL = 500;
+    private static final ObjectMapper objectMapper = new ObjectMapper();
+    protected static Logger logger = LoggerFactory.getLogger(TestSuiteBase.class.getSimpleName());
+    protected static final int TIMEOUT = 40000;
+    protected static final int FEED_TIMEOUT = 40000;
+    protected static final int SETUP_TIMEOUT = 60000;
+    protected static final int SHUTDOWN_TIMEOUT = 12000;
+
+    protected static final int SUITE_SETUP_TIMEOUT = 120000;
+    protected static final int SUITE_SHUTDOWN_TIMEOUT = 60000;
+
+    protected static final int WAIT_REPLICA_CATCH_UP_IN_MILLIS = 4000;
+
+    protected final static ConsistencyLevel accountConsistency;
+    protected static final ImmutableList<String> preferredLocations;
+    private static final ImmutableList<ConsistencyLevel> desiredConsistencies;
+    private static final ImmutableList<Protocol> protocols;
+
+    protected int subscriberValidationTimeout = TIMEOUT;
+    protected static Database SHARED_DATABASE;
+    protected static DocumentCollection SHARED_MULTI_PARTITION_COLLECTION;
+    protected static DocumentCollection SHARED_SINGLE_PARTITION_COLLECTION;
+    protected static DocumentCollection SHARED_MULTI_PARTITION_COLLECTION_WITH_COMPOSITE_AND_SPATIAL_INDEXES;
+
+    private static <T> ImmutableList<T> immutableListOrNull(List<T> list) {
+        return list != null ? ImmutableList.copyOf(list) : null;
+    }
+
+    static {
+        accountConsistency = parseConsistency(TestConfigurations.CONSISTENCY);
+        desiredConsistencies = immutableListOrNull(
+                ObjectUtils.defaultIfNull(parseDesiredConsistencies(TestConfigurations.DESIRED_CONSISTENCIES),
+                                          allEqualOrLowerConsistencies(accountConsistency)));
+        preferredLocations = immutableListOrNull(parsePreferredLocation(TestConfigurations.PREFERRED_LOCATIONS));
+        protocols = ObjectUtils.defaultIfNull(immutableListOrNull(parseProtocols(TestConfigurations.PROTOCOLS)),
+                                              ImmutableList.of(Protocol.HTTPS, Protocol.TCP));
+        // Object mapper configuration
+        objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
+        objectMapper.configure(JsonParser.Feature.ALLOW_SINGLE_QUOTES, true);
+        objectMapper.configure(JsonParser.Feature.ALLOW_TRAILING_COMMA, true);
+        objectMapper.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, true);
+    }
+
+    protected TestSuiteBase() {
+        this(new AsyncDocumentClient.Builder());
+    }
+
+    protected TestSuiteBase(AsyncDocumentClient.Builder clientBuilder) {
+        super(clientBuilder);
+        logger.debug("Initializing {} ...", this.getClass().getSimpleName());
+    }
+
+    private static class DatabaseManagerImpl implements DatabaseForTest.DatabaseManager {
+        public static DatabaseManagerImpl getInstance(AsyncDocumentClient client) {
+            return new DatabaseManagerImpl(client);
+        }
+
+        private final AsyncDocumentClient client;
+
+        private DatabaseManagerImpl(AsyncDocumentClient client) {
+            this.client = client;
+        }
+
+        @Override
+        public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec query) {
+            return client.queryDatabases(query, null);
+        }
+
+        @Override
+        public Flux<ResourceResponse<Database>> createDatabase(Database databaseDefinition) {
+            return client.createDatabase(databaseDefinition, null);
+        }
+
+        @Override
+        public Flux<ResourceResponse<Database>> deleteDatabase(String id) {
+            return client.deleteDatabase("dbs/" + id, null);
+        }
+    }
+
+    @BeforeSuite(groups = {"simple", "long", "direct", 
"multi-master", "emulator", "non-emulator"}, timeOut = SUITE_SETUP_TIMEOUT) + public static void beforeSuite() { + logger.info("beforeSuite Started"); + AsyncDocumentClient houseKeepingClient = createGatewayHouseKeepingDocumentClient().build(); + try { + DatabaseForTest dbForTest = DatabaseForTest.create(DatabaseManagerImpl.getInstance(houseKeepingClient)); + SHARED_DATABASE = dbForTest.createdDatabase; + RequestOptions options = new RequestOptions(); + options.setOfferThroughput(10100); + SHARED_MULTI_PARTITION_COLLECTION = createCollection(houseKeepingClient, SHARED_DATABASE.id(), getCollectionDefinitionWithRangeRangeIndex(), options); + SHARED_SINGLE_PARTITION_COLLECTION = createCollection(houseKeepingClient, SHARED_DATABASE.id(), getCollectionDefinition(), null); + SHARED_MULTI_PARTITION_COLLECTION_WITH_COMPOSITE_AND_SPATIAL_INDEXES = createCollection(houseKeepingClient, SHARED_DATABASE.id(), getCollectionDefinitionMultiPartitionWithCompositeAndSpatialIndexes(), options); + } finally { + houseKeepingClient.close(); + } + } + + @AfterSuite(groups = {"simple", "long", "direct", "multi-master", "emulator", "non-emulator"}, timeOut = SUITE_SHUTDOWN_TIMEOUT) + public static void afterSuite() { + logger.info("afterSuite Started"); + AsyncDocumentClient houseKeepingClient = createGatewayHouseKeepingDocumentClient().build(); + try { + safeDeleteDatabase(houseKeepingClient, SHARED_DATABASE); + DatabaseForTest.cleanupStaleTestDatabases(DatabaseManagerImpl.getInstance(houseKeepingClient)); + } finally { + safeClose(houseKeepingClient); + } + } + + protected static void truncateCollection(DocumentCollection collection) { + logger.info("Truncating collection {} ...", collection.id()); + AsyncDocumentClient houseKeepingClient = createGatewayHouseKeepingDocumentClient().build(); + try { + List paths = collection.getPartitionKey().paths(); + + FeedOptions options = new FeedOptions(); + options.maxDegreeOfParallelism(-1); + options.enableCrossPartitionQuery(true); + options.maxItemCount(100); + + logger.info("Truncating collection {} documents ...", collection.id()); + + houseKeepingClient.queryDocuments(collection.selfLink(), "SELECT * FROM root", options) + .publishOn(Schedulers.parallel()) + .flatMap(page -> Flux.fromIterable(page.results())) + .flatMap(doc -> { + RequestOptions requestOptions = new RequestOptions(); + + if (paths != null && !paths.isEmpty()) { + List pkPath = PathParser.getPathParts(paths.get(0)); + Object propertyValue = doc.getObjectByPath(pkPath); + if (propertyValue == null) { + propertyValue = Undefined.Value(); + } + + requestOptions.setPartitionKey(new PartitionKey(propertyValue)); + } + + return houseKeepingClient.deleteDocument(doc.selfLink(), requestOptions); + }).then().block(); + + logger.info("Truncating collection {} triggers ...", collection.id()); + + houseKeepingClient.queryTriggers(collection.selfLink(), "SELECT * FROM root", options) + .publishOn(Schedulers.parallel()) + .flatMap(page -> Flux.fromIterable(page.results())) + .flatMap(trigger -> { + RequestOptions requestOptions = new RequestOptions(); + +// if (paths != null && !paths.isEmpty()) { +// Object propertyValue = trigger.getObjectByPath(PathParser.getPathParts(paths.get(0))); +// requestOptions.partitionKey(new PartitionKey(propertyValue)); +// } + + return houseKeepingClient.deleteTrigger(trigger.selfLink(), requestOptions); + }).then().block(); + + logger.info("Truncating collection {} storedProcedures ...", collection.id()); + + houseKeepingClient.queryStoredProcedures(collection.selfLink(), "SELECT 
* FROM root", options) + .publishOn(Schedulers.parallel()) + .flatMap(page -> Flux.fromIterable(page.results())) + .flatMap(storedProcedure -> { + RequestOptions requestOptions = new RequestOptions(); + +// if (paths != null && !paths.isEmpty()) { +// Object propertyValue = storedProcedure.getObjectByPath(PathParser.getPathParts(paths.get(0))); +// requestOptions.partitionKey(new PartitionKey(propertyValue)); +// } + + return houseKeepingClient.deleteStoredProcedure(storedProcedure.selfLink(), requestOptions); + }).then().block(); + + logger.info("Truncating collection {} udfs ...", collection.id()); + + houseKeepingClient.queryUserDefinedFunctions(collection.selfLink(), "SELECT * FROM root", options) + .publishOn(Schedulers.parallel()) + .flatMap(page -> Flux.fromIterable(page.results())) + .flatMap(udf -> { + RequestOptions requestOptions = new RequestOptions(); + +// if (paths != null && !paths.isEmpty()) { +// Object propertyValue = udf.getObjectByPath(PathParser.getPathParts(paths.get(0))); +// requestOptions.partitionKey(new PartitionKey(propertyValue)); +// } + + return houseKeepingClient.deleteUserDefinedFunction(udf.selfLink(), requestOptions); + }).then().block(); + + } finally { + houseKeepingClient.close(); + } + + logger.info("Finished truncating collection {}.", collection.id()); + } + + protected static void waitIfNeededForReplicasToCatchUp(Builder clientBuilder) { + switch (clientBuilder.getDesiredConsistencyLevel()) { + case EVENTUAL: + case CONSISTENT_PREFIX: + logger.info(" additional wait in EVENTUAL mode so the replica catch up"); + // give times to replicas to catch up after a write + try { + TimeUnit.MILLISECONDS.sleep(WAIT_REPLICA_CATCH_UP_IN_MILLIS); + } catch (Exception e) { + logger.error("unexpected failure", e); + } + + case SESSION: + case BOUNDED_STALENESS: + case STRONG: + default: + break; + } + } + + public static DocumentCollection createCollection(String databaseId, + DocumentCollection collection, + RequestOptions options) { + AsyncDocumentClient client = createGatewayHouseKeepingDocumentClient().build(); + try { + return client.createCollection("dbs/" + databaseId, collection, options).single().block().getResource(); + } finally { + client.close(); + } + } + + public static DocumentCollection createCollection(AsyncDocumentClient client, String databaseId, + DocumentCollection collection, RequestOptions options) { + return client.createCollection("dbs/" + databaseId, collection, options).single().block().getResource(); + } + + public static DocumentCollection createCollection(AsyncDocumentClient client, String databaseId, + DocumentCollection collection) { + return client.createCollection("dbs/" + databaseId, collection, null).single().block().getResource(); + } + + private static DocumentCollection getCollectionDefinitionMultiPartitionWithCompositeAndSpatialIndexes() { + final String NUMBER_FIELD = "numberField"; + final String STRING_FIELD = "stringField"; + final String NUMBER_FIELD_2 = "numberField2"; + final String STRING_FIELD_2 = "stringField2"; + final String BOOL_FIELD = "boolField"; + final String NULL_FIELD = "nullField"; + final String OBJECT_FIELD = "objectField"; + final String ARRAY_FIELD = "arrayField"; + final String SHORT_STRING_FIELD = "shortStringField"; + final String MEDIUM_STRING_FIELD = "mediumStringField"; + final String LONG_STRING_FIELD = "longStringField"; + final String PARTITION_KEY = "pk"; + + DocumentCollection documentCollection = new DocumentCollection(); + + IndexingPolicy indexingPolicy = new IndexingPolicy(); + List> 
compositeIndexes = new ArrayList<>(); + + //Simple + ArrayList compositeIndexSimple = new ArrayList(); + CompositePath compositePath1 = new CompositePath(); + compositePath1.path("/" + NUMBER_FIELD); + compositePath1.order(CompositePathSortOrder.ASCENDING); + + CompositePath compositePath2 = new CompositePath(); + compositePath2.path("/" + STRING_FIELD); + compositePath2.order(CompositePathSortOrder.DESCENDING); + + compositeIndexSimple.add(compositePath1); + compositeIndexSimple.add(compositePath2); + + //Max Columns + ArrayList compositeIndexMaxColumns = new ArrayList(); + CompositePath compositePath3 = new CompositePath(); + compositePath3.path("/" + NUMBER_FIELD); + compositePath3.order(CompositePathSortOrder.DESCENDING); + + CompositePath compositePath4 = new CompositePath(); + compositePath4.path("/" + STRING_FIELD); + compositePath4.order(CompositePathSortOrder.ASCENDING); + + CompositePath compositePath5 = new CompositePath(); + compositePath5.path("/" + NUMBER_FIELD_2); + compositePath5.order(CompositePathSortOrder.DESCENDING); + + CompositePath compositePath6 = new CompositePath(); + compositePath6.path("/" + STRING_FIELD_2); + compositePath6.order(CompositePathSortOrder.ASCENDING); + + compositeIndexMaxColumns.add(compositePath3); + compositeIndexMaxColumns.add(compositePath4); + compositeIndexMaxColumns.add(compositePath5); + compositeIndexMaxColumns.add(compositePath6); + + //Primitive Values + ArrayList compositeIndexPrimitiveValues = new ArrayList(); + CompositePath compositePath7 = new CompositePath(); + compositePath7.path("/" + NUMBER_FIELD); + compositePath7.order(CompositePathSortOrder.DESCENDING); + + CompositePath compositePath8 = new CompositePath(); + compositePath8.path("/" + STRING_FIELD); + compositePath8.order(CompositePathSortOrder.ASCENDING); + + CompositePath compositePath9 = new CompositePath(); + compositePath9.path("/" + BOOL_FIELD); + compositePath9.order(CompositePathSortOrder.DESCENDING); + + CompositePath compositePath10 = new CompositePath(); + compositePath10.path("/" + NULL_FIELD); + compositePath10.order(CompositePathSortOrder.ASCENDING); + + compositeIndexPrimitiveValues.add(compositePath7); + compositeIndexPrimitiveValues.add(compositePath8); + compositeIndexPrimitiveValues.add(compositePath9); + compositeIndexPrimitiveValues.add(compositePath10); + + //Long Strings + ArrayList compositeIndexLongStrings = new ArrayList(); + CompositePath compositePath11 = new CompositePath(); + compositePath11.path("/" + STRING_FIELD); + + CompositePath compositePath12 = new CompositePath(); + compositePath12.path("/" + SHORT_STRING_FIELD); + + CompositePath compositePath13 = new CompositePath(); + compositePath13.path("/" + MEDIUM_STRING_FIELD); + + CompositePath compositePath14 = new CompositePath(); + compositePath14.path("/" + LONG_STRING_FIELD); + + compositeIndexLongStrings.add(compositePath11); + compositeIndexLongStrings.add(compositePath12); + compositeIndexLongStrings.add(compositePath13); + compositeIndexLongStrings.add(compositePath14); + + compositeIndexes.add(compositeIndexSimple); + compositeIndexes.add(compositeIndexMaxColumns); + compositeIndexes.add(compositeIndexPrimitiveValues); + compositeIndexes.add(compositeIndexLongStrings); + + indexingPolicy.compositeIndexes(compositeIndexes); + documentCollection.setIndexingPolicy(indexingPolicy); + + PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition(); + ArrayList partitionKeyPaths = new ArrayList(); + partitionKeyPaths.add("/" + PARTITION_KEY); + 
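+        // the composite/spatial test collection is partitioned on the single "/pk" path assembled above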
partitionKeyDefinition.paths(partitionKeyPaths); + documentCollection.setPartitionKey(partitionKeyDefinition); + + documentCollection.id(UUID.randomUUID().toString()); + + return documentCollection; + } + + public static Document createDocument(AsyncDocumentClient client, String databaseId, String collectionId, Document document) { + return createDocument(client, databaseId, collectionId, document, null); + } + + public static Document createDocument(AsyncDocumentClient client, String databaseId, String collectionId, Document document, RequestOptions options) { + return client.createDocument(TestUtils.getCollectionNameLink(databaseId, collectionId), document, options, false).single().block().getResource(); + } + + public Flux> bulkInsert(AsyncDocumentClient client, + String collectionLink, + List documentDefinitionList, + int concurrencyLevel) { + ArrayList>> result = new ArrayList<>(documentDefinitionList.size()); + for (Document docDef : documentDefinitionList) { + result.add(client.createDocument(collectionLink, docDef, null, false)); + } + + return Flux.merge(Flux.fromIterable(result), concurrencyLevel).publishOn(Schedulers.parallel()); + } + + public Flux> bulkInsert(AsyncDocumentClient client, + String collectionLink, + List documentDefinitionList) { + return bulkInsert(client, collectionLink, documentDefinitionList, DEFAULT_BULK_INSERT_CONCURRENCY_LEVEL); + } + + public static ConsistencyLevel getAccountDefaultConsistencyLevel(AsyncDocumentClient client) { + return client.getDatabaseAccount().single().block().getConsistencyPolicy().defaultConsistencyLevel(); + } + + public static User createUser(AsyncDocumentClient client, String databaseId, User user) { + return client.createUser("dbs/" + databaseId, user, null).single().block().getResource(); + } + + public static User safeCreateUser(AsyncDocumentClient client, String databaseId, User user) { + deleteUserIfExists(client, databaseId, user.id()); + return createUser(client, databaseId, user); + } + + private static DocumentCollection safeCreateCollection(AsyncDocumentClient client, String databaseId, DocumentCollection collection, RequestOptions options) { + deleteCollectionIfExists(client, databaseId, collection.id()); + return createCollection(client, databaseId, collection, options); + } + + public static String getCollectionLink(DocumentCollection collection) { + return collection.selfLink(); + } + + static protected DocumentCollection getCollectionDefinition() { + PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition(); + ArrayList paths = new ArrayList(); + paths.add("/mypk"); + partitionKeyDef.paths(paths); + + DocumentCollection collectionDefinition = new DocumentCollection(); + collectionDefinition.id(UUID.randomUUID().toString()); + collectionDefinition.setPartitionKey(partitionKeyDef); + + return collectionDefinition; + } + + static protected DocumentCollection getCollectionDefinitionWithRangeRangeIndex() { + PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition(); + ArrayList paths = new ArrayList<>(); + paths.add("/mypk"); + partitionKeyDef.paths(paths); + IndexingPolicy indexingPolicy = new IndexingPolicy(); + List includedPaths = new ArrayList<>(); + IncludedPath includedPath = new IncludedPath(); + includedPath.path("/*"); + Collection indexes = new ArrayList<>(); + Index stringIndex = Index.Range(DataType.STRING); + BridgeInternal.setProperty(stringIndex, "precision", -1); + indexes.add(stringIndex); + + Index numberIndex = Index.Range(DataType.NUMBER); + 
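+        // as with the string index above, precision -1 is understood to request maximum precision for this range index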
BridgeInternal.setProperty(numberIndex, "precision", -1); + indexes.add(numberIndex); + includedPath.indexes(indexes); + includedPaths.add(includedPath); + indexingPolicy.setIncludedPaths(includedPaths); + + DocumentCollection collectionDefinition = new DocumentCollection(); + collectionDefinition.setIndexingPolicy(indexingPolicy); + collectionDefinition.id(UUID.randomUUID().toString()); + collectionDefinition.setPartitionKey(partitionKeyDef); + + return collectionDefinition; + } + + public static void deleteCollectionIfExists(AsyncDocumentClient client, String databaseId, String collectionId) { + List res = client.queryCollections("dbs/" + databaseId, + String.format("SELECT * FROM root r where r.id = '%s'", collectionId), null).single().block() + .results(); + if (!res.isEmpty()) { + deleteCollection(client, TestUtils.getCollectionNameLink(databaseId, collectionId)); + } + } + + public static void deleteCollection(AsyncDocumentClient client, String collectionLink) { + client.deleteCollection(collectionLink, null).single().block(); + } + + public static void deleteDocumentIfExists(AsyncDocumentClient client, String databaseId, String collectionId, String docId) { + FeedOptions options = new FeedOptions(); + options.partitionKey(new PartitionKey(docId)); + List res = client + .queryDocuments(TestUtils.getCollectionNameLink(databaseId, collectionId), String.format("SELECT * FROM root r where r.id = '%s'", docId), options) + .single().block().results(); + if (!res.isEmpty()) { + deleteDocument(client, TestUtils.getDocumentNameLink(databaseId, collectionId, docId)); + } + } + + public static void safeDeleteDocument(AsyncDocumentClient client, String documentLink, RequestOptions options) { + if (client != null && documentLink != null) { + try { + client.deleteDocument(documentLink, options).single().block(); + } catch (Exception e) { + CosmosClientException dce = Utils.as(e, CosmosClientException.class); + if (dce == null || dce.statusCode() != 404) { + throw e; + } + } + } + } + + public static void deleteDocument(AsyncDocumentClient client, String documentLink) { + client.deleteDocument(documentLink, null).single().block(); + } + + public static void deleteUserIfExists(AsyncDocumentClient client, String databaseId, String userId) { + List res = client + .queryUsers("dbs/" + databaseId, String.format("SELECT * FROM root r where r.id = '%s'", userId), null) + .single().block().results(); + if (!res.isEmpty()) { + deleteUser(client, TestUtils.getUserNameLink(databaseId, userId)); + } + } + + public static void deleteUser(AsyncDocumentClient client, String userLink) { + client.deleteUser(userLink, null).single().block(); + } + + public static String getDatabaseLink(Database database) { + return database.selfLink(); + } + + static private Database safeCreateDatabase(AsyncDocumentClient client, Database database) { + safeDeleteDatabase(client, database.id()); + return createDatabase(client, database); + } + + static protected Database createDatabase(AsyncDocumentClient client, Database database) { + Flux> databaseObservable = client.createDatabase(database, null); + return databaseObservable.single().block().getResource(); + } + + static protected Database createDatabase(AsyncDocumentClient client, String databaseId) { + Database databaseDefinition = new Database(); + databaseDefinition.id(databaseId); + return createDatabase(client, databaseDefinition); + } + + static protected Database createDatabaseIfNotExists(AsyncDocumentClient client, String databaseId) { + return 
client.queryDatabases(String.format("SELECT * FROM r where r.id = '%s'", databaseId), null).flatMap(p -> Flux.fromIterable(p.results())).switchIfEmpty( + Flux.defer(() -> { + + Database databaseDefinition = new Database(); + databaseDefinition.id(databaseId); + + return client.createDatabase(databaseDefinition, null).map(ResourceResponse::getResource); + }) + ).single().block(); + } + + static protected void safeDeleteDatabase(AsyncDocumentClient client, Database database) { + if (database != null) { + safeDeleteDatabase(client, database.id()); + } + } + + static protected void safeDeleteDatabase(AsyncDocumentClient client, String databaseId) { + if (client != null) { + try { + client.deleteDatabase(TestUtils.getDatabaseNameLink(databaseId), null).single().block(); + } catch (Exception e) { + } + } + } + + static protected void safeDeleteAllCollections(AsyncDocumentClient client, Database database) { + if (database != null) { + List collections = client.readCollections(database.selfLink(), null) + .flatMap(p -> Flux.fromIterable(p.results())) + .collectList() + .single() + .block(); + + for (DocumentCollection collection : collections) { + client.deleteCollection(collection.selfLink(), null).single().block().getResource(); + } + } + } + + static protected void safeDeleteCollection(AsyncDocumentClient client, DocumentCollection collection) { + if (client != null && collection != null) { + try { + client.deleteCollection(collection.selfLink(), null).single().block(); + } catch (Exception e) { + } + } + } + + static protected void safeDeleteCollection(AsyncDocumentClient client, String databaseId, String collectionId) { + if (client != null && databaseId != null && collectionId != null) { + try { + client.deleteCollection("/dbs/" + databaseId + "/colls/" + collectionId, null).single().block(); + } catch (Exception e) { + } + } + } + + static protected void safeCloseAsync(AsyncDocumentClient client) { + if (client != null) { + new Thread(() -> { + try { + client.close(); + } catch (Exception e) { + e.printStackTrace(); + } + }).start(); + } + } + + static protected void safeClose(AsyncDocumentClient client) { + if (client != null) { + try { + client.close(); + } catch (Exception e) { + e.printStackTrace(); + } + } + } + + public void validateSuccess(Flux> observable, + ResourceResponseValidator validator) { + validateSuccess(observable, validator, subscriberValidationTimeout); + } + + public static void validateSuccess(Flux> observable, + ResourceResponseValidator validator, long timeout) { + + TestSubscriber> testSubscriber = new TestSubscriber<>(); + + observable.subscribe(testSubscriber); + testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS); + testSubscriber.assertNoErrors(); + testSubscriber.assertComplete(); + testSubscriber.assertValueCount(1); + validator.validate(testSubscriber.values().get(0)); + } + + public void validateFailure(Flux> observable, + FailureValidator validator) { + validateFailure(observable, validator, subscriberValidationTimeout); + } + + public static void validateFailure(Flux> observable, + FailureValidator validator, long timeout) { + + TestSubscriber> testSubscriber = new TestSubscriber<>(); + + observable.subscribe(testSubscriber); + testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS); + testSubscriber.assertNotComplete(); + testSubscriber.assertTerminated(); + assertThat(testSubscriber.errorCount()).isEqualTo(1); + validator.validate((Throwable) testSubscriber.getEvents().get(1).get(0)); + } + + public void 
validateQuerySuccess(Flux> observable, + FeedResponseListValidator validator) { + validateQuerySuccess(observable, validator, subscriberValidationTimeout); + } + + public static void validateQuerySuccess(Flux> observable, + FeedResponseListValidator validator, long timeout) { + + TestSubscriber> testSubscriber = new TestSubscriber<>(); + + observable.subscribe(testSubscriber); + testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS); + testSubscriber.assertNoErrors(); + testSubscriber.assertComplete(); + validator.validate(testSubscriber.values()); + } + + public void validateQueryFailure(Flux> observable, + FailureValidator validator) { + validateQueryFailure(observable, validator, subscriberValidationTimeout); + } + + public static void validateQueryFailure(Flux> observable, + FailureValidator validator, long timeout) { + + TestSubscriber> testSubscriber = new TestSubscriber<>(); + + observable.subscribe(testSubscriber); + testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS); + testSubscriber.assertNotComplete(); + testSubscriber.assertTerminated(); + assertThat(testSubscriber.errorCount()).isEqualTo(1); + validator.validate((Throwable) testSubscriber.getEvents().get(1).get(0)); + } + + @DataProvider + public static Object[][] clientBuilders() { + return new Object[][]{{createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null)}}; + } + + @DataProvider + public static Object[][] clientBuildersWithSessionConsistency() { + return new Object[][]{ + {createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null)}, + {createDirectRxDocumentClient(ConsistencyLevel.SESSION, Protocol.HTTPS, false, null)}, + {createDirectRxDocumentClient(ConsistencyLevel.SESSION, Protocol.TCP, false, null)} + }; + } + + private static ConsistencyLevel parseConsistency(String consistency) { + if (consistency != null) { + consistency = CaseFormat.UPPER_CAMEL.to(CaseFormat.UPPER_UNDERSCORE, consistency).trim(); + return ConsistencyLevel.valueOf(consistency); + } + + logger.error("INVALID configured test consistency [{}].", consistency); + throw new IllegalStateException("INVALID configured test consistency " + consistency); + } + + static List parsePreferredLocation(String preferredLocations) { + if (StringUtils.isEmpty(preferredLocations)) { + return null; + } + + try { + return objectMapper.readValue(preferredLocations, new TypeReference>() { + }); + } catch (Exception e) { + logger.error("INVALID configured test preferredLocations [{}].", preferredLocations); + throw new IllegalStateException("INVALID configured test preferredLocations " + preferredLocations); + } + } + + static List parseProtocols(String protocols) { + if (StringUtils.isEmpty(protocols)) { + return null; + } + List protocolList = new ArrayList<>(); + try { + List protocolStrings = objectMapper.readValue(protocols, new TypeReference>() { + }); + for(String protocol : protocolStrings) { + protocolList.add(Protocol.valueOf(CaseFormat.UPPER_CAMEL.to(CaseFormat.UPPER_UNDERSCORE, protocol))); + } + return protocolList; + } catch (Exception e) { + logger.error("INVALID configured test protocols [{}].", protocols); + throw new IllegalStateException("INVALID configured test protocols " + protocols); + } + } + + @DataProvider + public static Object[][] simpleClientBuildersWithDirect() { + return simpleClientBuildersWithDirect(toArray(protocols)); + } + + @DataProvider + public static Object[][] simpleClientBuildersWithDirectHttps() { + return simpleClientBuildersWithDirect(Protocol.HTTPS); + } + + private static 
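+    // Builds the rows for the "simple" direct-mode data providers above: one GATEWAY client (SESSION)
+    // plus one DIRECT client per requested protocol at EVENTUAL consistency; each chosen
+    // ConnectionMode/Consistency/Protocol combination is logged before the rows are handed to TestNG.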
Object[][] simpleClientBuildersWithDirect(Protocol... protocols) { + logger.info("Max test consistency to use is [{}]", accountConsistency); + List testConsistencies = ImmutableList.of(ConsistencyLevel.EVENTUAL); + + boolean isMultiMasterEnabled = preferredLocations != null && accountConsistency == ConsistencyLevel.SESSION; + + List builders = new ArrayList<>(); + builders.add(createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null)); + + for (Protocol protocol : protocols) { + testConsistencies.forEach(consistencyLevel -> builders.add(createDirectRxDocumentClient(consistencyLevel, + protocol, + isMultiMasterEnabled, + preferredLocations))); + } + + builders.forEach(b -> logger.info("Will Use ConnectionMode [{}], Consistency [{}], Protocol [{}]", + b.getConnectionPolicy().connectionMode(), + b.getDesiredConsistencyLevel(), + b.getConfigs().getProtocol() + )); + + return builders.stream().map(b -> new Object[]{b}).collect(Collectors.toList()).toArray(new Object[0][]); + } + + @DataProvider + public static Object[][] clientBuildersWithDirect() { + return clientBuildersWithDirectAllConsistencies(toArray(protocols)); + } + + @DataProvider + public static Object[][] clientBuildersWithDirectHttps() { + return clientBuildersWithDirectAllConsistencies(Protocol.HTTPS); + } + + @DataProvider + public static Object[][] clientBuildersWithDirectSession() { + return clientBuildersWithDirectSession(toArray(protocols)); + } + + static Protocol[] toArray(List protocols) { + return protocols.toArray(new Protocol[0]); + } + + private static Object[][] clientBuildersWithDirectSession(Protocol... protocols) { + return clientBuildersWithDirect(new ArrayList() {{ + add(ConsistencyLevel.SESSION); + }}, protocols); + } + + private static Object[][] clientBuildersWithDirectAllConsistencies(Protocol... protocols) { + logger.info("Max test consistency to use is [{}]", accountConsistency); + return clientBuildersWithDirect(desiredConsistencies, protocols); + } + + static List parseDesiredConsistencies(String consistencies) { + if (StringUtils.isEmpty(consistencies)) { + return null; + } + List consistencyLevels = new ArrayList<>(); + try { + List consistencyStrings = objectMapper.readValue(consistencies, new TypeReference>() {}); + for(String consistency : consistencyStrings) { + consistencyLevels.add(ConsistencyLevel.valueOf(CaseFormat.UPPER_CAMEL.to(CaseFormat.UPPER_UNDERSCORE, consistency))); + } + return consistencyLevels; + } catch (Exception e) { + logger.error("INVALID consistency test desiredConsistencies [{}].", consistencies); + throw new IllegalStateException("INVALID configured test desiredConsistencies " + consistencies); + } + } + + static List allEqualOrLowerConsistencies(ConsistencyLevel accountConsistency) { + List testConsistencies = new ArrayList<>(); + switch (accountConsistency) { + case STRONG: + testConsistencies.add(ConsistencyLevel.STRONG); + case BOUNDED_STALENESS: + testConsistencies.add(ConsistencyLevel.BOUNDED_STALENESS); + case SESSION: + testConsistencies.add(ConsistencyLevel.SESSION); + case CONSISTENT_PREFIX: + testConsistencies.add(ConsistencyLevel.CONSISTENT_PREFIX); + case EVENTUAL: + testConsistencies.add(ConsistencyLevel.EVENTUAL); + break; + default: + throw new IllegalStateException("INVALID configured test consistency " + accountConsistency); + } + return testConsistencies; + } + + private static Object[][] clientBuildersWithDirect(List testConsistencies, Protocol... 
protocols) { + boolean isMultiMasterEnabled = preferredLocations != null && accountConsistency == ConsistencyLevel.SESSION; + + List builders = new ArrayList<>(); + builders.add(createGatewayRxDocumentClient(ConsistencyLevel.SESSION, isMultiMasterEnabled, preferredLocations)); + + for (Protocol protocol : protocols) { + testConsistencies.forEach(consistencyLevel -> builders.add(createDirectRxDocumentClient(consistencyLevel, + protocol, + isMultiMasterEnabled, + preferredLocations))); + } + + builders.forEach(b -> logger.info("Will Use ConnectionMode [{}], Consistency [{}], Protocol [{}]", + b.getConnectionPolicy().connectionMode(), + b.getDesiredConsistencyLevel(), + b.getConfigs().getProtocol() + )); + + return builders.stream().map(b -> new Object[]{b}).collect(Collectors.toList()).toArray(new Object[0][]); + } + + static protected Builder createGatewayHouseKeepingDocumentClient() { + ConnectionPolicy connectionPolicy = new ConnectionPolicy(); + connectionPolicy.connectionMode(ConnectionMode.GATEWAY); + RetryOptions options = new RetryOptions(); + options.maxRetryWaitTimeInSeconds(SUITE_SETUP_TIMEOUT); + connectionPolicy.retryOptions(options); + return new Builder().withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.SESSION); + } + + static protected Builder createGatewayRxDocumentClient(ConsistencyLevel consistencyLevel, boolean multiMasterEnabled, List preferredLocations) { + ConnectionPolicy connectionPolicy = new ConnectionPolicy(); + connectionPolicy.connectionMode(ConnectionMode.GATEWAY); + connectionPolicy.usingMultipleWriteLocations(multiMasterEnabled); + connectionPolicy.preferredLocations(preferredLocations); + return new Builder().withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(consistencyLevel); + } + + static protected Builder createGatewayRxDocumentClient() { + return createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null); + } + + static protected Builder createDirectRxDocumentClient(ConsistencyLevel consistencyLevel, + Protocol protocol, + boolean multiMasterEnabled, + List preferredLocations) { + ConnectionPolicy connectionPolicy = new ConnectionPolicy(); + connectionPolicy.connectionMode(ConnectionMode.DIRECT); + + if (preferredLocations != null) { + connectionPolicy.preferredLocations(preferredLocations); + } + + if (multiMasterEnabled && consistencyLevel == ConsistencyLevel.SESSION) { + connectionPolicy.usingMultipleWriteLocations(true); + } + + Configs configs = spy(new Configs()); + doAnswer((Answer)invocation -> protocol).when(configs).getProtocol(); + + return new Builder().withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(consistencyLevel) + .withConfigs(configs); + } + + protected int expectedNumberOfPages(int totalExpectedResult, int maxPageSize) { + return Math.max((totalExpectedResult + maxPageSize - 1 ) / maxPageSize, 1); + } + + @DataProvider(name = "queryMetricsArgProvider") + public Object[][] queryMetricsArgProvider() { + return new Object[][]{ + {true}, + {false}, + }; + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/TestUtils.java 
b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/TestUtils.java new file mode 100644 index 0000000000000..1c8ef3590d025 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/TestUtils.java @@ -0,0 +1,60 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal; + +public class TestUtils { + private static final String DATABASES_PATH_SEGMENT = "dbs"; + private static final String COLLECTIONS_PATH_SEGMENT = "colls"; + private static final String DOCUMENTS_PATH_SEGMENT = "docs"; + private static final String USERS_PATH_SEGMENT = "users"; + + public static String getDatabaseLink(Database database, boolean isNameBased) { + if (isNameBased) { + return getDatabaseNameLink(database.id()); + } else { + return database.selfLink(); + } + } + + public static String getDatabaseNameLink(String databaseId) { + return DATABASES_PATH_SEGMENT + "/" + databaseId; + } + + public static String getCollectionNameLink(String databaseId, String collectionId) { + + return DATABASES_PATH_SEGMENT + "/" + databaseId + "/" + COLLECTIONS_PATH_SEGMENT + "/" + collectionId; + } + + public static String getDocumentNameLink(String databaseId, String collectionId, String docId) { + + return DATABASES_PATH_SEGMENT + "/" + databaseId + "/" + COLLECTIONS_PATH_SEGMENT + "/" +collectionId + "/" + DOCUMENTS_PATH_SEGMENT + "/" + docId; + } + + public static String getUserNameLink(String databaseId, String userId) { + + return DATABASES_PATH_SEGMENT + "/" + databaseId + "/" + USERS_PATH_SEGMENT + "/" + userId; + } + + private TestUtils() { + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/TimeTokenTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/TimeTokenTest.java new file mode 100644 index 0000000000000..8cc1477868426 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/TimeTokenTest.java @@ -0,0 +1,61 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * 
furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal; + +import org.testng.annotations.AfterTest; +import org.testng.annotations.BeforeTest; +import org.testng.annotations.Test; + +import java.time.format.DateTimeFormatter; +import java.util.Locale; + +public class TimeTokenTest { + + private Locale defaultLocale; + + @BeforeTest(groups = { "unit" }) + public void beforeMethod() { + defaultLocale = Locale.getDefault(); + } + + @Test(groups = { "unit" }) + public void nonLocaleUS() { + Locale.setDefault(Locale.ITALIAN); + DateTimeFormatter RFC_1123_DATE_TIME = + DateTimeFormatter.ofPattern("EEE, dd MMM yyyy HH:mm:ss zzz", Locale.US); + String time = Utils.nowAsRFC1123(); + Locale.setDefault(Locale.US); + RFC_1123_DATE_TIME.parse(time); + } + + @AfterTest(groups = { "unit" }) + public void afterMethod() { + // set back default locale before test + if (defaultLocale != null) { + Locale.setDefault(defaultLocale); + } else { + Locale.setDefault(Locale.US); + } + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/caches/AsyncCacheTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/caches/AsyncCacheTest.java new file mode 100644 index 0000000000000..1df2b6547073c --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/caches/AsyncCacheTest.java @@ -0,0 +1,89 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos.internal.caches; + +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; +import java.util.stream.Collectors; + +import static org.assertj.core.api.Assertions.assertThat; + +public class AsyncCacheTest { + + private static final int TIMEOUT = 2000; + + @Test(groups = { "unit" }, timeOut = TIMEOUT) + public void getAsync() { + AtomicInteger numberOfCacheRefreshes = new AtomicInteger(0); + final Function> refreshFunc = key -> { + numberOfCacheRefreshes.incrementAndGet(); + return Mono.just(key*2); + }; + + AsyncCache cache = new AsyncCache<>(); + + List> tasks = new ArrayList<>(); + for (int i = 0; i < 10; i++) { + for (int j = 0; j < 10; j++) { + int key = j; + tasks.add(cache.getAsync(key, -1, () -> refreshFunc.apply(key))); + } + } + + Flux o = Flux.merge(tasks.stream().map(Mono::flux).collect(Collectors.toList())); + o.collectList().single().block(); + + assertThat(numberOfCacheRefreshes.get()).isEqualTo(10); + assertThat(cache.getAsync(2, -1, () -> refreshFunc.apply(2)).block()).isEqualTo(4); + + Function> refreshFunc1 = key -> { + numberOfCacheRefreshes.incrementAndGet(); + return Mono.just(key * 2 + 1); + }; + + List> tasks1 = new ArrayList<>(); + for (int i = 0; i < 10; i++) { + for (int j = 0; j < 10; j++) { + int key = j; + tasks1.add(cache.getAsync(key, key * 2, () -> refreshFunc1.apply(key))); + } + + for (int j = 0; j < 10; j++) { + int key = j; + tasks1.add(cache.getAsync(key, key * 2 , () -> refreshFunc1.apply(key))); + } + } + + Flux o1 = Flux.merge(tasks1.stream().map(Mono::flux).collect(Collectors.toList())); + o1.collectList().single().block(); + + assertThat(numberOfCacheRefreshes.get()).isEqualTo(20); + assertThat(cache.getAsync(2, -1, () -> refreshFunc.apply(2)).block()).isEqualTo(5); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/AddressResolverTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/AddressResolverTest.java new file mode 100644 index 0000000000000..64676e488db0b --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/AddressResolverTest.java @@ -0,0 +1,993 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.directconnectivity; + + +import com.azure.data.cosmos.internal.DocumentCollection; +import com.azure.data.cosmos.PartitionKeyRangeGoneException; +import com.azure.data.cosmos.PartitionKey; +import com.azure.data.cosmos.PartitionKeyDefinition; +import com.azure.data.cosmos.internal.PartitionKeyRange; +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.ICollectionRoutingMapCache; +import com.azure.data.cosmos.InvalidPartitionException; +import com.azure.data.cosmos.NotFoundException; +import com.azure.data.cosmos.internal.OperationType; +import com.azure.data.cosmos.internal.ResourceType; +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import com.azure.data.cosmos.internal.caches.RxCollectionCache; +import com.azure.data.cosmos.internal.routing.CollectionRoutingMap; +import com.azure.data.cosmos.internal.routing.IServerIdentity; +import com.azure.data.cosmos.internal.routing.InMemoryCollectionRoutingMap; +import com.azure.data.cosmos.internal.routing.PartitionKeyInternalHelper; +import com.azure.data.cosmos.internal.routing.PartitionKeyRangeIdentity; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import org.apache.commons.lang3.NotImplementedException; +import org.apache.commons.lang3.ObjectUtils; +import org.apache.commons.lang3.mutable.MutableObject; +import org.apache.commons.lang3.tuple.ImmutablePair; +import org.mockito.Mockito; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; +import reactor.core.publisher.Mono; + +import java.net.URI; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.function.Function; +import java.util.stream.Collectors; + +import static org.assertj.core.api.AssertionsForClassTypes.assertThat; +import static org.assertj.core.api.AssertionsForClassTypes.fail; + +/** + * Tests that partition manager correctly resolves addresses for requests and does appropriate number of cache refreshes. 
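+ * <p>
+ * The collection cache, collection routing map cache, and address cache are all mocked; each mock
+ * counts how often it is refreshed, and the individual scenarios assert those counts (and the
+ * finally resolved addresses) for combinations of stale caches, forced refreshes, and name- vs.
+ * rid-based requests.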
+ */ +public class AddressResolverTest { + + private static final Logger logger = LoggerFactory.getLogger(AddressResolverTest.class); + private static final String DOCUMENT_TEST_URL = "dbs/IXYFAA==/colls/IXYFAOHEBPM=/docs/IXYFAOHEBPMBAAAAAAAAAA==/"; + private AddressResolver addressResolver; + private RxCollectionCache collectionCache; + private ICollectionRoutingMapCache collectionRoutingMapCache; + private IAddressCache fabricAddressCache; + + private int collectionCacheRefreshedCount; + private Map routingMapRefreshCount; + private Map addressesRefreshCount; + + @BeforeClass(groups = "unit") + public void setup() throws Exception { + this.addressResolver = new AddressResolver(); + this.collectionCache = Mockito.mock(RxCollectionCache.class); + this.collectionRoutingMapCache = Mockito.mock(ICollectionRoutingMapCache.class); + this.fabricAddressCache = Mockito.mock(IAddressCache.class); + this.addressResolver.initializeCaches(this.collectionCache, this.collectionRoutingMapCache, this.fabricAddressCache); + + this.collection1 = new DocumentCollection(); + this.collection1.id("coll"); + this.collection1.resourceId("rid1"); + PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition(); + partitionKeyDef.paths(ImmutableList.of("/field1")); + this.collection1.setPartitionKey(partitionKeyDef); + + this.collection2 = new DocumentCollection(); + this.collection2.id("coll"); + this.collection2.resourceId("rid2"); + new PartitionKeyDefinition(); + partitionKeyDef.paths(ImmutableList.of("/field1")); + this.collection2.setPartitionKey(partitionKeyDef); + + Function>, Void> addPartitionKeyRangeFunc = listArg -> { + listArg.forEach(tuple -> ((ServiceIdentity) tuple.right).partitionKeyRangeIds.add(new PartitionKeyRangeIdentity(collection1.resourceId(), tuple.left.id()))); + return null; + }; + + List> rangesBeforeSplit1 = + new ArrayList<>(); + ServiceIdentity serverServiceIdentity = new ServiceIdentity("federation1", new URI("fabric://serverservice1"), false); + + rangesBeforeSplit1.add( + ImmutablePair.of(new PartitionKeyRange("0", PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey, + PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey), serverServiceIdentity)); + + addPartitionKeyRangeFunc.apply(rangesBeforeSplit1); + + + this.routingMapCollection1BeforeSplit = + InMemoryCollectionRoutingMap.tryCreateCompleteRoutingMap( + rangesBeforeSplit1, + collection1.resourceId()); + + List> rangesAfterSplit1 = + new ArrayList<>(); + ServiceIdentity serverServiceIdentity2 = new ServiceIdentity("federation1", new URI("fabric://serverservice2"), false); + ServiceIdentity serverServiceIdentity3 = new ServiceIdentity("federation1", new URI("fabric://serverservice3"), false); + + rangesAfterSplit1.add( + ImmutablePair.of( + new PartitionKeyRange("1", PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey, "5E", ImmutableList.of("0")), + serverServiceIdentity2)); + + rangesAfterSplit1.add( + ImmutablePair.of( + new PartitionKeyRange("2", "5E", PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey, ImmutableList.of("0")), + serverServiceIdentity3)); + + addPartitionKeyRangeFunc.apply(rangesAfterSplit1); + + this.routingMapCollection1AfterSplit = InMemoryCollectionRoutingMap.tryCreateCompleteRoutingMap(rangesAfterSplit1, collection1.resourceId()); + + List> rangesBeforeSplit2 = + new ArrayList<>(); + ServiceIdentity serverServiceIdentity4 = new ServiceIdentity("federation1", new URI("fabric://serverservice4"), false); + + rangesBeforeSplit2.add( + 
ImmutablePair.of( + new PartitionKeyRange("0", PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey, PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey), + serverServiceIdentity4)); + + addPartitionKeyRangeFunc.apply(rangesBeforeSplit2); + + + this.routingMapCollection2BeforeSplit = InMemoryCollectionRoutingMap.tryCreateCompleteRoutingMap(rangesBeforeSplit2, collection2.resourceId()); + + List> rangesAfterSplit2 = + new ArrayList<>(); + + ServiceIdentity serverServiceIdentity5 = new ServiceIdentity("federation1", new URI("fabric://serverservice5"), false); + ServiceIdentity serverServiceIdentity6 = new ServiceIdentity("federation1", new URI("fabric://serverservice6"), false); + rangesAfterSplit2.add( + ImmutablePair.of( + new PartitionKeyRange("1", PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey, "5E", ImmutableList.of("0")), + serverServiceIdentity5)); + + rangesAfterSplit2.add( + ImmutablePair.of( + new PartitionKeyRange("2", "5E", PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey, ImmutableList.of("0")), + serverServiceIdentity6)); + + + addPartitionKeyRangeFunc.apply(rangesAfterSplit2); + + + this.routingMapCollection2AfterSplit = InMemoryCollectionRoutingMap.tryCreateCompleteRoutingMap(rangesAfterSplit2, collection2.resourceId()); + } + + private void TestCacheRefreshWhileRouteByPartitionKey( + DocumentCollection collectionBeforeRefresh, + DocumentCollection collectionAfterRefresh, + Map routingMapBeforeRefresh, + Map routingMapAfterRefresh, + Map addressesBeforeRefresh, + Map addressesAfterRefresh, + AddressInformation[] targetAddresses, + ServiceIdentity targetServiceIdentity, + PartitionKeyRange targetPartitionKeyRange, + boolean forceNameCacheRefresh, + boolean forceRoutingMapRefresh, + boolean forceAddressRefresh, + int collectionCacheRefreshed, + int routingMapCacheRefreshed, + int addressCacheRefreshed, + boolean nameBased) throws Exception { + + if (targetServiceIdentity != null && targetPartitionKeyRange != null) { + targetServiceIdentity.partitionKeyRangeIds.add(new PartitionKeyRangeIdentity(collectionAfterRefresh != null ? 
collectionAfterRefresh.resourceId() : collectionBeforeRefresh.resourceId(), targetPartitionKeyRange.id())); + } + + this.initializeMocks( + collectionBeforeRefresh, + collectionAfterRefresh, + routingMapBeforeRefresh, + routingMapAfterRefresh, + addressesBeforeRefresh, + addressesAfterRefresh); + + RxDocumentServiceRequest request; + if (nameBased) { + request = RxDocumentServiceRequest.create( + OperationType.Read, + ResourceType.Document, + "dbs/db/colls/coll/docs/doc1", + new HashMap<>()); + } else { + request = RxDocumentServiceRequest.create( + OperationType.Read, + ResourceType.Document, + DOCUMENT_TEST_URL, + new HashMap<>()); + } + + request.forceNameCacheRefresh = forceNameCacheRefresh; + request.forcePartitionKeyRangeRefresh = forceRoutingMapRefresh; + request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, new PartitionKey("foo").toString()); + AddressInformation[] resolvedAddresses; + try { + resolvedAddresses = this.addressResolver.resolveAsync(request, forceAddressRefresh).block(); + } catch (RuntimeException e) { + throw (Exception) e.getCause(); + } finally { + assertThat(collectionCacheRefreshed).isEqualTo(collectionCacheRefreshedCount).describedAs("collection cache refresh count mismath"); + + assertThat(routingMapCacheRefreshed).isEqualTo(routingMapRefreshCount.values().stream().mapToInt(v -> v).sum()).describedAs("routing map cache refresh count mismath"); + assertThat(addressCacheRefreshed).isEqualTo(addressesRefreshCount.values().stream().mapToInt(v -> v).sum()).describedAs("address cache refresh count mismatch"); + assertThat(routingMapRefreshCount.entrySet().stream().filter(pair -> pair.getValue() > 1).count()).isEqualTo(0); + assertThat(addressesRefreshCount.entrySet().stream().filter(pair -> pair.getValue() > 1).count()).isEqualTo(0); + } + + assertThat(targetAddresses[0].getPhysicalUri()).isEqualTo(resolvedAddresses[0].getPhysicalUri()); + // Assert.AreEqual(targetServiceIdentity, request.requestContext.TargetIdentity); + assertThat(targetPartitionKeyRange.id()).isEqualTo(request.requestContext.resolvedPartitionKeyRange.id()); + } + + private void TestCacheRefreshWhileRouteByPartitionKeyRangeId( + DocumentCollection collectionBeforeRefresh, + DocumentCollection collectionAfterRefresh, + Map routingMapBeforeRefresh, + Map routingMapAfterRefresh, + Map addressesBeforeRefresh, + Map addressesAfterRefresh, + PartitionKeyRangeIdentity rangeIdentity, + AddressInformation[] targetAddresses, + ServiceIdentity targetServiceIdentity, + PartitionKeyRange targetPartitionKeyRange, + boolean forceNameCacheRefresh, + boolean forceRoutingMapRefresh, + boolean forceAddressRefresh, + int collectionCacheRefreshed, + int routingMapCacheRefreshed, + int addressCacheRefreshed, + boolean nameBased) throws Exception { + + if (targetServiceIdentity != null && targetPartitionKeyRange != null) { + targetServiceIdentity.partitionKeyRangeIds.add(new PartitionKeyRangeIdentity(collectionAfterRefresh != null ? 
collectionAfterRefresh.resourceId() : collectionBeforeRefresh.resourceId(), targetPartitionKeyRange.id())); + } + + this.initializeMocks( + collectionBeforeRefresh, + collectionAfterRefresh, + routingMapBeforeRefresh, + routingMapAfterRefresh, + addressesBeforeRefresh, + addressesAfterRefresh); + + RxDocumentServiceRequest request; + if (nameBased) { + request = RxDocumentServiceRequest.createFromName( + OperationType.Read, + "dbs/db/colls/coll/docs/doc1", + ResourceType.Document); + } else { + request = RxDocumentServiceRequest.create( + OperationType.Read, + ResourceType.Document, + DOCUMENT_TEST_URL, + new HashMap<>()); + } + + request.forceNameCacheRefresh = forceNameCacheRefresh; + request.forcePartitionKeyRangeRefresh = forceRoutingMapRefresh; + request.routeTo(rangeIdentity); + AddressInformation[] resolvedAddresses; + try { + resolvedAddresses = this.addressResolver.resolveAsync(request, forceAddressRefresh).block(); + } catch (RuntimeException e) { + throw (Exception) e.getCause(); + } finally { + assertThat(collectionCacheRefreshed).isEqualTo(collectionCacheRefreshedCount).describedAs("collection cache refresh count mismath"); + + assertThat(routingMapCacheRefreshed).isEqualTo(routingMapRefreshCount.values().stream().mapToInt(v -> v).sum()).describedAs("routing map cache refresh count mismath"); + assertThat(addressCacheRefreshed).isEqualTo(addressesRefreshCount.values().stream().mapToInt(v -> v).sum()).describedAs("address cache refresh count mismatch"); + + + assertThat(routingMapRefreshCount.entrySet().stream().filter(pair -> pair.getValue() > 1).count()).isEqualTo(0); + assertThat(addressesRefreshCount.entrySet().stream().filter(pair -> pair.getValue() > 1).count()).isEqualTo(0); + } + + assertThat(targetAddresses[0].getPhysicalUri()).isEqualTo(resolvedAddresses[0].getPhysicalUri()); + // Assert.AreEqual(targetServiceIdentity, request.requestContext.TargetIdentity); + assertThat(targetPartitionKeyRange.id()).isEqualTo(request.requestContext.resolvedPartitionKeyRange.id()); + } + + private void initializeMocks( + DocumentCollection collectionBeforeRefresh, + DocumentCollection collectionAfterRefresh, + Map routingMapBeforeRefresh, + Map routingMapAfterRefreshInitial, + Map addressesBeforeRefresh, + Map addressesAfterRefreshInitial) { + final Map routingMapAfterRefresh = ObjectUtils.defaultIfNull(routingMapAfterRefreshInitial, routingMapBeforeRefresh); + final Map addressesAfterRefresh = ObjectUtils.defaultIfNull(addressesAfterRefreshInitial, addressesBeforeRefresh); + + // Collection cache + MutableObject currentCollection = new MutableObject(collectionBeforeRefresh); + this.collectionCacheRefreshedCount = 0; + + Mockito.doAnswer(invocationOnMock -> { + RxDocumentServiceRequest request = invocationOnMock.getArgumentAt(0, RxDocumentServiceRequest.class); + if (request.forceNameCacheRefresh && collectionAfterRefresh != null) { + currentCollection.setValue(collectionAfterRefresh); + AddressResolverTest.this.collectionCacheRefreshedCount++; + request.forceNameCacheRefresh = false; + return Mono.just(currentCollection.getValue()); + } + + if (request.forceNameCacheRefresh && collectionAfterRefresh == null) { + currentCollection.setValue(null); + AddressResolverTest.this.collectionCacheRefreshedCount++; + request.forceNameCacheRefresh = false; + return Mono.error(new NotFoundException()); + } + + if (!request.forceNameCacheRefresh && currentCollection.getValue() == null) { + return Mono.error(new NotFoundException()); + + } + + if (!request.forceNameCacheRefresh && 
currentCollection.getValue() != null) { + return Mono.just(currentCollection.getValue()); + } + + return Mono.empty(); + }).when(this.collectionCache).resolveCollectionAsync(Mockito.any(RxDocumentServiceRequest.class)); + + // Routing map cache + Map currentRoutingMap = + new HashMap<>(routingMapBeforeRefresh); + this.routingMapRefreshCount = new HashMap<>(); + + Mockito.doAnswer(invocationOnMock -> { + String collectionRid = invocationOnMock.getArgumentAt(0, String.class); + CollectionRoutingMap previousValue = invocationOnMock.getArgumentAt(1, CollectionRoutingMap.class); + + return collectionRoutingMapCache.tryLookupAsync(collectionRid, previousValue, false, null); + }).when(this.collectionRoutingMapCache).tryLookupAsync(Mockito.anyString(), Mockito.any(CollectionRoutingMap.class), Mockito.anyMap()); + + // Refresh case + Mockito.doAnswer(invocationOnMock -> { + String collectionRid = invocationOnMock.getArgumentAt(0, String.class); + CollectionRoutingMap previousValue = invocationOnMock.getArgumentAt(1, CollectionRoutingMap.class); + + if (previousValue == null) { + return Mono.justOrEmpty(currentRoutingMap.get(collectionRid)); + } + + if (previousValue != null && currentRoutingMap.containsKey(previousValue.getCollectionUniqueId()) && + currentRoutingMap.get(previousValue.getCollectionUniqueId()) == previousValue) { + + + if (previousValue != null && previousValue.getCollectionUniqueId() != collectionRid) { + throw new RuntimeException("InvalidOperation"); + } + + if (routingMapAfterRefresh.containsKey(collectionRid)) { + currentRoutingMap.put(collectionRid, routingMapAfterRefresh.get(collectionRid)); + } else { + currentRoutingMap.remove(collectionRid); + } + + if (!routingMapRefreshCount.containsKey(collectionRid)) { + routingMapRefreshCount.put(collectionRid, 1); + } else { + routingMapRefreshCount.put(collectionRid, routingMapRefreshCount.get(collectionRid) + 1); + } + + + return Mono.justOrEmpty(currentRoutingMap.get(collectionRid)); + } + + return Mono.error(new NotImplementedException("not mocked")); + }).when(this.collectionRoutingMapCache).tryLookupAsync(Mockito.anyString(), Mockito.any(CollectionRoutingMap.class), Mockito.anyBoolean(), Mockito.anyMap()); + + + // Fabric Address Cache + Map currentAddresses = + new HashMap<>(addressesBeforeRefresh); + this.addressesRefreshCount = new HashMap<>(); + + // No refresh case + // + Mockito.doAnswer(invocationOnMock -> { + RxDocumentServiceRequest request = invocationOnMock.getArgumentAt(0, RxDocumentServiceRequest.class); + PartitionKeyRangeIdentity pkri = invocationOnMock.getArgumentAt(1, PartitionKeyRangeIdentity.class); + Boolean forceRefresh = invocationOnMock.getArgumentAt(2, Boolean.class); + + if (!forceRefresh) { + return Mono.justOrEmpty(currentAddresses.get(findMatchingServiceIdentity(currentAddresses, pkri))); + } else { + + ServiceIdentity si; + + if ((si = findMatchingServiceIdentity(addressesAfterRefresh, pkri)) != null) { + currentAddresses.put(si, addressesAfterRefresh.get(si)); + } else { + + si = findMatchingServiceIdentity(currentAddresses, pkri); + currentAddresses.remove(si); + } + + if (si == null) { + si = ServiceIdentity.dummyInstance; + } + + if (!addressesRefreshCount.containsKey(si)) { + addressesRefreshCount.put(si, 1); + } else { + addressesRefreshCount.put(si, addressesRefreshCount.get(si) + 1); + } + + // TODO: what to return in this case if it is null!! 
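+                // When neither the refreshed map nor the current map knows this partition key range,
+                // the shared dummyInstance stands in so the forced refresh is still recorded in
+                // addressesRefreshCount; justOrEmpty then completes empty because no addresses are
+                // stored under the dummy identity.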
+ return Mono.justOrEmpty(currentAddresses.get(si)); + } + }).when(fabricAddressCache).tryGetAddresses(Mockito.any(RxDocumentServiceRequest.class), Mockito.any(PartitionKeyRangeIdentity.class), Mockito.anyBoolean()); + } + + private static ServiceIdentity findMatchingServiceIdentity(Map map, PartitionKeyRangeIdentity pkri) { + for (ServiceIdentity si : map.keySet()) { + if (si.partitionKeyRangeIds.contains(pkri)) { + return si; + } + + } + return null; + } + + private final AddressInformation[] addresses1 = {new AddressInformation(true, true, "tcp://host/partition1", Protocol.HTTPS)}; + private final AddressInformation[] addresses2 = {new AddressInformation(true, true, "tcp://host/partition2", Protocol.HTTPS)}; + private final AddressInformation[] addresses3 = {new AddressInformation(true, true, "tcp://host/partition3", Protocol.HTTPS)}; + + private DocumentCollection collection1; + private DocumentCollection collection2; + private CollectionRoutingMap routingMapCollection1BeforeSplit; + private CollectionRoutingMap routingMapCollection1AfterSplit; + private CollectionRoutingMap routingMapCollection2BeforeSplit; + private CollectionRoutingMap routingMapCollection2AfterSplit; + + @Test(groups = "unit") + public void testCacheRefreshesWhileRoutingByPartitionKey() throws Exception { + logger.info("ALL caches are up to date. Name Based"); + this.TestCacheRefreshWhileRouteByPartitionKey( + this.collection1, + this.collection1, + ImmutableMap.of(this.collection1.resourceId(), this.routingMapCollection1BeforeSplit), + null, + ImmutableMap.of(getServiceIdentityAt(this.routingMapCollection1BeforeSplit, 0), this.addresses1), + null, + this.addresses1, + getServiceIdentityAt(this.routingMapCollection1BeforeSplit, 0), + getRangeAt(this.routingMapCollection1BeforeSplit, 0), + false, + false, + false, + 0, + 0, + 0, + true); + + logger.info("ALL caches are up to date. Rid Based"); + this.TestCacheRefreshWhileRouteByPartitionKey( + this.collection1, + this.collection1, + ImmutableMap.of(this.collection1.resourceId(), this.routingMapCollection1BeforeSplit), + null, + ImmutableMap.of(getServiceIdentityAt(this.routingMapCollection1BeforeSplit, 0), this.addresses1), + null, + this.addresses1, + getServiceIdentityAt(this.routingMapCollection1BeforeSplit, 0), + getRangeAt(this.routingMapCollection1BeforeSplit, 0), + false, + false, + false, + 0, + 0, + 0, + true); + + logger.info("Address cache is stale. Force Refresh. Name Based"); + this.TestCacheRefreshWhileRouteByPartitionKey( + this.collection1, + this.collection1, + ImmutableMap.of(this.collection1.resourceId(), this.routingMapCollection1BeforeSplit), + null, + ImmutableMap.of(getServiceIdentityAt(this.routingMapCollection1BeforeSplit, 0), this.addresses1), + ImmutableMap.of(getServiceIdentityAt(this.routingMapCollection1BeforeSplit, 0), this.addresses2), + this.addresses2, + getServiceIdentityAt(this.routingMapCollection1BeforeSplit, 0), + this.routingMapCollection1BeforeSplit.getOrderedPartitionKeyRanges().get(0), + false, + false, + true, + 0, + 0, + 1, + true); + + logger.info("Address cache is stale. Force Refresh. 
Rid Based"); + this.TestCacheRefreshWhileRouteByPartitionKey( + this.collection1, + this.collection1, + ImmutableMap.of(this.collection1.resourceId(), this.routingMapCollection1BeforeSplit), + null, + ImmutableMap.of(getServiceIdentityAt(this.routingMapCollection1BeforeSplit, 0), this.addresses1), + ImmutableMap.of(getServiceIdentityAt(this.routingMapCollection1BeforeSplit, 0), this.addresses2), + this.addresses2, + getServiceIdentityAt(this.routingMapCollection1BeforeSplit, 0), + getRangeAt(this.routingMapCollection1BeforeSplit, 0), + false, + false, + true, + 0, + 0, + 1, + false); + + logger.info("Routing map cache is stale. Force Refresh. Name based"); + this.TestCacheRefreshWhileRouteByPartitionKey( + this.collection1, + this.collection1, + ImmutableMap.of(this.collection1.resourceId(), this.routingMapCollection1BeforeSplit), + ImmutableMap.of(this.collection1.resourceId(), this.routingMapCollection1AfterSplit), + ImmutableMap.of( + getServiceIdentityAt(this.routingMapCollection1BeforeSplit, 0), this.addresses1, + getServiceIdentityAt(this.routingMapCollection1AfterSplit, 0), this.addresses2, + getServiceIdentityAt(this.routingMapCollection1AfterSplit, 1), this.addresses3), + null, + this.addresses2, + getServiceIdentityAt(this.routingMapCollection1AfterSplit, 0), + getRangeAt(this.routingMapCollection1AfterSplit, 0), + false, + true, + false, + 0, + 1, + 0, + true); + + logger.info("Routing map cache is stale. Force Refresh. Rid based"); + this.TestCacheRefreshWhileRouteByPartitionKey( + this.collection1, + this.collection1, + ImmutableMap.of(this.collection1.resourceId(), this.routingMapCollection1BeforeSplit), + ImmutableMap.of(this.collection1.resourceId(), this.routingMapCollection1AfterSplit), + ImmutableMap.of( + getServiceIdentityAt(this.routingMapCollection1BeforeSplit, 0), this.addresses1, + getServiceIdentityAt(this.routingMapCollection1AfterSplit, 0), this.addresses2, + getServiceIdentityAt(this.routingMapCollection1AfterSplit, 1), this.addresses3), + null, + this.addresses2, + getServiceIdentityAt(this.routingMapCollection1AfterSplit, 0), + getRangeAt(this.routingMapCollection1AfterSplit, 0), + false, + true, + false, + 0, + 1, + 0, + false); + + logger.info("Name cache is stale. Force Refresh. Name based"); + this.TestCacheRefreshWhileRouteByPartitionKey( + this.collection1, + this.collection2, + ImmutableMap.of(this.collection2.resourceId(), this.routingMapCollection2BeforeSplit), + null, + ImmutableMap.of( + getServiceIdentityAt(this.routingMapCollection2BeforeSplit, 0), this.addresses1), + null, + this.addresses1, + getServiceIdentityAt(this.routingMapCollection2BeforeSplit, 0), + getRangeAt(this.routingMapCollection2BeforeSplit, 0), + true, + false, + false, + 1, + 0, + 0, + true); + + logger.info("Name cache is stale (collection deleted new one created same name). Routing Map Cache returns null"); + this.TestCacheRefreshWhileRouteByPartitionKey( + this.collection1, + this.collection2, + ImmutableMap.of(this.collection2.resourceId(), this.routingMapCollection2BeforeSplit), + null, + ImmutableMap.of( + getServiceIdentityAt(this.routingMapCollection2BeforeSplit, 0), this.addresses1), + null, + this.addresses1, + getServiceIdentityAt(this.routingMapCollection2BeforeSplit, 0), + getRangeAt(this.routingMapCollection2BeforeSplit, 0), + false, + false, + false, + 1, + 0, + 0, + true); + + logger.info("Routing map cache is stale (split happened). 
Address Cache returns null"); + this.TestCacheRefreshWhileRouteByPartitionKey( + this.collection1, + this.collection1, + ImmutableMap.of(this.collection1.resourceId(), this.routingMapCollection1BeforeSplit), + ImmutableMap.of(this.collection1.resourceId(), this.routingMapCollection1AfterSplit), + ImmutableMap.of(getServiceIdentityAt(this.routingMapCollection1AfterSplit, 0), this.addresses1), + null, + this.addresses1, + getServiceIdentityAt(this.routingMapCollection1AfterSplit, 0), + getRangeAt(this.routingMapCollection1AfterSplit, 0), + false, + false, + false, + 1, + 1, + 0, + true); + + + logger.info("Collection cache is stale (deleted created same name). Routing map cache is stale for new collection (split happened). Address Cache returns null"); + this.TestCacheRefreshWhileRouteByPartitionKey( + this.collection1, + this.collection2, + ImmutableMap.of(this.collection1.resourceId(), this.routingMapCollection1BeforeSplit, + this.collection2.resourceId(), this.routingMapCollection2BeforeSplit), + ImmutableMap.of(this.collection2.resourceId(), this.routingMapCollection2AfterSplit), + ImmutableMap.of(getServiceIdentityAt(this.routingMapCollection2AfterSplit, 0), this.addresses1), + null, + this.addresses1, + getServiceIdentityAt(this.routingMapCollection2AfterSplit, 0), + getRangeAt(this.routingMapCollection2AfterSplit, 0), + false, + false, + false, + 1, + 1, + 0, + true); + + logger.info("Collection cache is stale (collection deleted). Routing map cache is stale (collection deleted). Address Cache returns null"); + try { + this.TestCacheRefreshWhileRouteByPartitionKey( + this.collection1, + null, + ImmutableMap.of(this.collection1.resourceId(), this.routingMapCollection1BeforeSplit), + ImmutableMap.of(), + ImmutableMap.of(), + null, + null, + null, + null, + false, + false, + false, + 1, + 0, + 0, + true); + + fail("Expected NotFoundException"); + } catch (NotFoundException e) { + } + + logger.info("Collection cache is stale (collection deleted). Routing map cache returns null."); + try { + this.TestCacheRefreshWhileRouteByPartitionKey( + this.collection1, + null, + ImmutableMap.of(), + null, + ImmutableMap.of(), + null, + null, + null, + null, + false, + false, + false, + 1, + 0, + 0, + true); + + fail("Expected NotFoundException"); + } catch (NotFoundException e) { + } + } + + private static PartitionKeyRange getRangeAt(CollectionRoutingMap routingMap, int index) { + return routingMap.getOrderedPartitionKeyRanges().get(index); + } + + private static ServiceIdentity getServiceIdentityAt(CollectionRoutingMap routingMap, int index) { + return (ServiceIdentity) routingMap.tryGetInfoByPartitionKeyRangeId(routingMap.getOrderedPartitionKeyRanges().get(index).id()); + } + + @Test(groups = "unit") + public void testCacheRefreshesWhileRoutingByPartitionKeyRangeId() throws Exception { + logger.info("ALL caches are up to date. Name Based"); + this.TestCacheRefreshWhileRouteByPartitionKeyRangeId( + this.collection1, + this.collection1, + ImmutableMap.of(this.collection1.resourceId(), this.routingMapCollection1BeforeSplit), + null, + ImmutableMap.of(getServiceIdentityAt(this.routingMapCollection1BeforeSplit, 0), this.addresses1), + null, + new PartitionKeyRangeIdentity(this.collection1.resourceId(), "0"), + this.addresses1, + getServiceIdentityAt(this.routingMapCollection1BeforeSplit, 0), + getRangeAt(this.routingMapCollection1BeforeSplit, 0), + false, + false, + false, + 0, + 0, + 0, + true); + + logger.info("ALL caches are up to date. Name Based. 
Non existent range with collection rid"); + try { + this.TestCacheRefreshWhileRouteByPartitionKeyRangeId( + this.collection1, + this.collection1, + ImmutableMap.of(this.collection1.resourceId(), this.routingMapCollection1BeforeSplit), + null, + ImmutableMap.of(getServiceIdentityAt(this.routingMapCollection1BeforeSplit, 0), this.addresses1), + null, + new PartitionKeyRangeIdentity(this.collection1.resourceId(), "1"), + this.addresses1, + getServiceIdentityAt(this.routingMapCollection1BeforeSplit, 0), + getRangeAt(this.routingMapCollection1BeforeSplit, 0), + false, + false, + false, + 0, + 1, + 0, + true); + + fail("Should have gotten PartitionKeyRangeGoneException"); + } catch (PartitionKeyRangeGoneException e) { + } + + logger.info("ALL caches are up to date. Name Based. Non existent range withOUT collection rid"); + try { + this.TestCacheRefreshWhileRouteByPartitionKeyRangeId( + this.collection1, + this.collection1, + ImmutableMap.of(this.collection1.resourceId(), this.routingMapCollection1BeforeSplit), + null, + ImmutableMap.of(getServiceIdentityAt(this.routingMapCollection1BeforeSplit, 0), this.addresses1), + null, + new PartitionKeyRangeIdentity("1"), + this.addresses1, + getServiceIdentityAt(this.routingMapCollection1BeforeSplit, 0), + getRangeAt(this.routingMapCollection1BeforeSplit, 0), + false, + false, + false, + 1, + 1, + 0, + true); + + fail("Should have gotten PartitionKeyRangeGoneException"); + } catch (PartitionKeyRangeGoneException e) { + } + + logger.info("ALL caches are up to date. Name Based.RANGE doesn't exist in routing map because split happened."); + try { + this.TestCacheRefreshWhileRouteByPartitionKeyRangeId( + this.collection1, + this.collection1, + ImmutableMap.of(this.collection1.resourceId(), this.routingMapCollection1AfterSplit), + null, + ImmutableMap.of(getServiceIdentityAt(this.routingMapCollection1AfterSplit, 0), this.addresses1), + null, + new PartitionKeyRangeIdentity(collection1.resourceId(), "0"), + this.addresses1, + getServiceIdentityAt(this.routingMapCollection1BeforeSplit, 0), + getRangeAt(this.routingMapCollection1BeforeSplit, 0), + false, + false, + false, + 0, + 0, + 0, + true); + + fail("Should have gotten PartitionKeyRangeGoneException"); + } catch (PartitionKeyRangeGoneException e) { + } + + try { + logger.info("Name Based.Routing map cache is outdated because split happened. 
Address cache returns null."); + this.TestCacheRefreshWhileRouteByPartitionKeyRangeId( + this.collection1, + this.collection1, + ImmutableMap.of(this.collection1.resourceId(), this.routingMapCollection1BeforeSplit), + ImmutableMap.of(this.collection1.resourceId(), this.routingMapCollection1AfterSplit), + ImmutableMap.of(getServiceIdentityAt(this.routingMapCollection1AfterSplit, 0), this.addresses1), + null, + new PartitionKeyRangeIdentity(collection1.resourceId(), "0"), + this.addresses1, + getServiceIdentityAt(this.routingMapCollection1AfterSplit, 0), + getRangeAt(this.routingMapCollection1AfterSplit, 0), + false, + false, + false, + 0, + 1, + 0, + true); + + fail("Should have gotten PartitionKeyRangeGoneException"); + } catch (PartitionKeyRangeGoneException e) { + } + + logger.info("Name Based.Routing map cache is outdated because split happened."); + this.TestCacheRefreshWhileRouteByPartitionKeyRangeId( + this.collection1, + this.collection1, + ImmutableMap.of(this.collection1.resourceId(), this.routingMapCollection1BeforeSplit), + ImmutableMap.of(this.collection1.resourceId(), this.routingMapCollection1AfterSplit), + ImmutableMap.of(getServiceIdentityAt(this.routingMapCollection1AfterSplit, 0), this.addresses1), + null, + new PartitionKeyRangeIdentity(collection1.resourceId(), "1"), + this.addresses1, + getServiceIdentityAt(this.routingMapCollection1AfterSplit, 0), + getRangeAt(this.routingMapCollection1AfterSplit, 0), + false, + false, + false, + 0, + 1, + 0, + true); + + try { + logger.info("Collection cache is outdated. Routing map cache returns null. Collection is deleted. RANGE with collection rid."); + this.TestCacheRefreshWhileRouteByPartitionKeyRangeId( + this.collection1, + null, + ImmutableMap.of(), + ImmutableMap.of(), + ImmutableMap.of(getServiceIdentityAt(this.routingMapCollection1AfterSplit, 0), this.addresses1), + null, + new PartitionKeyRangeIdentity(collection1.resourceId(), "0"), + this.addresses1, + getServiceIdentityAt(this.routingMapCollection1AfterSplit, 0), + getRangeAt(this.routingMapCollection1AfterSplit, 0), + false, + false, + false, + 0, + 0, + 0, + true); + + fail("Should have gotten InvalidPartitionException"); + } catch (InvalidPartitionException e) { + } + + try { + logger.info("Collection cache is outdated. Routing map cache returns null. Collection is deleted. RANGE without collection rid"); + this.TestCacheRefreshWhileRouteByPartitionKeyRangeId( + this.collection1, + null, + ImmutableMap.of(), + ImmutableMap.of(), + ImmutableMap.of(getServiceIdentityAt(this.routingMapCollection1AfterSplit, 0), this.addresses1), + null, + new PartitionKeyRangeIdentity("0"), + this.addresses1, + getServiceIdentityAt(this.routingMapCollection1AfterSplit, 0), + getRangeAt(this.routingMapCollection1AfterSplit, 0), + false, + false, + false, + 1, + 0, + 0, + true); + + fail("Should have gotten NotFoundException"); + } catch (NotFoundException e) { + } + + try { + logger.info("Collection cache is outdated. Routing map cache returns null. Collection is deleted. RANGE with collection rid. 
Rid based."); + this.TestCacheRefreshWhileRouteByPartitionKeyRangeId( + this.collection1, + null, + ImmutableMap.of(), + ImmutableMap.of(), + ImmutableMap.of(getServiceIdentityAt(this.routingMapCollection1AfterSplit, 0), this.addresses1), + null, + new PartitionKeyRangeIdentity(collection1.resourceId(), "0"), + this.addresses1, + getServiceIdentityAt(this.routingMapCollection1AfterSplit, 0), + getRangeAt(this.routingMapCollection1AfterSplit, 0), + false, + false, + false, + 0, + 0, + 0, + false); + + fail("Should have gotten NotFoundException"); + } catch (NotFoundException e) { + } + + try { + logger.info("Collection cache is outdated. Routing map cache is outdated. Address cache is outdated. ForceAddressRefresh. RANGE with collection rid. Name based."); + this.TestCacheRefreshWhileRouteByPartitionKeyRangeId( + this.collection1, + this.collection2, + ImmutableMap.of(this.collection1.resourceId(), this.routingMapCollection1BeforeSplit), + ImmutableMap.of(this.collection2.resourceId(), this.routingMapCollection2BeforeSplit), + ImmutableMap.of(getServiceIdentityAt(this.routingMapCollection1AfterSplit, 0), this.addresses1), + ImmutableMap.of(getServiceIdentityAt(this.routingMapCollection2AfterSplit, 0), this.addresses2), + new PartitionKeyRangeIdentity(collection1.resourceId(), "0"), + this.addresses1, + getServiceIdentityAt(this.routingMapCollection1AfterSplit, 0), + getRangeAt(this.routingMapCollection1AfterSplit, 0), + false, + false, + true, + 0, + 1, + 1, + true); + + fail("Should have gotten InvalidPartitionException"); + } catch (InvalidPartitionException e) { + } + } + + static class ServiceIdentity implements IServerIdentity { + final boolean IsMasterService; + final URI ServiceName; + final String FederationId; + final Set partitionKeyRangeIds; + final static ServiceIdentity dummyInstance = new ServiceIdentity(null, null, true); + + public ServiceIdentity(String federationId, URI serviceName, boolean isMasterService, PartitionKeyRangeIdentity... partitionKeyRangeIdentities) { + this.FederationId = federationId; + this.ServiceName = serviceName; + this.IsMasterService = isMasterService; + this.partitionKeyRangeIds = new HashSet<>(Arrays.stream(partitionKeyRangeIdentities).collect(Collectors.toList())); + } + } +} + diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/AddressSelectorTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/AddressSelectorTest.java new file mode 100644 index 0000000000000..c9b786e5246c5 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/AddressSelectorTest.java @@ -0,0 +1,198 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.GoneException; +import com.azure.data.cosmos.internal.directconnectivity.AddressInformation; +import com.azure.data.cosmos.internal.directconnectivity.AddressSelector; +import com.azure.data.cosmos.internal.directconnectivity.IAddressResolver; +import com.azure.data.cosmos.internal.directconnectivity.Protocol; +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import com.azure.data.cosmos.internal.directconnectivity.AddressInformation; +import com.azure.data.cosmos.internal.directconnectivity.AddressSelector; +import com.azure.data.cosmos.internal.directconnectivity.IAddressResolver; +import com.azure.data.cosmos.internal.directconnectivity.Protocol; +import com.google.common.collect.ImmutableList; +import org.mockito.Matchers; +import org.mockito.Mockito; +import org.testng.annotations.Test; +import reactor.core.publisher.Mono; + +import java.net.URI; +import java.util.ArrayList; +import java.util.List; +import java.util.stream.Collectors; + +import static org.assertj.core.api.AssertionsForClassTypes.assertThat; + +public class AddressSelectorTest { + + @Test(groups = "unit", expectedExceptions = GoneException.class) + public void getPrimaryUri_NoAddress() throws Exception { + RxDocumentServiceRequest request = Mockito.mock(RxDocumentServiceRequest.class); + Mockito.doReturn(null).when(request).getDefaultReplicaIndex(); + List replicaAddresses = new ArrayList<>(); + + AddressSelector.getPrimaryUri(request, replicaAddresses); + } + + @Test(groups = "unit", expectedExceptions = GoneException.class, expectedExceptionsMessageRegExp = + "The requested resource is no longer available at the server. 
Returned addresses are \\{https://cosmos1,https://cosmos2\\}") + public void getPrimaryUri_NoPrimaryAddress() throws Exception { + RxDocumentServiceRequest request = Mockito.mock(RxDocumentServiceRequest.class); + Mockito.doReturn(null).when(request).getDefaultReplicaIndex(); + + List replicaAddresses = new ArrayList<>(); + + replicaAddresses.add(new AddressInformation(true, false, "https://cosmos1", Protocol.HTTPS)); + replicaAddresses.add(new AddressInformation(true, false, "https://cosmos2", Protocol.HTTPS)); + + AddressSelector.getPrimaryUri(request, replicaAddresses); + } + + @Test(groups = "unit") + public void getPrimaryUri() throws Exception { + RxDocumentServiceRequest request = Mockito.mock(RxDocumentServiceRequest.class); + Mockito.doReturn(null).when(request).getDefaultReplicaIndex(); + + List replicaAddresses = new ArrayList<>(); + + replicaAddresses.add(new AddressInformation(true, false, "https://cosmos1", Protocol.HTTPS)); + replicaAddresses.add(new AddressInformation(true, true, "https://cosmos2", Protocol.HTTPS)); + replicaAddresses.add(new AddressInformation(true, false, "https://cosmos3", Protocol.HTTPS)); + + URI res = AddressSelector.getPrimaryUri(request, replicaAddresses); + + assertThat(res).isEqualTo(URI.create("https://cosmos2")); + } + + @Test(groups = "unit") + public void getPrimaryUri_WithRequestReplicaIndex() throws Exception { + RxDocumentServiceRequest request = Mockito.mock(RxDocumentServiceRequest.class); + Mockito.doReturn(1).when(request).getDefaultReplicaIndex(); + + List replicaAddresses = new ArrayList<>(); + + replicaAddresses.add(new AddressInformation(true, false, "https://cosmos1", Protocol.HTTPS)); + replicaAddresses.add(new AddressInformation(true, false, "https://cosmos2", Protocol.HTTPS)); + replicaAddresses.add(new AddressInformation(true, false, "https://cosmos3", Protocol.HTTPS)); + + URI res = AddressSelector.getPrimaryUri(request, replicaAddresses); + + assertThat(res).isEqualTo(URI.create("https://cosmos2")); + } + + @Test(groups = "unit") + public void resolvePrimaryUriAsync() { + IAddressResolver addressResolver = Mockito.mock(IAddressResolver.class); + AddressSelector selector = new AddressSelector(addressResolver, Protocol.HTTPS); + + RxDocumentServiceRequest request = Mockito.mock(RxDocumentServiceRequest.class); + Mockito.doReturn(null).when(request).getDefaultReplicaIndex(); + + List replicaAddresses = new ArrayList<>(); + + replicaAddresses.add(new AddressInformation(true, false, "https://cosmos4", Protocol.TCP)); + replicaAddresses.add(new AddressInformation(true, true, "https://cosmos5", Protocol.TCP)); + replicaAddresses.add(new AddressInformation(true, false, "https://cosmos1", Protocol.HTTPS)); + replicaAddresses.add(new AddressInformation(true, true, "https://cosmos2", Protocol.HTTPS)); + replicaAddresses.add(new AddressInformation(true, false, "https://cosmos3", Protocol.HTTPS)); + + Mockito.doReturn(Mono.just(replicaAddresses.toArray(new AddressInformation[0]))).when(addressResolver).resolveAsync(Mockito.any(RxDocumentServiceRequest.class), Matchers.eq(false)); + + URI res = selector.resolvePrimaryUriAsync(request, false).block(); + + assertThat(res).isEqualTo(URI.create("https://cosmos2")); + } + + @Test(groups = "unit") + public void resolveAllUriAsync() { + IAddressResolver addressResolver = Mockito.mock(IAddressResolver.class); + AddressSelector selector = new AddressSelector(addressResolver, Protocol.HTTPS); + + RxDocumentServiceRequest request = Mockito.mock(RxDocumentServiceRequest.class); + 
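+ // getDefaultReplicaIndex() is stubbed to null, so the request does not pin a specific replica.
+ // The resolver below is stubbed with a mixed TCP/HTTPS replica set; because this selector was
+ // built for Protocol.HTTPS, resolveAllUriAsync(request, true, false) is expected to return only
+ // the HTTPS endpoints, with the primary included.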
Mockito.doReturn(null).when(request).getDefaultReplicaIndex(); + + List replicaAddresses = new ArrayList<>(); + + replicaAddresses.add(new AddressInformation(true, false, "https://cosmos4", Protocol.TCP)); + replicaAddresses.add(new AddressInformation(true, true, "https://cosmos5", Protocol.TCP)); + replicaAddresses.add(new AddressInformation(true, false, "https://cosmos1", Protocol.HTTPS)); + replicaAddresses.add(new AddressInformation(true, true, "https://cosmos2", Protocol.HTTPS)); + replicaAddresses.add(new AddressInformation(true, false, "https://cosmos3", Protocol.HTTPS)); + + Mockito.doReturn(Mono.just(replicaAddresses.toArray(new AddressInformation[0]))).when(addressResolver).resolveAsync(Mockito.any(RxDocumentServiceRequest.class), Matchers.eq(false)); + + List res = selector.resolveAllUriAsync(request, true, false).block(); + + assertThat(res).isEqualTo(ImmutableList.of(URI.create("https://cosmos1"), URI.create("https://cosmos2"), URI.create("https://cosmos3"))); + } + + @Test(groups = "unit") + public void resolveAddressesAsync() { + IAddressResolver addressResolver = Mockito.mock(IAddressResolver.class); + AddressSelector selector = new AddressSelector(addressResolver, Protocol.HTTPS); + + RxDocumentServiceRequest request = Mockito.mock(RxDocumentServiceRequest.class); + Mockito.doReturn(null).when(request).getDefaultReplicaIndex(); + + List replicaAddresses = new ArrayList<>(); + + replicaAddresses.add(new AddressInformation(true, false, "https://cosmos4", Protocol.TCP)); + replicaAddresses.add(new AddressInformation(true, true, "https://cosmos5", Protocol.TCP)); + replicaAddresses.add(new AddressInformation(true, false, "https://cosmos1", Protocol.HTTPS)); + replicaAddresses.add(new AddressInformation(true, true, "https://cosmos2", Protocol.HTTPS)); + replicaAddresses.add(new AddressInformation(true, false, "https://cosmos3", Protocol.HTTPS)); + + Mockito.doReturn(Mono.just(replicaAddresses.toArray(new AddressInformation[0]))).when(addressResolver).resolveAsync(Mockito.any(RxDocumentServiceRequest.class), Matchers.eq(false)); + + List res = selector.resolveAddressesAsync(request, false).block(); + + assertThat(res).isEqualTo(replicaAddresses.stream().filter(a -> a.getProtocolName().equals(Protocol.HTTPS.toString())).collect(Collectors.toList())); + } + + @Test(groups = "unit") + public void resolveAllUriAsync_RNTBD() { + IAddressResolver addressResolver = Mockito.mock(IAddressResolver.class); + AddressSelector selector = new AddressSelector(addressResolver, Protocol.TCP); + + RxDocumentServiceRequest request = Mockito.mock(RxDocumentServiceRequest.class); + Mockito.doReturn(null).when(request).getDefaultReplicaIndex(); + + List replicaAddresses = new ArrayList<>(); + + replicaAddresses.add(new AddressInformation(true, false, "rntbd://cosmos1", Protocol.TCP)); + replicaAddresses.add(new AddressInformation(true, true, "rntbd://cosmos2", Protocol.TCP)); + replicaAddresses.add(new AddressInformation(true, false, "https://cosmos1", Protocol.HTTPS)); + replicaAddresses.add(new AddressInformation(true, true, "https://cosmos2", Protocol.HTTPS)); + replicaAddresses.add(new AddressInformation(true, false, "https://cosmos3", Protocol.HTTPS)); + + Mockito.doReturn(Mono.just(replicaAddresses.toArray(new AddressInformation[0]))).when(addressResolver).resolveAsync(Mockito.any(RxDocumentServiceRequest.class), Matchers.eq(false)); + + List res = selector.resolveAllUriAsync(request, true, false).block(); + + assertThat(res).isEqualTo(ImmutableList.of(URI.create("rntbd://cosmos1"), 
URI.create("rntbd://cosmos2"))); + } + +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/AddressSelectorWrapper.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/AddressSelectorWrapper.java new file mode 100644 index 0000000000000..6bd4741c34195 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/AddressSelectorWrapper.java @@ -0,0 +1,533 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.internal.directconnectivity.AddressInformation; +import com.azure.data.cosmos.internal.directconnectivity.AddressSelector; +import com.azure.data.cosmos.internal.directconnectivity.Protocol; +import com.azure.data.cosmos.internal.PartitionKeyRange; +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import com.azure.data.cosmos.internal.directconnectivity.AddressInformation; +import com.azure.data.cosmos.internal.directconnectivity.AddressSelector; +import com.azure.data.cosmos.internal.directconnectivity.Protocol; +import com.google.common.base.Predicates; +import com.google.common.collect.ImmutableList; +import org.apache.commons.lang3.tuple.ImmutablePair; +import org.apache.commons.lang3.tuple.Pair; +import org.assertj.core.api.Condition; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import reactor.core.publisher.Mono; + +import java.net.URI; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Function; +import java.util.stream.Collectors; + +import static org.assertj.core.api.Assertions.assertThat; + +public class AddressSelectorWrapper { + + private static String resolveAllUriAsync = "resolveAllUriAsync"; + private static String resolvePrimaryUriAsync = "resolvePrimaryUriAsync"; + private static String resolveAddressesAsync = "resolveAddressesAsync"; + private final List invocationOnMockList; + + public final AddressSelector addressSelector; + + public static class InOrderVerificationBuilder { + private List> actions = new ArrayList<>(); + + public static InOrderVerificationBuilder create() { + return new InOrderVerificationBuilder(); + } + + public InOrderVerificationBuilder verify(InOrderVerification.Verifier v, int index) 
{ + actions.add(verification -> { + verification.verify(v, index); + return null; + }); + return this; + } + + public InOrderVerificationBuilder verifyOnAll(InOrderVerification.Verifier v) { + actions.add(verification -> { + verification.verifyOnAll(v); + return null; + }); + return this; + } + + public InOrderVerificationBuilder verifyNext(InOrderVerification.Verifier v) { + actions.add(verification -> { + verification.verifyNext(v); + return null; + }); + return this; + } + + public InOrderVerificationBuilder verifyNumberOfInvocations(int expected) { + actions.add(verification -> { + verification.verifyNumberOfInvocations(expected); + return null; + }); + return this; + } + + public void execute(AddressSelectorWrapper addressSelectorWrapper) { + InOrderVerification v = new InOrderVerification(addressSelectorWrapper.invocationOnMockList); + for(Function action: actions) { + action.apply(v); + } + } + } + + public InOrderVerification getInOrderVerification() { + return new InOrderVerification(invocationOnMockList); + } + + public static class InOrderVerification { + private final List invocations; + private int internalIndex = 0; + + InOrderVerification(List invocationOnMockList) { + invocations = invocationOnMockList; + } + + public InOrderVerification verify(Verifier v, int index) { + v.verify(invocations.get(index)); + return this; + } + + public InOrderVerification verifyOnAll(Verifier v) { + for(InvocationOnMock i: invocations) { + v.verify(i); + } + return this; + } + + public InOrderVerification verifyNext(Verifier v) { + v.verify(invocations.get(internalIndex++)); + return this; + } + + public InOrderVerification verifyNumberOfInvocations(int expected) { + assertThat(invocations).hasSize(expected); + return this; + } + + interface Verifier { + + void verify(InvocationOnMock invocation); + + public static VerifierBuilder builder() { + return new VerifierBuilder(); + } + + public static class VerifierBuilder { + + public Verifier build() { + return new Verifier() { + @Override + public void verify(InvocationOnMock invocation) { + for(Verifier v: verifiers) { + v.verify(invocation); + } + } + }; + } + + List verifiers = new ArrayList<>(); + + VerifierBuilder add(Verifier verifier) { + verifiers.add(verifier); + return this; + } + + VerifierBuilder methodName(String methodName) { + add(new Verifier() { + @Override + public void verify(InvocationOnMock invocation) { + assertThat(invocation.getMethod().getName()).isEqualTo(methodName); + } + }); + return this; + } + + VerifierBuilder resolveAllUriAsync() { + methodName(resolveAllUriAsync); + return this; + } + + VerifierBuilder resolvePrimaryUriAsync() { + methodName(resolvePrimaryUriAsync); + return this; + } + + VerifierBuilder resolveAddressesAsync() { + methodName(resolveAddressesAsync); + return this; + } + + VerifierBuilder resolveAllUriAsync(Condition requestMatcher, Condition includePrimaryMatcher, Condition forceRefreshMatcher) { + methodName(resolveAllUriAsync); + add(new Verifier() { + @Override + public void verify(InvocationOnMock invocation) { + RxDocumentServiceRequest request = invocation.getArgumentAt(0, RxDocumentServiceRequest.class); + boolean includePrimary = invocation.getArgumentAt(1, Boolean.class); + boolean forceRefresh = invocation.getArgumentAt(2, Boolean.class); + + assertThat(request).is(requestMatcher); + + assertThat(includePrimary).is(includePrimaryMatcher); + assertThat(forceRefresh).is(forceRefreshMatcher); + } + }); + return this; + } + + VerifierBuilder resolveAllUriAsync_IncludePrimary(boolean 
primaryIncluded) { + methodName(resolveAllUriAsync); + + Condition alwaysTrue = new Condition(Predicates.alwaysTrue(), "no condition"); + Condition primaryIncludedCond = new Condition(Predicates.equalTo(primaryIncluded), String.format("%b (primaryIncluded)", primaryIncluded)); + + resolveAllUriAsync(alwaysTrue, primaryIncludedCond, alwaysTrue); + return this; + } + + VerifierBuilder resolveAllUriAsync_ForceRefresh(boolean forceRefresh) { + methodName(resolveAllUriAsync); + + Condition alwaysTrue = new Condition(Predicates.alwaysTrue(), "no condition"); + Condition forceRefreshCond = new Condition(Predicates.equalTo(forceRefresh), String.format("%b (forceRefresh)", forceRefresh)); + + resolveAllUriAsync(alwaysTrue, alwaysTrue, forceRefreshCond); + return this; + } + } + } + } + + public AddressSelectorWrapper(AddressSelector addressSelector, List invocationOnMockList) { + this.addressSelector = addressSelector; + this.invocationOnMockList = invocationOnMockList; + } + + public AddressSelectorWrapper verifyNumberOfForceCachRefresh(int expectedNumber) { + int count = 0; + for (InvocationOnMock invocationOnMock : invocationOnMockList) { + boolean forceRefresh; + if (invocationOnMock.getMethod().getName().endsWith("resolveAllUriAsync")) { + forceRefresh = invocationOnMock.getArgumentAt(2, Boolean.class); + } else { + forceRefresh = invocationOnMock.getArgumentAt(1, Boolean.class); + } + if (forceRefresh) { + count++; + } + } + assertThat(count).isEqualTo(expectedNumber); + return this; + } + + public AddressSelectorWrapper verifyNumberOfForceCacheRefreshGreaterThanOrEqualTo(int minimum) { + int count = 0; + for (InvocationOnMock invocationOnMock : invocationOnMockList) { + boolean forceRefresh; + if (invocationOnMock.getMethod().getName().endsWith("resolveAllUriAsync")) { + forceRefresh = invocationOnMock.getArgumentAt(2, Boolean.class); + } else { + forceRefresh = invocationOnMock.getArgumentAt(1, Boolean.class); + } + if (forceRefresh) { + count++; + } + } + assertThat(count).isGreaterThanOrEqualTo(minimum); + return this; + } + + public AddressSelectorWrapper validate() { + // for now do nothing; + return this; + } + + public AddressSelectorWrapper verifyVesolvePrimaryUriAsyncCount(int count) { + Mockito.verify(addressSelector, Mockito.times(count)).resolvePrimaryUriAsync(Mockito.any(), Mockito.anyBoolean()); + return this; + } + + public AddressSelectorWrapper verifyResolveAddressesAsync(int count) { + Mockito.verify(addressSelector, Mockito.times(count)).resolveAddressesAsync(Mockito.any(), Mockito.anyBoolean()); + return this; + } + + public AddressSelectorWrapper verifyResolveAllUriAsync(int count) { + Mockito.verify(addressSelector, Mockito.times(count)).resolveAllUriAsync(Mockito.any(), Mockito.anyBoolean(), Mockito.anyBoolean()); + return this; + } + + public AddressSelectorWrapper verifyTotalInvocations(int count) { + assertThat(invocationOnMockList).hasSize(count); + return this; + } + + public static class Builder { + final Protocol protocol; + AddressSelector addressSelector; + List invocationOnMockList = Collections.synchronizedList(new ArrayList<>()); + + + public Builder(Protocol protocol) { + this.protocol = protocol; + } + + public static class PrimaryReplicaMoveBuilder extends Builder { + static PrimaryReplicaMoveBuilder create(Protocol protocol) { + return new PrimaryReplicaMoveBuilder(protocol); + } + + public PrimaryReplicaMoveBuilder(Protocol protocol) { + super(protocol); + addressSelector = Mockito.mock(AddressSelector.class); + } + + public PrimaryReplicaMoveBuilder 
withPrimaryReplicaMove(URI primaryURIBeforeForceRefresh, URI primaryURIAfterForceRefresh) { + AtomicBoolean refreshed = new AtomicBoolean(false); + Mockito.doAnswer((invocation) -> { + capture(invocation); + RxDocumentServiceRequest request = invocation.getArgumentAt(0, RxDocumentServiceRequest.class); + boolean forceRefresh = invocation.getArgumentAt(1, Boolean.class); + + if (forceRefresh || refreshed.get()) { + refreshed.set(true); + return Mono.just(primaryURIAfterForceRefresh); + } + + return Mono.just(primaryURIBeforeForceRefresh); + }).when(addressSelector).resolvePrimaryUriAsync(Mockito.any(RxDocumentServiceRequest.class), Mockito.anyBoolean()); + + Mockito.doAnswer((invocation -> { + capture(invocation); + return null; + })).when(addressSelector).resolveAllUriAsync(Mockito.any(), Mockito.anyBoolean(), Mockito.anyBoolean()); + + Mockito.doAnswer((invocation -> { + capture(invocation); + return null; + })).when(addressSelector).resolveAddressesAsync(Mockito.any(), Mockito.anyBoolean()); + + return this; + } + + public AddressSelectorWrapper build() { + return new AddressSelectorWrapper(this.addressSelector, this.invocationOnMockList); + } + } + + public static class ReplicaMoveBuilder extends Builder { + + List> secondary = new ArrayList<>(); + Pair primary; + private Function partitionKeyRangeFunction; + + static ReplicaMoveBuilder create(Protocol protocol) { + return new ReplicaMoveBuilder(protocol); + } + + public ReplicaMoveBuilder(Protocol protocol) { + super(protocol); + addressSelector = Mockito.mock(AddressSelector.class); + } + + public ReplicaMoveBuilder withPrimaryMove(URI uriBeforeForceRefresh, URI uriAfterForceRefresh) { + withReplicaMove(uriBeforeForceRefresh, uriAfterForceRefresh, true); + return this; + } + + public ReplicaMoveBuilder withSecondaryMove(URI uriBeforeForceRefresh, URI uriAfterForceRefresh) { + withReplicaMove(uriBeforeForceRefresh, uriAfterForceRefresh, false); + return this; + } + + public ReplicaMoveBuilder newPartitionKeyRangeIdOnRefresh(Function partitionKeyRangeFunction) { + this.partitionKeyRangeFunction = partitionKeyRangeFunction; + return this; + } + + public ReplicaMoveBuilder withReplicaMove(URI uriBeforeForceRefresh, URI uriAfterForceRefresh, boolean isPrimary) { + if (isPrimary) { + primary = ImmutablePair.of(uriBeforeForceRefresh, uriAfterForceRefresh); + } else { + secondary.add(ImmutablePair.of(uriBeforeForceRefresh, uriAfterForceRefresh)); + } + return this; + } + + + public AddressSelectorWrapper build() { + AtomicBoolean refreshed = new AtomicBoolean(false); + Mockito.doAnswer((invocation) -> { + capture(invocation); + RxDocumentServiceRequest request = invocation.getArgumentAt(0, RxDocumentServiceRequest.class); + boolean forceRefresh = invocation.getArgumentAt(1, Boolean.class); + if (partitionKeyRangeFunction != null) { + request.requestContext.resolvedPartitionKeyRange = partitionKeyRangeFunction.apply(request); + } + if (forceRefresh || refreshed.get()) { + refreshed.set(true); + return Mono.just(primary.getRight()); + } else { + return Mono.just(primary.getLeft()); + } + + }).when(addressSelector).resolvePrimaryUriAsync(Mockito.any(RxDocumentServiceRequest.class), Mockito.anyBoolean()); + + Mockito.doAnswer((invocation -> { + capture(invocation); + RxDocumentServiceRequest request = invocation.getArgumentAt(0, RxDocumentServiceRequest.class); + boolean includePrimary = invocation.getArgumentAt(1, Boolean.class); + boolean forceRefresh = invocation.getArgumentAt(2, Boolean.class); + + ImmutableList.Builder b = 
ImmutableList.builder(); + + if (forceRefresh || refreshed.get()) { + if (partitionKeyRangeFunction != null) { + request.requestContext.resolvedPartitionKeyRange = partitionKeyRangeFunction.apply(request); + } + refreshed.set(true); + if (includePrimary) { + b.add(primary.getRight()); + } + b.addAll(secondary.stream().map(s -> s.getRight()).collect(Collectors.toList())); + return Mono.just(b.build()); + } else { + // old + if (includePrimary) { + b.add(primary.getLeft()); + } + b.addAll(secondary.stream().map(s -> s.getLeft()).collect(Collectors.toList())); + return Mono.just(b.build()); + } + + })).when(addressSelector).resolveAllUriAsync(Mockito.any(RxDocumentServiceRequest.class), Mockito.anyBoolean(), Mockito.anyBoolean()); + + Mockito.doAnswer((invocation -> { + capture(invocation); + RxDocumentServiceRequest request = invocation.getArgumentAt(0, RxDocumentServiceRequest.class); + boolean forceRefresh = invocation.getArgumentAt(1, Boolean.class); + + ImmutableList.Builder b = ImmutableList.builder(); + + if (forceRefresh || refreshed.get()) { + if (partitionKeyRangeFunction != null) { + request.requestContext.resolvedPartitionKeyRange = partitionKeyRangeFunction.apply(request); + } + + refreshed.set(true); + b.add(primary.getRight()); + b.addAll(secondary.stream().map(s -> s.getRight()).collect(Collectors.toList())); + return Mono.just(b.build()); + } else { + // old + b.add(primary.getLeft()); + b.addAll(secondary.stream().map(s -> s.getLeft()).collect(Collectors.toList())); + return Mono.just(b.build()); + } + })).when(addressSelector).resolveAddressesAsync(Mockito.any(RxDocumentServiceRequest.class), Mockito.anyBoolean()); + + return new AddressSelectorWrapper(addressSelector, invocationOnMockList); + } + } + + public static class Simple extends Builder { + private URI primaryAddress; + private List secondaryAddresses; + static Simple create() { + return new Simple(Protocol.HTTPS); + } + + public Simple(Protocol protocol) { + super(protocol); + addressSelector = Mockito.mock(AddressSelector.class); + } + + public Simple withPrimary(URI primaryAddress) { + this.primaryAddress = primaryAddress; + return this; + } + + public Simple withSecondary(List secondaryAddresses) { + this.secondaryAddresses = secondaryAddresses; + return this; + } + + public AddressSelectorWrapper build() { + Mockito.doAnswer((invocation) -> { + capture(invocation); + return Mono.just(primaryAddress); + }).when(addressSelector).resolvePrimaryUriAsync(Mockito.any(RxDocumentServiceRequest.class), Mockito.anyBoolean()); + + Mockito.doAnswer((invocation -> { + capture(invocation); + RxDocumentServiceRequest request = invocation.getArgumentAt(0, RxDocumentServiceRequest.class); + boolean includePrimary = invocation.getArgumentAt(1, Boolean.class); + boolean forceRefresh = invocation.getArgumentAt(2, Boolean.class); + + if (includePrimary) { + return Mono.just(ImmutableList.builder().addAll(secondaryAddresses).add(primaryAddress).build()); + } else { + return Mono.just(secondaryAddresses); + } + })).when(addressSelector).resolveAllUriAsync(Mockito.any(), Mockito.anyBoolean(), Mockito.anyBoolean()); + + Mockito.doAnswer((invocation -> { + capture(invocation); + return Mono.just(ImmutableList.builder() + .addAll(secondaryAddresses.stream() + .map(uri -> toAddressInformation(uri, false, protocol)) + .collect(Collectors.toList())) + .add(toAddressInformation(primaryAddress, true, protocol)) + .build()); + })).when(addressSelector).resolveAddressesAsync(Mockito.any(), Mockito.anyBoolean()); + + + return new 
AddressSelectorWrapper(this.addressSelector, this.invocationOnMockList); + } + + private AddressInformation toAddressInformation(URI uri, boolean isPrimary, Protocol protocol) { + return new AddressInformation(true, isPrimary, uri.toString(), protocol); + } + } + + protected void capture(InvocationOnMock invocationOnMock) { + invocationOnMockList.add(invocationOnMock); + } + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/AddressValidator.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/AddressValidator.java new file mode 100644 index 0000000000000..92578c444c7a6 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/AddressValidator.java @@ -0,0 +1,154 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.internal.directconnectivity.Address; +import com.azure.data.cosmos.internal.directconnectivity.Protocol; +import com.azure.data.cosmos.internal.directconnectivity.Address; +import com.azure.data.cosmos.internal.directconnectivity.Protocol; +import org.assertj.core.api.Condition; + +import java.util.ArrayList; +import java.util.List; + +import static org.assertj.core.api.Assertions.assertThat; + +/** + * This is a helper class for validating a partition address for tests. 
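+ * <p>
+ * A minimal usage sketch (the concrete values are illustrative only):
+ * <pre>{@code
+ * AddressValidator validator = new AddressValidator.Builder()
+ *         .withRid("exampleRid")
+ *         .withPartitionKeyRangeId("0")
+ *         .protocol(Protocol.HTTPS)
+ *         .build();
+ * validator.validate(address);
+ * }</pre>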
+ */ +public interface AddressValidator { + + void validate(Address address); + + class Builder { + private List validators = new ArrayList<>(); + + public AddressValidator build() { + return new AddressValidator() { + + @Override + public void validate(Address address) { + for (AddressValidator validator : validators) { + validator.validate(address); + } + } + }; + } + + public Builder withId(final String resourceId) { + validators.add(new AddressValidator() { + + @Override + public void validate(Address address) { + assertThat(address.id()).as("check Resource Id").isEqualTo(resourceId); + } + }); + return this; + } + + + + public Builder withProperty(String propertyName, Condition validatingCondition) { + validators.add(new AddressValidator() { + + @Override + public void validate(Address address) { + assertThat(address.get(propertyName)).is(validatingCondition); + + } + }); + return this; + } + + public Builder withProperty(String propertyName, Object value) { + validators.add(new AddressValidator() { + + @Override + public void validate(Address address) { + assertThat(address.get(propertyName)).isEqualTo(value); + + } + }); + return this; + } + + public Builder isPrimary(boolean isPrimary) { + validators.add(new AddressValidator() { + + @Override + public void validate(Address address) { + assertThat(address.IsPrimary()).isTrue(); + } + }); + return this; + } + + public Builder httpsProtocol() { + validators.add(new AddressValidator() { + + @Override + public void validate(Address address) { + assertThat(address.getProtocolScheme()).isEqualTo("https"); + } + }); + return this; + } + + public Builder protocol(Protocol protocol) { + validators.add(new AddressValidator() { + + @Override + public void validate(Address address) { + if (protocol == Protocol.HTTPS) { + assertThat(address.getProtocolScheme()).isEqualTo("https"); + } else if (protocol == Protocol.TCP){ + assertThat(address.getProtocolScheme()).isEqualTo("rntbd"); + } + } + }); + return this; + } + + public Builder withRid(String rid) { + validators.add(new AddressValidator() { + + @Override + public void validate(Address address) { + assertThat(address.resourceId()).isEqualTo(rid); + } + }); + return this; + } + + public Builder withPartitionKeyRangeId(String partitionKeyRangeId) { + validators.add(new AddressValidator() { + + @Override + public void validate(Address address) { + assertThat(address.getParitionKeyRangeId()).isEqualTo(partitionKeyRangeId); + } + }); + return this; + } + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/BarrierRequestHelperTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/BarrierRequestHelperTest.java new file mode 100644 index 0000000000000..ce6f3a44a13fb --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/BarrierRequestHelperTest.java @@ -0,0 +1,237 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all 
+ * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.internal.AsyncDocumentClient; +import com.azure.data.cosmos.internal.Document; +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.IAuthorizationTokenProvider; +import com.azure.data.cosmos.internal.OperationType; +import com.azure.data.cosmos.internal.ResourceType; +import com.azure.data.cosmos.internal.RxDocumentClientImpl; +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import com.azure.data.cosmos.internal.routing.PartitionKeyRangeIdentity; +import com.azure.data.cosmos.internal.TestConfigurations; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; + +import java.util.Map; +import java.util.UUID; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Fail.fail; + +public class BarrierRequestHelperTest { + @Test(groups = "direct") + public void barrierBasic() { + IAuthorizationTokenProvider authTokenProvider = getIAuthorizationTokenProvider(); + + for (ResourceType resourceType : ResourceType.values()) { + + for (OperationType operationType : OperationType.values()) { + Document randomResource = new Document(); + randomResource.id(UUID.randomUUID().toString()); + RxDocumentServiceRequest request = + RxDocumentServiceRequest.create(operationType, resourceType, "/dbs/7mVFAA==/colls/7mVFAP1jpeU=", randomResource, (Map) null); + + BarrierRequestHelper.createAsync(request, authTokenProvider, 10l, 10l).block(); + request = + RxDocumentServiceRequest.create(operationType, resourceType, "/dbs/7mVFAA==", randomResource, null); + + request.setResourceId("3"); + try { + BarrierRequestHelper.createAsync(request, authTokenProvider, 10l, 10l).block(); + } catch (Exception e) { + if (!BarrierRequestHelper.isCollectionHeadBarrierRequest(resourceType, operationType)) { + fail("Should not fail for non-collection head combinations"); + } + } + } + } + } + + @Test(groups = "direct") + public void barrierDBFeed() { + IAuthorizationTokenProvider authTokenProvider = getIAuthorizationTokenProvider(); + + ResourceType resourceType = ResourceType.DocumentCollection; + OperationType operationType = OperationType.Query; + + Document randomResource = new Document(); + randomResource.id(UUID.randomUUID().toString()); + RxDocumentServiceRequest request = + RxDocumentServiceRequest.create(operationType, resourceType, "/dbs/7mVFAA==/colls/7mVFAP1jpeU=", randomResource, (Map) null); + + RxDocumentServiceRequest barrierRequest = BarrierRequestHelper.createAsync(request, authTokenProvider, 11l, 10l).block(); + + assertThat(barrierRequest.getOperationType()).isEqualTo(OperationType.HeadFeed); + assertThat(barrierRequest.getResourceType()).isEqualTo(ResourceType.Database); + + + assertThat(getTargetGlobalLsn(barrierRequest)).isEqualTo(10l); + assertThat(getTargetLsn(barrierRequest)).isEqualTo(11l); + } + + @Test(groups = "direct") + public void 
barrierDocumentQueryNameBasedRequest() { + IAuthorizationTokenProvider authTokenProvider = getIAuthorizationTokenProvider(); + + ResourceType resourceType = ResourceType.Document; + OperationType operationType = OperationType.Query; + + Document randomResource = new Document(); + randomResource.id(UUID.randomUUID().toString()); + RxDocumentServiceRequest request = + RxDocumentServiceRequest.create(operationType, resourceType, "/dbs/dbname/colls/collname", randomResource, (Map) null); + + RxDocumentServiceRequest barrierRequest = BarrierRequestHelper.createAsync(request, authTokenProvider, 11l, 10l).block(); + + assertThat(barrierRequest.getOperationType()).isEqualTo(OperationType.Head); + assertThat(barrierRequest.getResourceType()).isEqualTo(ResourceType.DocumentCollection); + assertThat(barrierRequest.getResourceAddress()).isEqualTo("dbs/dbname/colls/collname"); + + assertThat(getTargetGlobalLsn(barrierRequest)).isEqualTo(10l); + assertThat(getTargetLsn(barrierRequest)).isEqualTo(11l); + } + + @Test(groups = "direct") + public void barrierDocumentReadNameBasedRequest() { + IAuthorizationTokenProvider authTokenProvider = getIAuthorizationTokenProvider(); + + ResourceType resourceType = ResourceType.Document; + OperationType operationType = OperationType.Read; + + Document randomResource = new Document(); + randomResource.id(UUID.randomUUID().toString()); + RxDocumentServiceRequest request = + RxDocumentServiceRequest.create(operationType, resourceType, "/dbs/dbname/colls/collname", randomResource, (Map) null); + + RxDocumentServiceRequest barrierRequest = BarrierRequestHelper.createAsync(request, authTokenProvider, 11l, 10l).block(); + + assertThat(barrierRequest.getOperationType()).isEqualTo(OperationType.Head); + assertThat(barrierRequest.getResourceType()).isEqualTo(ResourceType.DocumentCollection); + assertThat(barrierRequest.getResourceAddress()).isEqualTo("dbs/dbname/colls/collname"); + + assertThat(getTargetGlobalLsn(barrierRequest)).isEqualTo(10l); + assertThat(getTargetLsn(barrierRequest)).isEqualTo(11l); + assertThat(barrierRequest.getIsNameBased()).isEqualTo(true); + + } + + @Test(groups = "direct") + public void barrierDocumentReadRidBasedRequest() { + IAuthorizationTokenProvider authTokenProvider = getIAuthorizationTokenProvider(); + + ResourceType resourceType = ResourceType.Document; + OperationType operationType = OperationType.Read; + + Document randomResource = new Document(); + randomResource.id(UUID.randomUUID().toString()); + RxDocumentServiceRequest request = + RxDocumentServiceRequest.create(operationType, "7mVFAA==", resourceType, (Map) null); + + RxDocumentServiceRequest barrierRequest = BarrierRequestHelper.createAsync(request, authTokenProvider, 11l, 10l).block(); + + assertThat(barrierRequest.getOperationType()).isEqualTo(OperationType.Head); + assertThat(barrierRequest.getResourceType()).isEqualTo(ResourceType.DocumentCollection); + assertThat(barrierRequest.getResourceAddress()).isEqualTo("7mVFAA=="); + + assertThat(getTargetGlobalLsn(barrierRequest)).isEqualTo(10l); + assertThat(getTargetLsn(barrierRequest)).isEqualTo(11l); + assertThat(barrierRequest.getIsNameBased()).isEqualTo(false); + } + + @DataProvider(name = "isCollectionHeadBarrierRequestArgProvider") + public Object[][] isCollectionHeadBarrierRequestArgProvider() { + return new Object[][]{ + // resourceType, operationType, isCollectionHeadBarrierRequest + + {ResourceType.Attachment, null, true}, + {ResourceType.Document, null, true}, + {ResourceType.Conflict, null, true}, + 
{ResourceType.StoredProcedure, null, true}, + {ResourceType.Attachment, null, true}, + {ResourceType.Trigger, null, true}, + + {ResourceType.DocumentCollection, OperationType.ReadFeed, false}, + {ResourceType.DocumentCollection, OperationType.Query, false}, + {ResourceType.DocumentCollection, OperationType.SqlQuery, false}, + + {ResourceType.DocumentCollection, OperationType.Create, true}, + {ResourceType.DocumentCollection, OperationType.Read, true}, + {ResourceType.DocumentCollection, OperationType.Replace, true}, + {ResourceType.DocumentCollection, OperationType.ExecuteJavaScript, true}, + + {ResourceType.PartitionKeyRange, null, false}, + }; + } + + @Test(groups = "direct", dataProvider = "isCollectionHeadBarrierRequestArgProvider") + public void isCollectionHeadBarrierRequest(ResourceType resourceType, + OperationType operationType, + boolean expectedResult) { + if (operationType != null) { + boolean actual = BarrierRequestHelper.isCollectionHeadBarrierRequest(resourceType, operationType); + assertThat(actual).isEqualTo(expectedResult); + } else { + for (OperationType type : OperationType.values()) { + boolean actual = BarrierRequestHelper.isCollectionHeadBarrierRequest(resourceType, type); + assertThat(actual).isEqualTo(expectedResult); + } + } + } + + private IAuthorizationTokenProvider getIAuthorizationTokenProvider() { + return (RxDocumentClientImpl) + new AsyncDocumentClient.Builder() + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withServiceEndpoint(TestConfigurations.HOST) + .build(); + } + + private String getHeaderValue(RxDocumentServiceRequest req, String name) { + return req.getHeaders().get(name); + } + + private String getPartitionKey(RxDocumentServiceRequest req) { + return getHeaderValue(req, HttpConstants.HttpHeaders.PARTITION_KEY); + } + + private String getCollectionRid(RxDocumentServiceRequest req) { + return getHeaderValue(req, WFConstants.BackendHeaders.COLLECTION_RID); + } + + private PartitionKeyRangeIdentity getPartitionKeyRangeIdentity(RxDocumentServiceRequest req) { + return req.getPartitionKeyRangeIdentity(); + } + + private Long getTargetLsn(RxDocumentServiceRequest req) { + return Long.parseLong(getHeaderValue(req, HttpConstants.HttpHeaders.TARGET_LSN)); + } + + private Long getTargetGlobalLsn(RxDocumentServiceRequest req) { + return Long.parseLong(getHeaderValue(req, HttpConstants.HttpHeaders.TARGET_GLOBAL_COMMITTED_LSN)); + } +} + diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/ConsistencyReaderTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/ConsistencyReaderTest.java new file mode 100644 index 0000000000000..f73b6db88a505 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/ConsistencyReaderTest.java @@ -0,0 +1,771 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.internal.ISessionContainer; +import com.azure.data.cosmos.RequestRateTooLargeException; +import com.azure.data.cosmos.internal.*; +import com.azure.data.cosmos.NotFoundException; +import com.google.common.collect.ImmutableList; +import io.reactivex.subscribers.TestSubscriber; +import org.mockito.Mockito; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; +import reactor.core.publisher.Mono; + +import java.math.BigDecimal; +import java.math.RoundingMode; +import java.net.URI; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static com.azure.data.cosmos.internal.Utils.ValueHolder; +import static org.assertj.core.api.Assertions.assertThat; + +public class ConsistencyReaderTest { + private final Configs configs = new Configs(); + private static final int TIMEOUT = 30000; + @DataProvider(name = "deduceReadModeArgProvider") + public Object[][] deduceReadModeArgProvider() { + return new Object[][]{ + // account consistency, request consistency, expected readmode, expected consistency to use, whether use session + { ConsistencyLevel.STRONG, null, ReadMode.Strong, ConsistencyLevel.STRONG, false}, + { ConsistencyLevel.STRONG, ConsistencyLevel.EVENTUAL, ReadMode.Any, ConsistencyLevel.EVENTUAL, false}, + { ConsistencyLevel.STRONG, ConsistencyLevel.SESSION, ReadMode.Any, ConsistencyLevel.SESSION, true}, + { ConsistencyLevel.SESSION, ConsistencyLevel.EVENTUAL, ReadMode.Any, ConsistencyLevel.EVENTUAL, false}, + { ConsistencyLevel.SESSION, ConsistencyLevel.SESSION, ReadMode.Any, ConsistencyLevel.SESSION, true}, + { ConsistencyLevel.SESSION, ConsistencyLevel.EVENTUAL, ReadMode.Any, ConsistencyLevel.EVENTUAL, false}, + { ConsistencyLevel.SESSION, null, ReadMode.Any, ConsistencyLevel.SESSION, true}, + { ConsistencyLevel.EVENTUAL, ConsistencyLevel.EVENTUAL, ReadMode.Any, ConsistencyLevel.EVENTUAL, false}, + { ConsistencyLevel.EVENTUAL, null, ReadMode.Any, ConsistencyLevel.EVENTUAL, false}, + }; + } + + @Test(groups = "unit", dataProvider = "deduceReadModeArgProvider") + public void deduceReadMode(ConsistencyLevel accountConsistencyLevel, ConsistencyLevel requestConsistency, ReadMode expectedReadMode, + ConsistencyLevel expectedConsistencyToUse, boolean expectedToUseSession) throws CosmosClientException { + AddressSelector addressSelector = Mockito.mock(AddressSelector.class); + ISessionContainer sessionContainer = Mockito.mock(ISessionContainer.class); + TransportClient transportClient = Mockito.mock(TransportClient.class); + GatewayServiceConfiguratorReaderMock gatewayServiceConfigurationReaderWrapper = GatewayServiceConfiguratorReaderMock.from(accountConsistencyLevel); + IAuthorizationTokenProvider authorizationTokenProvider = Mockito.mock(IAuthorizationTokenProvider.class); + ConsistencyReader consistencyReader = new ConsistencyReader(configs, + addressSelector, + 
sessionContainer, + transportClient, + gatewayServiceConfigurationReaderWrapper.gatewayServiceConfigurationReader, + authorizationTokenProvider); + + RxDocumentServiceRequest request = RxDocumentServiceRequest.createFromName( + OperationType.Read, "/dbs/db/colls/col/docs/docId", ResourceType.Document); + if (requestConsistency != null) { + request.getHeaders().put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, requestConsistency.toString()); + } + + ValueHolder consistencyLevel = ValueHolder.initialize(null); + ValueHolder useSession = ValueHolder.initialize(null); + + ReadMode readMode = consistencyReader.deduceReadMode(request, consistencyLevel, useSession); + + assertThat(readMode).isEqualTo(expectedReadMode); + assertThat(consistencyLevel.v).isEqualTo(expectedConsistencyToUse); + assertThat(useSession.v).isEqualTo(expectedToUseSession); + } + + @DataProvider(name = "getMaxReplicaSetSizeArgProvider") + public Object[][] getMaxReplicaSetSizeArgProvider() { + return new Object[][]{ + // system max replica count, system min replica count, user max replica count, user min replica, is reading from master operation + { 4, 3, 4, 3, false }, + { 4, 3, 4, 3, true }, + + { 4, 3, 3, 2, false }, + { 4, 3, 3, 2, true } + }; + } + + @Test(groups = "unit", dataProvider = "getMaxReplicaSetSizeArgProvider") + public void replicaSizes(int systemMaxReplicaCount, + int systemMinReplicaCount, + int userMaxReplicaCount, + int userMinReplicaCount, + boolean isReadingFromMasterOperation) { + AddressSelector addressSelector = Mockito.mock(AddressSelector.class); + ISessionContainer sessionContainer = Mockito.mock(ISessionContainer.class); + TransportClient transportClient = Mockito.mock(TransportClient.class); + GatewayServiceConfiguratorReaderMock gatewayServiceConfigurationReaderWrapper = GatewayServiceConfiguratorReaderMock.from(ConsistencyLevel.STRONG, + systemMaxReplicaCount, + systemMinReplicaCount, + userMaxReplicaCount, + userMinReplicaCount); + IAuthorizationTokenProvider authorizationTokenProvider = Mockito.mock(IAuthorizationTokenProvider.class); + ConsistencyReader consistencyReader = new ConsistencyReader(configs, + addressSelector, + sessionContainer, + transportClient, + gatewayServiceConfigurationReaderWrapper.gatewayServiceConfigurationReader, + authorizationTokenProvider); + + RxDocumentServiceRequest request; + if (isReadingFromMasterOperation) { + request = RxDocumentServiceRequest.createFromName( + OperationType.ReadFeed, "/dbs/db/colls/col", ResourceType.DocumentCollection); + } else { + request = RxDocumentServiceRequest.createFromName( + OperationType.Read, "/dbs/db/colls/col/docs/docId", ResourceType.Document); + } + + assertThat(consistencyReader.getMaxReplicaSetSize(request)).isEqualTo(isReadingFromMasterOperation? systemMaxReplicaCount : userMaxReplicaCount); + assertThat(consistencyReader.getMinReplicaSetSize(request)).isEqualTo(isReadingFromMasterOperation? 
systemMinReplicaCount : userMinReplicaCount); + } + + @Test(groups = "unit") + public void readAny() { + List secondaries = ImmutableList.of(URI.create("secondary1"), URI.create("secondary2"), URI.create("secondary3")); + URI primaryAddress = URI.create("primary"); + AddressSelectorWrapper addressSelectorWrapper = AddressSelectorWrapper.Builder.Simple.create() + .withPrimary(primaryAddress) + .withSecondary(secondaries) + .build(); + + StoreResponse primaryResponse = StoreResponseBuilder.create() + .withLSN(54) + .withLocalLSN(18) + .withRequestCharge(1.1) + .build(); + StoreResponse secondaryResponse1 = StoreResponseBuilder.create() + .withLSN(53) + .withLocalLSN(17) + .withRequestCharge(1.1) + .build(); + StoreResponse secondaryResponse2 = StoreResponseBuilder.create() + .withLSN(52) + .withLocalLSN(16) + .withRequestCharge(1.1) + .build(); + StoreResponse secondaryResponse3 = StoreResponseBuilder.create() + .withLSN(51) + .withLocalLSN(15) + .withRequestCharge(1.1) + .build(); + TransportClientWrapper transportClientWrapper = TransportClientWrapper.Builder.uriToResultBuilder() + .storeResponseOn(primaryAddress, OperationType.Read, ResourceType.Document, primaryResponse, true) + .storeResponseOn(secondaries.get(0), OperationType.Read, ResourceType.Document, secondaryResponse1, true) + .storeResponseOn(secondaries.get(1), OperationType.Read, ResourceType.Document, secondaryResponse2, true) + .storeResponseOn(secondaries.get(2), OperationType.Read, ResourceType.Document, secondaryResponse3, true) + .build(); + + GatewayServiceConfiguratorReaderMock gatewayServiceConfigurationReaderWrapper = GatewayServiceConfiguratorReaderMock.from(ConsistencyLevel.STRONG); + ISessionContainer sessionContainer = Mockito.mock(ISessionContainer.class); + + IAuthorizationTokenProvider authorizationTokenProvider = Mockito.mock(IAuthorizationTokenProvider.class); + ConsistencyReaderUnderTest consistencyReader = new ConsistencyReaderUnderTest(addressSelectorWrapper.addressSelector, + sessionContainer, + transportClientWrapper.transportClient, + gatewayServiceConfigurationReaderWrapper.gatewayServiceConfigurationReader, + authorizationTokenProvider); + + RxDocumentServiceRequest request = RxDocumentServiceRequest.createFromName( + OperationType.Read, "/dbs/db/colls/col/docs/docId", ResourceType.Document); + request.getHeaders().put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, ConsistencyLevel.EVENTUAL.toString()); + + TimeoutHelper timeout = Mockito.mock(TimeoutHelper.class); + boolean forceRefresh = false; + boolean isInRetry = false; + Mono storeResponseSingle = consistencyReader.readAsync(request, timeout, isInRetry, forceRefresh); + + StoreResponseValidator validator = StoreResponseValidator.create() + .withBELSNGreaterThanOrEqualTo(51) + .withRequestCharge(1.1) + .in(primaryResponse, secondaryResponse1, secondaryResponse2, secondaryResponse3) + .build(); + validateSuccess(storeResponseSingle, validator); + + Mockito.verifyZeroInteractions(consistencyReader.getSpyQuorumReader()); + + + Mockito.verify(consistencyReader.getSpyStoreReader(), Mockito.times(1)) + .readMultipleReplicaAsync(Mockito.any(RxDocumentServiceRequest.class), + Mockito.anyBoolean(), + Mockito.anyInt(), + Mockito.anyBoolean(), + Mockito.anyBoolean(), + Mockito.any(), + Mockito.anyBoolean(), + Mockito.anyBoolean()); + + transportClientWrapper.validate() + .verifyNumberOfInvocations(1); + + addressSelectorWrapper.validate() + .verifyTotalInvocations(1) + .verifyNumberOfForceCachRefresh(0) + .verifyVesolvePrimaryUriAsyncCount(0) + 
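+ // ReadMode.Any is expected to resolve the full replica set exactly once and never the primary alone.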
.verifyResolveAllUriAsync(1); + } + + @Test(groups = "unit") + public void readSessionConsistency_SomeReplicasLagBehindAndReturningResponseWithLowerLSN_FindAnotherReplica() { + long slowReplicaLSN = 651176; + String partitionKeyRangeId = "1"; + long fasterReplicaLSN = 651177; + + List secondaries = ImmutableList.of(URI.create("secondary1"), URI.create("secondary2"), URI.create("secondary3")); + URI primaryAddress = URI.create("primary"); + AddressSelectorWrapper addressSelectorWrapper = AddressSelectorWrapper.Builder.Simple.create() + .withPrimary(primaryAddress) + .withSecondary(secondaries) + .build(); + + StoreResponse primaryResponse = StoreResponseBuilder.create() + .withSessionToken(partitionKeyRangeId + ":-1#" + slowReplicaLSN) + .withLSN(slowReplicaLSN) + .withLocalLSN(slowReplicaLSN) + .withQuorumAckecdLsn(slowReplicaLSN) + .withQuorumAckecdLocalLsn(slowReplicaLSN) + .withGlobalCommittedLsn(-1) + .withItemLocalLSN(slowReplicaLSN) + .withRequestCharge(1.1) + .build(); + StoreResponse secondaryResponse1 = StoreResponseBuilder.create() + .withSessionToken(partitionKeyRangeId + ":-1#" + slowReplicaLSN) + .withLSN(slowReplicaLSN) + .withLocalLSN(slowReplicaLSN) + .withQuorumAckecdLsn(slowReplicaLSN) + .withQuorumAckecdLocalLsn(slowReplicaLSN) + .withGlobalCommittedLsn(-1) + .withItemLocalLSN(slowReplicaLSN) + .withRequestCharge(1.1) + .build(); + StoreResponse secondaryResponse2 = StoreResponseBuilder.create() + .withSessionToken(partitionKeyRangeId + ":-1#" + fasterReplicaLSN) + .withLSN(fasterReplicaLSN) + .withLocalLSN(fasterReplicaLSN) + .withQuorumAckecdLsn(fasterReplicaLSN) + .withQuorumAckecdLocalLsn(fasterReplicaLSN) + .withGlobalCommittedLsn(-1) + .withItemLocalLSN(fasterReplicaLSN) + .withRequestCharge(1.1) + .build(); + StoreResponse secondaryResponse3 = StoreResponseBuilder.create() + .withSessionToken(partitionKeyRangeId + ":-1#" + slowReplicaLSN) + .withLSN(slowReplicaLSN) + .withLocalLSN(slowReplicaLSN) + .withQuorumAckecdLsn(slowReplicaLSN) + .withQuorumAckecdLocalLsn(slowReplicaLSN) + .withGlobalCommittedLsn(-1) + .withItemLocalLSN(slowReplicaLSN) + .withRequestCharge(1.1) + .build(); + TransportClientWrapper transportClientWrapper = TransportClientWrapper.Builder.uriToResultBuilder() + .storeResponseOn(primaryAddress, OperationType.Read, ResourceType.Document, primaryResponse, true) + .storeResponseOn(secondaries.get(0), OperationType.Read, ResourceType.Document, secondaryResponse1, true) + .storeResponseOn(secondaries.get(1), OperationType.Read, ResourceType.Document, secondaryResponse2, true) + .storeResponseOn(secondaries.get(2), OperationType.Read, ResourceType.Document, secondaryResponse3, true) + .build(); + + GatewayServiceConfiguratorReaderMock gatewayServiceConfigurationReaderWrapper = GatewayServiceConfiguratorReaderMock.from(ConsistencyLevel.STRONG); + ISessionContainer sessionContainer = Mockito.mock(ISessionContainer.class); + + IAuthorizationTokenProvider authorizationTokenProvider = Mockito.mock(IAuthorizationTokenProvider.class); + ConsistencyReaderUnderTest consistencyReader = new ConsistencyReaderUnderTest(addressSelectorWrapper.addressSelector, + sessionContainer, + transportClientWrapper.transportClient, + gatewayServiceConfigurationReaderWrapper.gatewayServiceConfigurationReader, + authorizationTokenProvider); + + RxDocumentServiceRequest request = RxDocumentServiceRequest.createFromName( + OperationType.Read, "/dbs/db/colls/col/docs/docId", ResourceType.Document); + request.getHeaders().put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, 
ConsistencyLevel.SESSION.toString()); + request.requestContext = new DocumentServiceRequestContext(); + Utils.ValueHolder sessionToken = Utils.ValueHolder.initialize(null); + assertThat(VectorSessionToken.tryCreate("-1#" + fasterReplicaLSN , sessionToken)).isTrue(); + request.requestContext.timeoutHelper = Mockito.mock(TimeoutHelper.class); + request.requestContext.resolvedPartitionKeyRange = partitionKeyRangeWithId(partitionKeyRangeId); + request.requestContext.requestChargeTracker = new RequestChargeTracker(); + + Mockito.doReturn(sessionToken.v).when(sessionContainer).resolvePartitionLocalSessionToken(Mockito.eq(request), Mockito.anyString()); + + + TimeoutHelper timeout = Mockito.mock(TimeoutHelper.class); + boolean forceRefresh = false; + boolean isInRetry = false; + Mono storeResponseSingle = consistencyReader.readAsync(request, timeout, isInRetry, forceRefresh); + + StoreResponseValidator validator = StoreResponseValidator.create() + .withBELSN(fasterReplicaLSN) + .withRequestChargeGreaterThanOrEqualTo(1.1) + .in(primaryResponse, secondaryResponse1, secondaryResponse2, secondaryResponse3) + .build(); + validateSuccess(storeResponseSingle, validator); + + Mockito.verifyZeroInteractions(consistencyReader.getSpyQuorumReader()); + + + Mockito.verify(consistencyReader.getSpyStoreReader(), Mockito.times(1)) + .readMultipleReplicaAsync(Mockito.any(RxDocumentServiceRequest.class), + Mockito.anyBoolean(), + Mockito.anyInt(), + Mockito.anyBoolean(), + Mockito.anyBoolean(), + Mockito.any(), + Mockito.anyBoolean(), + Mockito.anyBoolean()); + + assertThat(transportClientWrapper.validate() + .getNumberOfInvocations()) + .isGreaterThanOrEqualTo(1) + .isLessThanOrEqualTo(4); + + addressSelectorWrapper.validate() + .verifyTotalInvocations(1) + .verifyNumberOfForceCachRefresh(0) + .verifyVesolvePrimaryUriAsyncCount(0) + .verifyResolveAllUriAsync(1); + } + + /** + * reading in session consistency, if the requested session token cannot be supported by some replicas + * tries others till we find a replica which can support the given session token + */ + @Test(groups = "unit") + public void sessionNotAvailableFromSomeReplicasThrowingNotFound_FindReplicaSatisfyingRequestedSession() { + long slowReplicaLSN = 651175; + long globalCommittedLsn = 651174; + + long fasterReplicaLSN = 651176; + String partitionKeyRangeId = "1"; + + NotFoundException foundException = new NotFoundException(); + foundException.responseHeaders().put(HttpConstants.HttpHeaders.SESSION_TOKEN, partitionKeyRangeId + ":-1#" + slowReplicaLSN); + foundException.responseHeaders().put(WFConstants.BackendHeaders.LSN, Long.toString(slowReplicaLSN)); + foundException.responseHeaders().put(WFConstants.BackendHeaders.LOCAL_LSN, Long.toString(slowReplicaLSN)); + foundException.responseHeaders().put(WFConstants.BackendHeaders.GLOBAL_COMMITTED_LSN, Long.toString(globalCommittedLsn)); + + StoreResponse storeResponse = StoreResponseBuilder.create() + .withSessionToken(partitionKeyRangeId + ":-1#" + fasterReplicaLSN) + .withLSN(fasterReplicaLSN) + .withLocalLSN(fasterReplicaLSN) + .withQuorumAckecdLsn(fasterReplicaLSN) + .withQuorumAckecdLocalLsn(fasterReplicaLSN) + .withGlobalCommittedLsn(-1) + .withItemLocalLSN(fasterReplicaLSN) + .withRequestCharge(1.1) + .build(); + + TransportClientWrapper transportClientWrapper = new TransportClientWrapper.Builder.ReplicaResponseBuilder + .SequentialBuilder() + .then(foundException) // 1st replica read returns not found + .then(foundException) // 2nd replica read returns not found + .then(foundException) // 3rd 
replica read returns not found + .then(storeResponse) // 4th replica read returns storeResponse satisfying requested session token + .build(); + + URI primaryUri = URI.create("primary"); + URI secondaryUri1 = URI.create("secondary1"); + URI secondaryUri2 = URI.create("secondary2"); + URI secondaryUri3 = URI.create("secondary3"); + + AddressSelectorWrapper addressSelectorWrapper = AddressSelectorWrapper.Builder.Simple.create() + .withPrimary(primaryUri) + .withSecondary(ImmutableList.of(secondaryUri1, secondaryUri2, secondaryUri3)) + .build(); + ISessionContainer sessionContainer = Mockito.mock(ISessionContainer.class); + + Configs configs = new Configs(); + GatewayServiceConfiguratorReaderMock gatewayServiceConfigurationReaderWrapper = GatewayServiceConfiguratorReaderMock.from(ConsistencyLevel.STRONG, + 4, + 3, + 4, + 3); + + IAuthorizationTokenProvider authTokenProvider = Mockito.mock(IAuthorizationTokenProvider.class); + ConsistencyReader consistencyReader = new ConsistencyReader(configs, + addressSelectorWrapper.addressSelector, + sessionContainer, + transportClientWrapper.transportClient, + gatewayServiceConfigurationReaderWrapper.gatewayServiceConfigurationReader, + authTokenProvider); + + + TimeoutHelper timeoutHelper = Mockito.mock(TimeoutHelper.class); + RxDocumentServiceRequest dsr = RxDocumentServiceRequest.createFromName( + OperationType.Read, "/dbs/db/colls/col/docs/docId", ResourceType.Document); + dsr.getHeaders().put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, ConsistencyLevel.SESSION.toString()); + dsr.requestContext = new DocumentServiceRequestContext(); + Utils.ValueHolder sessionToken = Utils.ValueHolder.initialize(null); + assertThat(VectorSessionToken.tryCreate("-1#" + fasterReplicaLSN , sessionToken)).isTrue(); + dsr.requestContext.timeoutHelper = timeoutHelper; + dsr.requestContext.resolvedPartitionKeyRange = partitionKeyRangeWithId(partitionKeyRangeId); + dsr.requestContext.requestChargeTracker = new RequestChargeTracker(); + + Mockito.doReturn(sessionToken.v).when(sessionContainer).resolvePartitionLocalSessionToken(Mockito.eq(dsr), Mockito.anyString()); + + Mono storeResponseSingle = consistencyReader.readAsync(dsr, timeoutHelper, false, false); + + StoreResponseValidator validator = StoreResponseValidator.create().isSameAs(storeResponse).isSameAs(storeResponse).build(); + validateSuccess(storeResponseSingle, validator); + } + + /** + * Reading with session consistency, replicas have session token with higher than requested and return not found + */ + @Test(groups = "unit") + public void sessionRead_LegitimateNotFound() { + long lsn = 651175; + long globalCommittedLsn = 651174; + String partitionKeyRangeId = "73"; + + NotFoundException foundException = new NotFoundException(); + foundException.responseHeaders().put(HttpConstants.HttpHeaders.SESSION_TOKEN, partitionKeyRangeId + ":-1#" + lsn); + foundException.responseHeaders().put(WFConstants.BackendHeaders.LSN, Long.toString(lsn)); + foundException.responseHeaders().put(WFConstants.BackendHeaders.LOCAL_LSN, Long.toString(lsn)); + foundException.responseHeaders().put(WFConstants.BackendHeaders.GLOBAL_COMMITTED_LSN, Long.toString(globalCommittedLsn)); + + TransportClientWrapper transportClientWrapper = new TransportClientWrapper.Builder.ReplicaResponseBuilder + .SequentialBuilder() + .then(foundException) // 1st replica read returns not found lsn(response) >= lsn(request) + .then(foundException) // 2nd replica read returns not found lsn(response) >= lsn(request) + .then(foundException) // 3rd replica read returns 
not found lsn(response) >= lsn(request) + .then(foundException) // 4th replica read returns not found lsn(response) >= lsn(request) + .build(); + + URI primaryUri = URI.create("primary"); + URI secondaryUri1 = URI.create("secondary1"); + URI secondaryUri2 = URI.create("secondary2"); + URI secondaryUri3 = URI.create("secondary3"); + + AddressSelectorWrapper addressSelectorWrapper = AddressSelectorWrapper.Builder.Simple.create() + .withPrimary(primaryUri) + .withSecondary(ImmutableList.of(secondaryUri1, secondaryUri2, secondaryUri3)) + .build(); + ISessionContainer sessionContainer = Mockito.mock(ISessionContainer.class); + TimeoutHelper timeoutHelper = Mockito.mock(TimeoutHelper.class); + RxDocumentServiceRequest dsr = RxDocumentServiceRequest.createFromName( + OperationType.Read, "/dbs/db/colls/col/docs/docId", ResourceType.Document); + dsr.getHeaders().put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, ConsistencyLevel.SESSION.toString()); + dsr.requestContext = new DocumentServiceRequestContext(); + Utils.ValueHolder sessionToken = Utils.ValueHolder.initialize(null); + assertThat(VectorSessionToken.tryCreate("-1#" + lsn , sessionToken)).isTrue(); + dsr.requestContext.timeoutHelper = timeoutHelper; + dsr.requestContext.resolvedPartitionKeyRange = partitionKeyRangeWithId(partitionKeyRangeId); + dsr.requestContext.requestChargeTracker = new RequestChargeTracker(); + + Mockito.doReturn(sessionToken.v).when(sessionContainer).resolvePartitionLocalSessionToken(Mockito.eq(dsr), Mockito.anyString()); + + Configs configs = new Configs(); + GatewayServiceConfiguratorReaderMock gatewayServiceConfigurationReaderWrapper = GatewayServiceConfiguratorReaderMock.from(ConsistencyLevel.STRONG, + 4, + 3, + 4, + 3); + + IAuthorizationTokenProvider authTokenProvider = Mockito.mock(IAuthorizationTokenProvider.class); + ConsistencyReader consistencyReader = new ConsistencyReader(configs, + addressSelectorWrapper.addressSelector, + sessionContainer, + transportClientWrapper.transportClient, + gatewayServiceConfigurationReaderWrapper.gatewayServiceConfigurationReader, + authTokenProvider); + + Mono storeResponseSingle = consistencyReader.readAsync(dsr, timeoutHelper, false, false); + + FailureValidator failureValidator = FailureValidator.builder().resourceNotFound().instanceOf(NotFoundException.class).unknownSubStatusCode().build(); + validateException(storeResponseSingle, failureValidator); + } + + /** + * reading in session consistency, no replica support requested lsn + */ + @Test(groups = "unit") + public void sessionRead_ReplicasDoNotHaveTheRequestedLSN() { + long lsn = 651175; + long globalCommittedLsn = 651174; + String partitionKeyRangeId = "73"; + NotFoundException foundException = new NotFoundException(); + foundException.responseHeaders().put(HttpConstants.HttpHeaders.SESSION_TOKEN, partitionKeyRangeId + ":-1#" + lsn); + foundException.responseHeaders().put(WFConstants.BackendHeaders.LSN, Long.toString(651175)); + foundException.responseHeaders().put(WFConstants.BackendHeaders.LOCAL_LSN, Long.toString(651175)); + foundException.responseHeaders().put(WFConstants.BackendHeaders.GLOBAL_COMMITTED_LSN, Long.toString(globalCommittedLsn)); + + TransportClientWrapper transportClientWrapper = new TransportClientWrapper.Builder.ReplicaResponseBuilder + .SequentialBuilder() + .then(foundException) // 1st replica read lsn lags behind the request lsn + .then(foundException) // 2nd replica read lsn lags behind the request lsn + .then(foundException) // 3rd replica read lsn lags behind the request lsn + 
.then(foundException) // 4th replica read lsn lags behind the request lsn + .build(); + + URI primaryUri = URI.create("primary"); + URI secondaryUri1 = URI.create("secondary1"); + URI secondaryUri2 = URI.create("secondary2"); + URI secondaryUri3 = URI.create("secondary3"); + + AddressSelectorWrapper addressSelectorWrapper = AddressSelectorWrapper.Builder.Simple.create() + .withPrimary(primaryUri) + .withSecondary(ImmutableList.of(secondaryUri1, secondaryUri2, secondaryUri3)) + .build(); + ISessionContainer sessionContainer = Mockito.mock(ISessionContainer.class); + TimeoutHelper timeoutHelper = Mockito.mock(TimeoutHelper.class); + RxDocumentServiceRequest dsr = RxDocumentServiceRequest.createFromName( + OperationType.Read, "/dbs/db/colls/col/docs/docId", ResourceType.Document); + dsr.getHeaders().put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, ConsistencyLevel.SESSION.toString()); + dsr.requestContext = new DocumentServiceRequestContext(); + Utils.ValueHolder sessionToken = Utils.ValueHolder.initialize(null); + assertThat(VectorSessionToken.tryCreate("-1#" + (lsn + 1) , sessionToken)).isTrue(); + dsr.requestContext.timeoutHelper = timeoutHelper; + dsr.requestContext.resolvedPartitionKeyRange = partitionKeyRangeWithId(partitionKeyRangeId); + dsr.requestContext.requestChargeTracker = new RequestChargeTracker(); + + Mockito.doReturn(sessionToken.v).when(sessionContainer).resolvePartitionLocalSessionToken(Mockito.eq(dsr), Mockito.anyString()); + + Configs configs = new Configs(); + GatewayServiceConfiguratorReaderMock gatewayServiceConfigurationReaderWrapper = GatewayServiceConfiguratorReaderMock.from(ConsistencyLevel.STRONG, + 4, + 3, + 4, + 3); + + IAuthorizationTokenProvider authTokenProvider = Mockito.mock(IAuthorizationTokenProvider.class); + ConsistencyReader consistencyReader = new ConsistencyReader(configs, + addressSelectorWrapper.addressSelector, + sessionContainer, + transportClientWrapper.transportClient, + gatewayServiceConfigurationReaderWrapper.gatewayServiceConfigurationReader, + authTokenProvider); + + Mono storeResponseSingle = consistencyReader.readAsync(dsr, timeoutHelper, false, false); + + FailureValidator failureValidator = FailureValidator.builder().resourceNotFound().instanceOf(NotFoundException.class).subStatusCode(HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE).build(); + validateException(storeResponseSingle, failureValidator); + } + + @Test(groups = "unit") + public void requestRateTooLarge_BubbleUp() { + long lsn = 651175; + long globalCommittedLsn = 651174; + String partitionKeyRangeId = "73"; + + RequestRateTooLargeException requestTooLargeException = new RequestRateTooLargeException(); + requestTooLargeException.responseHeaders().put(HttpConstants.HttpHeaders.SESSION_TOKEN, partitionKeyRangeId + ":-1#" + lsn); + requestTooLargeException.responseHeaders().put(WFConstants.BackendHeaders.LSN, Long.toString(651175)); + requestTooLargeException.responseHeaders().put(WFConstants.BackendHeaders.LOCAL_LSN, Long.toString(651175)); + requestTooLargeException.responseHeaders().put(WFConstants.BackendHeaders.GLOBAL_COMMITTED_LSN, Long.toString(globalCommittedLsn)); + + TransportClientWrapper transportClientWrapper = new TransportClientWrapper.Builder.ReplicaResponseBuilder + .SequentialBuilder() + .then(requestTooLargeException) // 1st replica read result in throttling + .then(requestTooLargeException) // 2nd replica read result in throttling + .then(requestTooLargeException) // 3rd replica read result in throttling + .then(requestTooLargeException) // 4th 
replica read result in throttling + .build(); + + URI primaryUri = URI.create("primary"); + URI secondaryUri1 = URI.create("secondary1"); + URI secondaryUri2 = URI.create("secondary2"); + URI secondaryUri3 = URI.create("secondary3"); + + AddressSelectorWrapper addressSelectorWrapper = AddressSelectorWrapper.Builder.Simple.create() + .withPrimary(primaryUri) + .withSecondary(ImmutableList.of(secondaryUri1, secondaryUri2, secondaryUri3)) + .build(); + ISessionContainer sessionContainer = Mockito.mock(ISessionContainer.class); + TimeoutHelper timeoutHelper = Mockito.mock(TimeoutHelper.class); + RxDocumentServiceRequest dsr = RxDocumentServiceRequest.createFromName( + OperationType.Read, "/dbs/db/colls/col/docs/docId", ResourceType.Document); + dsr.getHeaders().put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, ConsistencyLevel.SESSION.toString()); + dsr.requestContext = new DocumentServiceRequestContext(); + Utils.ValueHolder sessionToken = Utils.ValueHolder.initialize(null); + assertThat(VectorSessionToken.tryCreate("-1#" + lsn , sessionToken)).isTrue(); + dsr.requestContext.timeoutHelper = timeoutHelper; + dsr.requestContext.resolvedPartitionKeyRange = partitionKeyRangeWithId(partitionKeyRangeId); + dsr.requestContext.requestChargeTracker = new RequestChargeTracker(); + + Mockito.doReturn(sessionToken.v).when(sessionContainer).resolvePartitionLocalSessionToken(Mockito.eq(dsr), Mockito.anyString()); + + Configs configs = new Configs(); + GatewayServiceConfiguratorReaderMock gatewayServiceConfigurationReaderWrapper = GatewayServiceConfiguratorReaderMock.from(ConsistencyLevel.STRONG, + 4, + 3, + 4, + 3); + + IAuthorizationTokenProvider authTokenProvider = Mockito.mock(IAuthorizationTokenProvider.class); + ConsistencyReader consistencyReader = new ConsistencyReader(configs, + addressSelectorWrapper.addressSelector, + sessionContainer, + transportClientWrapper.transportClient, + gatewayServiceConfigurationReaderWrapper.gatewayServiceConfigurationReader, + authTokenProvider); + + Mono storeResponseSingle = consistencyReader.readAsync(dsr, timeoutHelper, false, false); + + + FailureValidator failureValidator = FailureValidator.builder().instanceOf(RequestRateTooLargeException.class).unknownSubStatusCode().build(); + validateException(storeResponseSingle, failureValidator); + } + + @DataProvider(name = "simpleReadStrongArgProvider") + public Object[][] simpleReadStrongArgProvider() { + return new Object[][]{ + { 1, ReadMode.Strong }, + { 2, ReadMode.Strong }, + { 3, ReadMode.Strong }, + }; + } + + @Test(groups = "unit", dataProvider = "simpleReadStrongArgProvider") + public void basicReadStrong_AllReplicasSameLSN(int replicaCountToRead, ReadMode readMode) { + ISessionContainer sessionContainer = Mockito.mock(ISessionContainer.class); + URI primaryReplicaURI = URI.create("primary"); + ImmutableList secondaryReplicaURIs = ImmutableList.of(URI.create("secondary1"), URI.create("secondary2"), URI.create("secondary3")); + AddressSelectorWrapper addressSelectorWrapper = AddressSelectorWrapper.Builder.Simple.create() + .withPrimary(primaryReplicaURI) + .withSecondary(secondaryReplicaURIs) + .build(); + + RxDocumentServiceRequest request = RxDocumentServiceRequest.createFromName( + OperationType.Read, "/dbs/db/colls/col/docs/docId", ResourceType.Document); + + request.requestContext = new DocumentServiceRequestContext(); + request.requestContext.timeoutHelper = Mockito.mock(TimeoutHelper.class); + request.requestContext.resolvedPartitionKeyRange = Mockito.mock(PartitionKeyRange.class); + 
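+        // Editorial note (not part of the original change): the request context assembled here
+        // supplies the pieces the strong-read path consults - a mocked timeout helper, a resolved
+        // partition key range, and a RequestChargeTracker that accumulates the charge reported by
+        // each replica read, which is consistent with the expected charge below being the
+        // per-read charge multiplied by the number of replicas read.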
request.requestContext.requestChargeTracker = new RequestChargeTracker(); + + BigDecimal requestChargePerRead = new BigDecimal(1.1); + + StoreResponse primaryResponse = StoreResponseBuilder.create() + .withLSN(51) + .withLocalLSN(18) + .withRequestCharge(requestChargePerRead.doubleValue()) + .build(); + StoreResponse secondaryResponse1 = StoreResponseBuilder.create() + .withLSN(51) + .withLocalLSN(18) + .withRequestCharge(requestChargePerRead.doubleValue()) + .build(); + StoreResponse secondaryResponse2 = StoreResponseBuilder.create() + .withLSN(51) + .withLocalLSN(18) + .withRequestCharge(requestChargePerRead.doubleValue()) + .build(); + StoreResponse secondaryResponse3 = StoreResponseBuilder.create() + .withLSN(51) + .withLocalLSN(18) + .withRequestCharge(requestChargePerRead.doubleValue()) + .build(); + + TransportClientWrapper transportClientWrapper = TransportClientWrapper.Builder.uriToResultBuilder() + .storeResponseOn(primaryReplicaURI, OperationType.Read, ResourceType.Document, primaryResponse, false) + .storeResponseOn(secondaryReplicaURIs.get(0), OperationType.Read, ResourceType.Document, secondaryResponse1, false) + .storeResponseOn(secondaryReplicaURIs.get(1), OperationType.Read, ResourceType.Document, secondaryResponse2, false) + .storeResponseOn(secondaryReplicaURIs.get(2), OperationType.Read, ResourceType.Document, secondaryResponse3, false) + .build(); + + StoreReader storeReader = new StoreReader(transportClientWrapper.transportClient, addressSelectorWrapper.addressSelector, sessionContainer); + GatewayServiceConfigurationReader serviceConfigurator = Mockito.mock(GatewayServiceConfigurationReader.class); + IAuthorizationTokenProvider authTokenProvider = Mockito.mock(IAuthorizationTokenProvider.class); + QuorumReader quorumReader = new QuorumReader(configs, transportClientWrapper.transportClient, addressSelectorWrapper.addressSelector, storeReader, serviceConfigurator, authTokenProvider); + + Mono storeResponseSingle = quorumReader.readStrongAsync(request, replicaCountToRead, readMode); + + StoreResponseValidator validator = StoreResponseValidator.create() + .withBELSN(51) + .withRequestCharge(requestChargePerRead.multiply(BigDecimal.valueOf(replicaCountToRead)).setScale(2, RoundingMode.FLOOR).doubleValue()) + .build(); + validateSuccess(storeResponseSingle, validator); + + transportClientWrapper.validate() + .verifyNumberOfInvocations(replicaCountToRead); + addressSelectorWrapper.validate() + .verifyNumberOfForceCachRefresh(0) + .verifyVesolvePrimaryUriAsyncCount(0) + .verifyTotalInvocations(1); + } + + // TODO: add more mocking tests for when one replica lags behind and we need to do barrier request. 
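+    // Editorial sketch (not part of the original change): spells out how the expected request
+    // charge asserted in basicReadStrong_AllReplicasSameLSN is derived - each replica read reports
+    // the same charge, and the aggregate is their sum rounded down to two decimal places, mirroring
+    // the BigDecimal expression used in the validator above.
+    private static double expectedAggregatedRequestCharge(BigDecimal chargePerRead, int replicaCountToRead) {
+        return chargePerRead.multiply(BigDecimal.valueOf(replicaCountToRead))
+                .setScale(2, RoundingMode.FLOOR)
+                .doubleValue();
+    }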
+ + public static void validateSuccess(Mono> single, + MultiStoreResultValidator validator) { + validateSuccess(single, validator, 10000); + } + + public static void validateSuccess(Mono> single, + MultiStoreResultValidator validator, + long timeout) { + TestSubscriber> testSubscriber = new TestSubscriber<>(); + + single.subscribe(testSubscriber); + testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS); + testSubscriber.assertNoErrors(); + testSubscriber.assertComplete(); + testSubscriber.assertValueCount(1); + validator.validate(testSubscriber.values().get(0)); + } + + public static void validateSuccess(Mono single, + StoreResponseValidator validator) { + validateSuccess(single, validator, 10000); + } + + public static void validateSuccess(Mono single, + StoreResponseValidator validator, + long timeout) { + TestSubscriber testSubscriber = new TestSubscriber<>(); + + single.subscribe(testSubscriber); + testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS); + testSubscriber.assertNoErrors(); + testSubscriber.assertComplete(); + testSubscriber.assertValueCount(1); + validator.validate(testSubscriber.values().get(0)); + } + + + public static void validateException(Mono single, + FailureValidator validator, + long timeout) { + TestSubscriber testSubscriber = new TestSubscriber<>(); + + single.subscribe(testSubscriber); + testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS); + testSubscriber.assertNotComplete(); + testSubscriber.assertTerminated(); + assertThat(testSubscriber.errorCount()).isEqualTo(1); + validator.validate(testSubscriber.errors().get(0)); + } + + public static void validateException(Mono single, + FailureValidator validator) { + validateException(single, validator, TIMEOUT); + } + + private PartitionKeyRange partitionKeyRangeWithId(String id) { + PartitionKeyRange partitionKeyRange = Mockito.mock(PartitionKeyRange.class); + Mockito.doReturn(id).when(partitionKeyRange).id(); + return partitionKeyRange; + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/ConsistencyReaderUnderTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/ConsistencyReaderUnderTest.java new file mode 100644 index 0000000000000..3087a4e2cca0e --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/ConsistencyReaderUnderTest.java @@ -0,0 +1,86 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.internal.ISessionContainer; +import com.azure.data.cosmos.internal.Configs; +import com.azure.data.cosmos.internal.IAuthorizationTokenProvider; +import org.mockito.Mockito; + +public class ConsistencyReaderUnderTest extends ConsistencyReader { + private QuorumReader origQuorumReader; + private QuorumReader spyQuorumReader; + + private StoreReaderUnderTest origStoreReader; + private StoreReaderUnderTest spyStoreReader; + + public ConsistencyReaderUnderTest(AddressSelector addressSelector, + ISessionContainer sessionContainer, + TransportClient transportClient, + GatewayServiceConfigurationReader serviceConfigReader, + IAuthorizationTokenProvider authorizationTokenProvider) { + super(new Configs(), addressSelector, sessionContainer, transportClient, serviceConfigReader, authorizationTokenProvider); + + } + + public QuorumReader getOrigQuorumReader() { + return origQuorumReader; + } + + public QuorumReader getSpyQuorumReader() { + return spyQuorumReader; + } + + public StoreReaderUnderTest getOrigStoreReader() { + return origStoreReader; + } + + public StoreReaderUnderTest getSpyStoreReader() { + return spyStoreReader; + } + + @Override + public QuorumReader createQuorumReader(TransportClient transportClient, + AddressSelector addressSelector, + StoreReader storeReader, + GatewayServiceConfigurationReader serviceConfigurationReader, + IAuthorizationTokenProvider authorizationTokenProvider) { + this.origQuorumReader = super.createQuorumReader(transportClient, + addressSelector, + storeReader, + serviceConfigurationReader, + authorizationTokenProvider); + this.spyQuorumReader = Mockito.spy(this.origQuorumReader); + return this.spyQuorumReader; + } + + @Override + public StoreReader createStoreReader(TransportClient transportClient, + AddressSelector addressSelector, + ISessionContainer sessionContainer) { + this.origStoreReader = new StoreReaderUnderTest(transportClient, addressSelector, sessionContainer); + this.spyStoreReader = Mockito.spy(this.origStoreReader); + return this.spyStoreReader; + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/ConsistencyWriterTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/ConsistencyWriterTest.java new file mode 100644 index 0000000000000..46148fd7214c4 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/ConsistencyWriterTest.java @@ -0,0 +1,287 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.internal.ISessionContainer; +import com.azure.data.cosmos.PartitionKeyRangeGoneException; +import com.azure.data.cosmos.RequestTimeoutException; +import com.azure.data.cosmos.internal.*; +import com.azure.data.cosmos.PartitionIsMigratingException; +import com.azure.data.cosmos.PartitionKeyRangeIsSplittingException; +import com.google.common.collect.ImmutableList; +import io.reactivex.subscribers.TestSubscriber; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; +import reactor.core.publisher.DirectProcessor; +import reactor.core.publisher.Mono; + +import java.net.URI; +import java.util.AbstractMap; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.TimeUnit; + +import static com.azure.data.cosmos.internal.HttpConstants.StatusCodes.GONE; +import static com.azure.data.cosmos.internal.HttpConstants.SubStatusCodes.COMPLETING_PARTITION_MIGRATION; +import static com.azure.data.cosmos.internal.HttpConstants.SubStatusCodes.COMPLETING_SPLIT; +import static com.azure.data.cosmos.internal.HttpConstants.SubStatusCodes.PARTITION_KEY_RANGE_GONE; +import static org.assertj.core.api.Assertions.assertThat; + +public class ConsistencyWriterTest { + + private AddressSelector addressSelector; + private ISessionContainer sessionContainer; + private TransportClient transportClient; + private GatewayServiceConfigurationReader serviceConfigReader; + private ConsistencyWriter consistencyWriter; + + @DataProvider(name = "exceptionArgProvider") + public Object[][] exceptionArgProvider() { + return new Object[][]{ + // exception to be thrown from transportClient, expected (exception type, status, subStatus) + { new PartitionKeyRangeGoneException(), PartitionKeyRangeGoneException.class, GONE, PARTITION_KEY_RANGE_GONE, }, + { new PartitionKeyRangeIsSplittingException() , PartitionKeyRangeIsSplittingException.class, GONE, COMPLETING_SPLIT, }, + { new PartitionIsMigratingException(), PartitionIsMigratingException.class, GONE, COMPLETING_PARTITION_MIGRATION, }, + }; + } + + @Test(groups = "unit", dataProvider = "exceptionArgProvider") + public void exception(Exception ex, Class klass, int expectedStatusCode, Integer expectedSubStatusCode) { + TransportClientWrapper transportClientWrapper = new TransportClientWrapper.Builder.ReplicaResponseBuilder + .SequentialBuilder() + .then(ex) + .build(); + + URI primaryUri = URI.create("primary"); + URI secondaryUri1 = URI.create("secondary1"); + URI secondaryUri2 = URI.create("secondary2"); + URI secondaryUri3 = URI.create("secondary3"); + + AddressSelectorWrapper addressSelectorWrapper = AddressSelectorWrapper.Builder.Simple.create() + .withPrimary(primaryUri) + 
.withSecondary(ImmutableList.of(secondaryUri1, secondaryUri2, secondaryUri3)) + .build(); + sessionContainer = Mockito.mock(ISessionContainer.class); + IAuthorizationTokenProvider authorizationTokenProvider = Mockito.mock(IAuthorizationTokenProvider.class); + serviceConfigReader = Mockito.mock(GatewayServiceConfigurationReader.class); + + consistencyWriter = new ConsistencyWriter( + addressSelectorWrapper.addressSelector, + sessionContainer, + transportClientWrapper.transportClient, + authorizationTokenProvider, + serviceConfigReader, + false); + + TimeoutHelper timeoutHelper = Mockito.mock(TimeoutHelper.class); + RxDocumentServiceRequest dsr = Mockito.mock(RxDocumentServiceRequest.class); + dsr.requestContext = Mockito.mock(DocumentServiceRequestContext.class); + + Mono res = consistencyWriter.writeAsync(dsr, timeoutHelper, false); + + FailureValidator failureValidator = FailureValidator.builder() + .instanceOf(klass) + .statusCode(expectedStatusCode) + .subStatusCode(expectedSubStatusCode) + .build(); + + TestSubscriber subscriber = new TestSubscriber<>(); + res.subscribe(subscriber); + subscriber.awaitTerminalEvent(); + subscriber.assertNotComplete(); + assertThat(subscriber.errorCount()).isEqualTo(1); + failureValidator.validate(subscriber.errors().get(0)); + } + + @Test(groups = "unit") + public void startBackgroundAddressRefresh() throws Exception { + initializeConsistencyWriter(false); + + CyclicBarrier b = new CyclicBarrier(2); + DirectProcessor directProcessor = DirectProcessor.create(); + CountDownLatch c = new CountDownLatch(1); + + URI uri = URI.create("https://localhost:5050"); + + List invocationOnMocks = Collections.synchronizedList(new ArrayList<>()); + Mockito.doAnswer(invocationOnMock -> { + invocationOnMocks.add(invocationOnMock); + return directProcessor.single().doOnSuccess(x -> c.countDown()).doAfterTerminate(() -> new Thread() { + @Override + public void run() { + try { + b.await(); + } catch (Exception e) { + e.printStackTrace(); + } + } + }.start()); + }).when(addressSelector).resolvePrimaryUriAsync(Mockito.any(RxDocumentServiceRequest.class), Mockito.anyBoolean()); + RxDocumentServiceRequest request = Mockito.mock(RxDocumentServiceRequest.class); + consistencyWriter.startBackgroundAddressRefresh(request); + + directProcessor.onNext(uri); + directProcessor.onComplete(); + + TimeUnit.MILLISECONDS.sleep(1000); + assertThat(c.getCount()).isEqualTo(0); + assertThat(b.getNumberWaiting()).isEqualTo(1); + b.await(1000, TimeUnit.MILLISECONDS); + assertThat(invocationOnMocks).hasSize(1); + assertThat(invocationOnMocks.get(0).getArgumentAt(1, Boolean.class)).isTrue(); + } + + @Test(groups = "unit") + public void getLsnAndGlobalCommittedLsn() { + ImmutableList.Builder> builder = new ImmutableList.Builder<>(); + builder.add(new AbstractMap.SimpleEntry<>(WFConstants.BackendHeaders.LSN, "3")); + builder.add(new AbstractMap.SimpleEntry<>(WFConstants.BackendHeaders.GLOBAL_COMMITTED_LSN, "2")); + ImmutableList> headers = builder.build(); + + StoreResponse sr = new StoreResponse(0, headers, (String) null); + Utils.ValueHolder lsn = Utils.ValueHolder.initialize(-2l); + Utils.ValueHolder globalCommittedLsn = Utils.ValueHolder.initialize(-2l); + ConsistencyWriter.getLsnAndGlobalCommittedLsn(sr, lsn, globalCommittedLsn); + assertThat(lsn.v).isEqualTo(3); + assertThat(globalCommittedLsn.v).isEqualTo(2); + } + + + @Test(groups = "unit") + public void timeout1() throws Exception { + initializeConsistencyWriter(false); + TimeoutHelper timeoutHelper = Mockito.mock(TimeoutHelper.class); + 
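+        // Editorial note (not part of the original change): stubbing isElapsed() to return true
+        // below simulates a request whose client-side timeout has already expired, so writeAsync
+        // is expected to surface a RequestTimeoutException, which is exactly what the subscriber
+        // asserts.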
Mockito.doReturn(true).when(timeoutHelper).isElapsed(); + ConsistencyWriter spyConsistencyWriter = Mockito.spy(this.consistencyWriter); + TestSubscriber subscriber = new TestSubscriber(); + + spyConsistencyWriter.writeAsync(Mockito.mock(RxDocumentServiceRequest.class), timeoutHelper, false) + .subscribe(subscriber); + + subscriber.awaitTerminalEvent(10, TimeUnit.MILLISECONDS); + subscriber.assertNoValues(); + + subscriber.assertError(RequestTimeoutException.class); + } + + @Test(groups = "unit") + public void timeout2() throws Exception { + initializeConsistencyWriter(false); + TimeoutHelper timeoutHelper = Mockito.mock(TimeoutHelper.class); + Mockito.doReturn(false).doReturn(true).when(timeoutHelper).isElapsed(); + ConsistencyWriter spyConsistencyWriter = Mockito.spy(this.consistencyWriter); + TestSubscriber subscriber = new TestSubscriber(); + + spyConsistencyWriter.writeAsync(Mockito.mock(RxDocumentServiceRequest.class), timeoutHelper, false) + .subscribe(subscriber); + + subscriber.awaitTerminalEvent(10, TimeUnit.MILLISECONDS); + subscriber.assertError(RequestTimeoutException.class); + } + + @DataProvider(name = "globalStrongArgProvider") + public Object[][] globalStrongArgProvider() { + return new Object[][]{ + { + ConsistencyLevel.SESSION, + Mockito.mock(RxDocumentServiceRequest.class), + Mockito.mock(StoreResponse.class), + + false, + }, + { + ConsistencyLevel.EVENTUAL, + Mockito.mock(RxDocumentServiceRequest.class), + Mockito.mock(StoreResponse.class), + + false, + }, + { + + ConsistencyLevel.EVENTUAL, + Mockito.mock(RxDocumentServiceRequest.class), + StoreResponseBuilder.create() + .withHeader(WFConstants.BackendHeaders.NUMBER_OF_READ_REGIONS, Integer.toString(5)) + .build(), + false, + }, + { + + ConsistencyLevel.STRONG, + Mockito.mock(RxDocumentServiceRequest.class), + StoreResponseBuilder.create() + .withHeader(WFConstants.BackendHeaders.NUMBER_OF_READ_REGIONS, Integer.toString(5)) + .build(), + true, + }, + { + + ConsistencyLevel.STRONG, + Mockito.mock(RxDocumentServiceRequest.class), + StoreResponseBuilder.create() + .withHeader(WFConstants.BackendHeaders.NUMBER_OF_READ_REGIONS, Integer.toString(0)) + .build(), + false, + } + }; + } + + @Test(groups = "unit", dataProvider = "globalStrongArgProvider") + public void isGlobalStrongRequest(ConsistencyLevel defaultConsistencyLevel, RxDocumentServiceRequest req, StoreResponse storeResponse, boolean isGlobalStrongExpected) { + initializeConsistencyWriter(false); + Mockito.doReturn(defaultConsistencyLevel).when(this.serviceConfigReader).getDefaultConsistencyLevel(); + + + assertThat(consistencyWriter.isGlobalStrongRequest(req, storeResponse)).isEqualTo(isGlobalStrongExpected); + } + + private void initializeConsistencyWriter(boolean useMultipleWriteLocation) { + addressSelector = Mockito.mock(AddressSelector.class); + sessionContainer = Mockito.mock(ISessionContainer.class); + transportClient = Mockito.mock(TransportClient.class); + IAuthorizationTokenProvider authorizationTokenProvider = Mockito.mock(IAuthorizationTokenProvider.class); + serviceConfigReader = Mockito.mock(GatewayServiceConfigurationReader.class); + + consistencyWriter = new ConsistencyWriter( + addressSelector, + sessionContainer, + transportClient, + authorizationTokenProvider, + serviceConfigReader, + useMultipleWriteLocation); + } + + // TODO: add more mocking unit tests for Global STRONG (mocking unit tests) + // TODO: add more tests for SESSION behaviour (mocking unit tests) + // TODO: add more tests for error handling behaviour (mocking unit tests) + // TODO: 
add tests for replica catch up (request barrier while loop) (mocking unit tests) + // https://msdata.visualstudio.com/CosmosDB/_workitems/edit/320977 +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/DCDocumentCrudTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/DCDocumentCrudTest.java new file mode 100644 index 0000000000000..0ae3b193740dd --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/DCDocumentCrudTest.java @@ -0,0 +1,340 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.internal.AsyncDocumentClient.Builder; +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.ConnectionMode; +import com.azure.data.cosmos.ConnectionPolicy; +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.internal.Database; +import com.azure.data.cosmos.internal.Document; +import com.azure.data.cosmos.internal.DocumentCollection; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.PartitionKey; +import com.azure.data.cosmos.internal.RequestOptions; +import com.azure.data.cosmos.internal.ResourceResponse; +import com.azure.data.cosmos.internal.StoredProcedure; +import com.azure.data.cosmos.internal.StoredProcedureResponse; +import com.azure.data.cosmos.internal.Configs; +import com.azure.data.cosmos.internal.OperationType; +import com.azure.data.cosmos.internal.ResourceType; +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import com.azure.data.cosmos.internal.SpyClientUnderTestFactory; +import com.azure.data.cosmos.internal.TestSuiteBase; +import com.azure.data.cosmos.internal.DocumentServiceRequestValidator; +import com.azure.data.cosmos.internal.FeedResponseListValidator; +import com.azure.data.cosmos.internal.ResourceResponseValidator; +import com.azure.data.cosmos.internal.TestConfigurations; +import org.mockito.stubbing.Answer; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; + +import java.lang.reflect.Method; 
+import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.stream.Collectors; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.spy; + +/** + * The purpose of the tests in this class is to ensure the request are routed through direct connectivity stack. + * The tests in other test classes validate the actual behaviour and different scenarios. + */ +public class DCDocumentCrudTest extends TestSuiteBase { + + private final static int QUERY_TIMEOUT = 40000; + private final static String PARTITION_KEY_FIELD_NAME = "mypk"; + + private static Database createdDatabase; + private static DocumentCollection createdCollection; + + private SpyClientUnderTestFactory.ClientWithGatewaySpy client; + + @DataProvider + public static Object[][] directClientBuilder() { + return new Object[][] { { createDCBuilder(Protocol.HTTPS) }, { createDCBuilder(Protocol.TCP) } }; + } + + static Builder createDCBuilder(Protocol protocol) { + + ConnectionPolicy connectionPolicy = new ConnectionPolicy(); + connectionPolicy.connectionMode(ConnectionMode.DIRECT); + + Configs configs = spy(new Configs()); + doAnswer((Answer) invocation -> protocol).when(configs).getProtocol(); + + return new Builder() + .withServiceEndpoint(TestConfigurations.HOST) + .withConfigs(configs) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.SESSION) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY); + } + + @Factory(dataProvider = "directClientBuilder") + public DCDocumentCrudTest(Builder clientBuilder) { + super(clientBuilder); + } + + @Test(groups = { "direct" }, timeOut = TIMEOUT) + public void executeStoredProc() { + StoredProcedure storedProcedure = new StoredProcedure(); + storedProcedure.id(UUID.randomUUID().toString()); + storedProcedure.setBody("function() {var x = 10;}"); + + Flux> createObservable = client + .createStoredProcedure(getCollectionLink(), storedProcedure, null); + + ResourceResponseValidator validator = new ResourceResponseValidator.Builder() + .withId(storedProcedure.id()) + .build(); + + validateSuccess(createObservable, validator, TIMEOUT); + + // creating a stored proc will go through gateway so clearing captured requests + + client.getCapturedRequests().clear(); + + // execute the created storedProc and ensure it goes through direct connectivity stack + String storedProcLink = "dbs/" + createdDatabase.id() + "/colls/" + createdCollection.id() + "/sprocs/" + storedProcedure.id(); + + RequestOptions options = new RequestOptions(); + options.setPartitionKey(new PartitionKey("dummy")); + StoredProcedureResponse storedProcedureResponse = client + .executeStoredProcedure(storedProcLink, options, null).single().block(); + + assertThat(storedProcedureResponse.getStatusCode()).isEqualTo(200); + + // validate the request routed through direct stack + validateNoStoredProcExecutionOperationThroughGateway(); + } + + /** + * Tests document creation through direct mode + */ + @Test(groups = { "direct" }, timeOut = TIMEOUT) + public void create() { + final Document docDefinition = getDocumentDefinition(); + + Flux> createObservable = client.createDocument( + this.getCollectionLink(), docDefinition, null, false); + + ResourceResponseValidator validator = new ResourceResponseValidator.Builder() + .withId(docDefinition.id()) + .build(); + + validateSuccess(createObservable, validator, TIMEOUT); + validateNoDocumentOperationThroughGateway(); + } + + /** + * Tests 
document read through direct https. + * @throws Exception + */ + @Test(groups = { "direct" }, timeOut = TIMEOUT) + public void read() throws Exception { + Document docDefinition = this.getDocumentDefinition(); + Document document = client.createDocument(getCollectionLink(), docDefinition, null, false).single().block().getResource(); + + // give times to replicas to catch up after a write + waitIfNeededForReplicasToCatchUp(clientBuilder()); + + String pkValue = document.getString(PARTITION_KEY_FIELD_NAME); + + RequestOptions options = new RequestOptions(); + options.setPartitionKey(new PartitionKey(pkValue)); + + String docLink = + String.format("dbs/%s/colls/%s/docs/%s", createdDatabase.id(), createdCollection.id(), document.id()); + + ResourceResponseValidator validator = new ResourceResponseValidator.Builder() + .withId(docDefinition.id()) + .build(); + + validateSuccess(client.readDocument(docLink, options), validator, TIMEOUT); + + validateNoDocumentOperationThroughGateway(); + } + + /** + * Tests document upsert through direct https. + * @throws Exception + */ + @Test(groups = { "direct" }, timeOut = TIMEOUT) + public void upsert() throws Exception { + + final Document docDefinition = getDocumentDefinition(); + + final Document document = client.createDocument(getCollectionLink(), docDefinition, null, false) + .single() + .block() + .getResource(); + + // give times to replicas to catch up after a write + waitIfNeededForReplicasToCatchUp(clientBuilder()); + + String pkValue = document.getString(PARTITION_KEY_FIELD_NAME); + RequestOptions options = new RequestOptions(); + options.setPartitionKey(new PartitionKey(pkValue)); + + String propName = "newProp"; + String propValue = "hello"; + BridgeInternal.setProperty(document, propName, propValue); + + ResourceResponseValidator validator = ResourceResponseValidator.builder() + .withProperty(propName, propValue) + .build(); + validateSuccess(client.upsertDocument(getCollectionLink(), document, options, false), validator, TIMEOUT); + + validateNoDocumentOperationThroughGateway(); + } + + @Test(groups = { "direct" }, timeOut = QUERY_TIMEOUT) + public void crossPartitionQuery() { + + truncateCollection(createdCollection); + waitIfNeededForReplicasToCatchUp(clientBuilder()); + + client.getCapturedRequests().clear(); + + int cnt = 1000; + List documentList = new ArrayList<>(); + for(int i = 0; i < cnt; i++) { + Document docDefinition = getDocumentDefinition(); + documentList.add(docDefinition); + } + + documentList = bulkInsert(client, getCollectionLink(), documentList).map(ResourceResponse::getResource).collectList().single().block(); + + waitIfNeededForReplicasToCatchUp(clientBuilder()); + + FeedOptions options = new FeedOptions(); + options.enableCrossPartitionQuery(true); + options.maxDegreeOfParallelism(-1); + options.maxItemCount(100); + Flux> results = client.queryDocuments(getCollectionLink(), "SELECT * FROM r", options); + + FeedResponseListValidator validator = new FeedResponseListValidator.Builder() + .totalSize(documentList.size()) + .exactlyContainsInAnyOrder(documentList.stream().map(Document::resourceId).collect(Collectors.toList())).build(); + + validateQuerySuccess(results, validator, QUERY_TIMEOUT); + validateNoDocumentQueryOperationThroughGateway(); + // validates only the first query for fetching query plan goes to gateway. 
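+        // Editorial note (not part of the original change): of the requests captured at the
+        // gateway, only one should target the Document resource type - the query-plan lookup -
+        // while the query results themselves are fetched over the direct connectivity stack.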
+ assertThat(client.getCapturedRequests().stream().filter(r -> r.getResourceType() == ResourceType.Document)).hasSize(1); + } + + private void validateNoStoredProcExecutionOperationThroughGateway() { + // this validates that Document related requests don't go through gateway + DocumentServiceRequestValidator validateResourceTypesSentToGateway = DocumentServiceRequestValidator.builder() + .resourceTypeIn(ResourceType.DatabaseAccount, + ResourceType.Database, + ResourceType.DocumentCollection, + ResourceType.PartitionKeyRange) + .build(); + + // validate that all gateway captured requests are non document resources + for(RxDocumentServiceRequest request: client.getCapturedRequests()) { + validateResourceTypesSentToGateway.validate(request); + } + } + + private void validateNoDocumentOperationThroughGateway() { + // this validates that Document related requests don't go through gateway + DocumentServiceRequestValidator validateResourceTypesSentToGateway = DocumentServiceRequestValidator.builder() + .resourceTypeIn(ResourceType.DatabaseAccount, + ResourceType.Database, + ResourceType.DocumentCollection, + ResourceType.PartitionKeyRange) + .build(); + + // validate that all gateway captured requests are non document resources + for(RxDocumentServiceRequest request: client.getCapturedRequests()) { + validateResourceTypesSentToGateway.validate(request); + } + } + + private void validateNoDocumentQueryOperationThroughGateway() { + // this validates that Document related requests don't go through gateway + DocumentServiceRequestValidator validateResourceTypesSentToGateway = DocumentServiceRequestValidator.builder() + .resourceTypeIn(ResourceType.DatabaseAccount, + ResourceType.Database, + ResourceType.DocumentCollection, + ResourceType.PartitionKeyRange) + .build(); + + // validate that all gateway captured requests are non document resources + for(RxDocumentServiceRequest request: client.getCapturedRequests()) { + if (request.getOperationType() == OperationType.Query) { + assertThat(request.getPartitionKeyRangeIdentity()).isNull(); + } else { + validateResourceTypesSentToGateway.validate(request); + } + } + } + + @BeforeClass(groups = { "direct" }, timeOut = SETUP_TIMEOUT) + public void beforeClass() { + + RequestOptions options = new RequestOptions(); + options.setOfferThroughput(10100); + createdDatabase = SHARED_DATABASE; + createdCollection = createCollection(createdDatabase.id(), getCollectionDefinition(), options); + client = SpyClientUnderTestFactory.createClientWithGatewaySpy(clientBuilder()); + + assertThat(client.getCapturedRequests()).isNotEmpty(); + } + + @AfterClass(groups = { "direct" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeClose(client); + } + + @BeforeMethod(groups = { "direct" }) + public void beforeMethod(Method method) { + client.getCapturedRequests().clear(); + } + + private String getCollectionLink() { + return String.format("/dbs/%s/colls/%s", createdDatabase.id(), createdCollection.id()); + } + + private Document getDocumentDefinition() { + Document doc = new Document(); + doc.id(UUID.randomUUID().toString()); + BridgeInternal.setProperty(doc, PARTITION_KEY_FIELD_NAME, UUID.randomUUID().toString()); + BridgeInternal.setProperty(doc, "name", "Hafez"); + return doc; + } + +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/EndpointMock.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/EndpointMock.java new file mode 100644 index 
0000000000000..0fba8367fc990 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/EndpointMock.java @@ -0,0 +1,282 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.internal.OperationType; +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import com.azure.data.cosmos.internal.StoreResponseBuilder; +import com.azure.data.cosmos.internal.directconnectivity.StoreResponse; +import com.azure.data.cosmos.internal.directconnectivity.WFConstants; +import com.google.common.collect.ImmutableList; +import org.apache.commons.collections.map.HashedMap; + +import java.net.URI; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +abstract public class EndpointMock { + + TransportClientWrapper transportClientWrapper; + AddressSelectorWrapper addressSelectorWrapper; + + public EndpointMock(AddressSelectorWrapper addressSelectorWrapper, TransportClientWrapper transportClientWrapper) { + this.addressSelectorWrapper = addressSelectorWrapper; + this.transportClientWrapper = transportClientWrapper; + } + + public static class EndpointMockVerificationBuilder { + public static EndpointMockVerificationBuilder builder() { + return new EndpointMockVerificationBuilder(); + } + + private AddressSelectorWrapper.InOrderVerificationBuilder addressSelectorVerificationBuilder; + private TransportClientWrapper.TransportClientWrapperVerificationBuilder transportClientValidation; + + public EndpointMockVerificationBuilder withAddressSelectorValidation(AddressSelectorWrapper.InOrderVerificationBuilder addressSelectorBuilder) { + addressSelectorVerificationBuilder = addressSelectorBuilder; + return this; + } + + public EndpointMockVerificationBuilder withTransportClientValidation(TransportClientWrapper.TransportClientWrapperVerificationBuilder transportClientValidation) { + this.transportClientValidation = transportClientValidation; + return this; + } + + public void execute(EndpointMock endpointMock) { + this.addressSelectorVerificationBuilder.execute(endpointMock.addressSelectorWrapper); + this.transportClientValidation.execute(endpointMock.transportClientWrapper); + } + } + + + public void validate(EndpointMockVerificationBuilder verificationBuilder) { + this.addressSelectorWrapper.validate(); + 
this.transportClientWrapper.validate(); + if (verificationBuilder != null) { + verificationBuilder.execute(this); + } + } + + public static Builder.NoSecondaryReplica noSecondaryReplicaBuilder() { + return new Builder.NoSecondaryReplica(); + } + + abstract static class Builder { + + class ReplicasWithSameSpeed extends Builder { + + URI primary; + List secondaries = new ArrayList<>(); + StoreResponse headStoreResponse; + StoreResponse readStoreResponse; + + ReplicasWithSameSpeed addPrimary(URI replicaAddress) { + primary = replicaAddress; + return this; + } + + ReplicasWithSameSpeed addSecondary(URI replicaAddress) { + secondaries.add(replicaAddress); + return this; + } + + ReplicasWithSameSpeed storeResponseOnRead(StoreResponse storeResponse) { + this.readStoreResponse = storeResponse; + return this; + } + + ReplicasWithSameSpeed storeResponseOnHead(StoreResponse storeResponse) { + this.headStoreResponse = storeResponse; + return this; + } + + public EndpointMock build() { + TransportClientWrapper.Builder.ReplicaResponseBuilder transportClientWrapperBuilder = TransportClientWrapper.Builder.replicaResponseBuilder(); + + ImmutableList replicas = ImmutableList.builder().add(primary).addAll(secondaries).build(); + + for(URI replica: replicas) { + transportClientWrapperBuilder.addReplica(replica, (i, request) -> { + if (request.getOperationType() == OperationType.Head || request.getOperationType() == OperationType.HeadFeed) { + return headStoreResponse; + } else { + return readStoreResponse; + } + }); + } + + AddressSelectorWrapper addressSelectorWrapper = AddressSelectorWrapper.Builder.Simple.create().withPrimary(primary) + .withSecondary(secondaries).build(); + + return new EndpointMock(addressSelectorWrapper, transportClientWrapperBuilder.build()) {}; + } + } + + class QuorumNotMetSecondaryReplicasDisappear { + URI primary; + Map> disappearDictionary = new HashedMap(); + public QuorumNotMetSecondaryReplicasDisappear primaryReplica(URI primaryReplica) { + this.primary = primaryReplica; + return this; + } + + public QuorumNotMetSecondaryReplicasDisappear secondaryReplicasDisappearWhen(URI secondary, + Function2WithCheckedException disappearPredicate) { + disappearDictionary.put(secondary, disappearPredicate); + return this; + } + + public QuorumNotMetSecondaryReplicasDisappear secondaryReplicasDisappearAfter(URI secondary, int attempt) { + disappearDictionary.put(secondary, (i, r) -> i >= attempt); + return this; + } + } + + static public class NoSecondaryReplica extends Builder { + private long LOCAL_LSN = 19; + private long LSN = 52; + private URI defaultPrimaryURI = URI.create("primary"); + private URI primary = defaultPrimaryURI; + private StoreResponse defaultResponse = StoreResponseBuilder.create() + .withLSN(LSN) + .withLocalLSN(LOCAL_LSN) + .withHeader(WFConstants.BackendHeaders.CURRENT_REPLICA_SET_SIZE, "1") + .withHeader(WFConstants.BackendHeaders.QUORUM_ACKED_LSN, Long.toString(LSN)) + .withHeader(WFConstants.BackendHeaders.QUORUM_ACKED_LOCAL_LSN, Long.toString(LOCAL_LSN)) + .withRequestCharge(0) + .build(); + + private StoreResponse headStoreResponse = defaultResponse; + private StoreResponse readStoreResponse = defaultResponse; + private Function1WithCheckedException storeResponseFunc; + + public NoSecondaryReplica primaryReplica(URI primaryReplica) { + this.primary = primaryReplica; + return this; + } + + public NoSecondaryReplica response(StoreResponse storeResponse) { + this.readStoreResponse = storeResponse; + this.headStoreResponse = storeResponse; + return this; + } + + 
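+ // overload: compute the store response per request via a function instead of returning a fixed response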
public NoSecondaryReplica response(Function1WithCheckedException storeResponseFunc) { + this.storeResponseFunc = storeResponseFunc; + return this; + } + + public EndpointMock build() { + + TransportClientWrapper.Builder.ReplicaResponseBuilder transportClientWrapperBuilder = TransportClientWrapper.Builder.replicaResponseBuilder(); + + ImmutableList replicas = ImmutableList.builder().add(primary).build(); + + for(URI replica: replicas) { + transportClientWrapperBuilder.addReplica(replica, (i, request) -> { + + if (storeResponseFunc != null) { + return storeResponseFunc.apply(request); + } + + if (request.getOperationType() == OperationType.Head || request.getOperationType() == OperationType.HeadFeed) { + return headStoreResponse; + } else { + return readStoreResponse; + } + }); + } + + AddressSelectorWrapper addressSelectorWrapper = AddressSelectorWrapper.Builder.Simple.create().withPrimary(primary) + .withSecondary(ImmutableList.of()).build(); + + return new EndpointMock(addressSelectorWrapper, transportClientWrapperBuilder.build()) {}; + } + } + + static public class NoSecondaryReplica_TwoSecondaryReplicasGoLiveAfterFirstHitOnPrimary extends Builder { + private long LOCAL_LSN = 19; + private long LSN = 52; + private URI primary = URI.create("primary"); + private ImmutableList secondaryReplicas = ImmutableList.of(URI.create("secondary1"), URI.create("secondary2")); + private StoreResponse primaryDefaultResponse = StoreResponseBuilder.create() + .withLSN(LSN) + .withLocalLSN(LOCAL_LSN) + .withHeader(WFConstants.BackendHeaders.CURRENT_REPLICA_SET_SIZE, "3") + .withHeader(WFConstants.BackendHeaders.QUORUM_ACKED_LSN, Long.toString(LSN)) + .withHeader(WFConstants.BackendHeaders.QUORUM_ACKED_LOCAL_LSN, Long.toString(LOCAL_LSN)) + .withRequestCharge(0) + .build(); + + private StoreResponse secondaryDefaultResponse = StoreResponseBuilder.create() + .withLSN(LSN) + .withLocalLSN(LOCAL_LSN) + .withHeader(WFConstants.BackendHeaders.QUORUM_ACKED_LSN, Long.toString(LSN)) + .withHeader(WFConstants.BackendHeaders.QUORUM_ACKED_LOCAL_LSN, Long.toString(LOCAL_LSN)) + .withRequestCharge(0) + .build(); + Map> secondaryResponseFunc = + new HashMap<>(); + + + public NoSecondaryReplica_TwoSecondaryReplicasGoLiveAfterFirstHitOnPrimary primaryReplica(URI primaryReplica) { + this.primary = primaryReplica; + return this; + } + + public NoSecondaryReplica_TwoSecondaryReplicasGoLiveAfterFirstHitOnPrimary responseFromSecondary( + URI replica, + Function1WithCheckedException func) { + secondaryResponseFunc.put(replica, func); + return this; + } + + public EndpointMock build() { + + TransportClientWrapper.Builder.ReplicaResponseBuilder transportClientWrapperBuilder = TransportClientWrapper.Builder.replicaResponseBuilder(); + + transportClientWrapperBuilder.addReplica(primary, (i, request) -> { + return primaryDefaultResponse; + }); + + transportClientWrapperBuilder.addReplica(secondaryReplicas.get(0), (i, request) -> { + return secondaryDefaultResponse; + }); + + transportClientWrapperBuilder.addReplica(secondaryReplicas.get(1), (i, request) -> { + return secondaryDefaultResponse; + }); + + AddressSelectorWrapper addressSelectorWrapper = AddressSelectorWrapper.Builder.Simple.create().withPrimary(primary) + .withSecondary(ImmutableList.of()).build(); + + return new EndpointMock(addressSelectorWrapper, transportClientWrapperBuilder.build()){}; + } + } + + public abstract EndpointMock build() ; + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/ExceptionBuilder.java 
b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/ExceptionBuilder.java new file mode 100644 index 0000000000000..e76abf34edbc4 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/ExceptionBuilder.java @@ -0,0 +1,102 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.GoneException; +import com.azure.data.cosmos.PartitionKeyRangeGoneException; +import com.azure.data.cosmos.InvalidPartitionException; +import com.azure.data.cosmos.PartitionIsMigratingException; +import com.azure.data.cosmos.PartitionKeyRangeIsSplittingException; + +import java.util.AbstractMap; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +public class ExceptionBuilder { + private Integer status; + private List> headerEntries; + private String message; + + public static ExceptionBuilder create() { + return new ExceptionBuilder(); + } + + public ExceptionBuilder() { + headerEntries = new ArrayList<>(); + } + + public ExceptionBuilder withHeader(String key, String value) { + headerEntries.add(new AbstractMap.SimpleEntry(key, value)); + return this; + } + + public ExceptionBuilder withStatus(int status) { + this.status = status; + return this; + } + + public ExceptionBuilder withMessage(String message) { + this.message = message; + return this; + } + + public GoneException asGoneException() { + assert status == null; + GoneException dce = new GoneException(); + dce.responseHeaders().putAll(headerEntries.stream().collect(Collectors.toMap(i -> i.getKey(), i -> i.getValue()))); + return dce; + } + + public InvalidPartitionException asInvalidPartitionException() { + assert status == null; + InvalidPartitionException dce = new InvalidPartitionException(); + dce.responseHeaders().putAll(headerEntries.stream().collect(Collectors.toMap(i -> i.getKey(), i -> i.getValue()))); + return dce; + } + + public PartitionKeyRangeGoneException asPartitionKeyRangeGoneException() { + assert status == null; + PartitionKeyRangeGoneException dce = new PartitionKeyRangeGoneException(); + dce.responseHeaders().putAll(headerEntries.stream().collect(Collectors.toMap(i -> i.getKey(), i -> i.getValue()))); + return dce; + } + + + public 
PartitionKeyRangeIsSplittingException asPartitionKeyRangeIsSplittingException() { + assert status == null; + PartitionKeyRangeIsSplittingException dce = new PartitionKeyRangeIsSplittingException(); + dce.responseHeaders().putAll(headerEntries.stream().collect(Collectors.toMap(i -> i.getKey(), i -> i.getValue()))); + return dce; + } + + public PartitionIsMigratingException asPartitionIsMigratingException() { + assert status == null; + PartitionIsMigratingException dce = new PartitionIsMigratingException(); + dce.responseHeaders().putAll(headerEntries.stream().collect(Collectors.toMap(i -> i.getKey(), i -> i.getValue()))); + return dce; + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/Function1WithCheckedException.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/Function1WithCheckedException.java new file mode 100644 index 0000000000000..f84b0cf704b74 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/Function1WithCheckedException.java @@ -0,0 +1,31 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.directconnectivity; + +@FunctionalInterface +public interface Function1WithCheckedException{ + + R apply(T t) throws Exception; + +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/Function2WithCheckedException.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/Function2WithCheckedException.java new file mode 100644 index 0000000000000..b0e7b6cc0019c --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/Function2WithCheckedException.java @@ -0,0 +1,30 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.directconnectivity; + +@FunctionalInterface +public interface Function2WithCheckedException{ + R apply(T1 t1, T2 t2) throws Exception; + +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/GatewayAddressCacheTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/GatewayAddressCacheTest.java new file mode 100644 index 0000000000000..c6e7fc457f412 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/GatewayAddressCacheTest.java @@ -0,0 +1,868 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.internal.AsyncDocumentClient; +import com.azure.data.cosmos.internal.AsyncDocumentClient.Builder; +import com.azure.data.cosmos.internal.*; +import com.azure.data.cosmos.internal.Database; +import com.azure.data.cosmos.internal.Document; +import com.azure.data.cosmos.internal.DocumentCollection; +import com.azure.data.cosmos.PartitionKeyDefinition; +import com.azure.data.cosmos.internal.http.HttpClient; +import com.azure.data.cosmos.internal.http.HttpClientConfig; +import com.azure.data.cosmos.internal.routing.PartitionKeyRangeIdentity; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; +import io.reactivex.subscribers.TestSubscriber; +import org.mockito.Matchers; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Mono; + +import java.net.URL; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; + +import static org.assertj.core.api.AssertionsForClassTypes.assertThat; + +public class GatewayAddressCacheTest extends TestSuiteBase { + private Database createdDatabase; + private DocumentCollection createdCollection; + + private AsyncDocumentClient client; + + @Factory(dataProvider = "clientBuilders") + public GatewayAddressCacheTest(Builder clientBuilder) { + super(clientBuilder); + } + + @DataProvider(name = "targetPartitionsKeyRangeListAndCollectionLinkParams") + public Object[][] partitionsKeyRangeListAndCollectionLinkParams() { + return new Object[][] { + // target partition key range ids, collection link + { ImmutableList.of("0"), getNameBasedCollectionLink(), Protocol.TCP}, + { ImmutableList.of("0"), getNameBasedCollectionLink(), Protocol.HTTPS}, + + { ImmutableList.of("1"), getNameBasedCollectionLink(), Protocol.HTTPS}, + { ImmutableList.of("1"), getCollectionSelfLink(), Protocol.HTTPS}, + { ImmutableList.of("3"), getNameBasedCollectionLink(), Protocol.HTTPS}, + + { ImmutableList.of("0", "1"), getNameBasedCollectionLink(), Protocol.HTTPS}, + { ImmutableList.of("1", "3"), getNameBasedCollectionLink(), Protocol.HTTPS}, + }; + } + + @DataProvider(name = "protocolProvider") + public Object[][] protocolProvider() { + return new Object[][]{ + { Protocol.HTTPS}, + { Protocol.TCP}, + }; + } + + @Test(groups = { "direct" }, dataProvider = "targetPartitionsKeyRangeListAndCollectionLinkParams", timeOut = TIMEOUT) + public void getServerAddressesViaGateway(List partitionKeyRangeIds, + String collectionLink, + Protocol protocol) throws Exception { + Configs configs = ConfigsBuilder.instance().withProtocol(protocol).build(); + // ask gateway for the addresses + URL serviceEndpoint = new URL(TestConfigurations.HOST); + IAuthorizationTokenProvider authorizationTokenProvider 
= (RxDocumentClientImpl) client; + + GatewayAddressCache cache = new GatewayAddressCache(serviceEndpoint, + protocol, + authorizationTokenProvider, + null, + getHttpClient(configs)); + + RxDocumentServiceRequest req = + RxDocumentServiceRequest.create(OperationType.Create, ResourceType.Document, + collectionLink + "/docs/", + getDocumentDefinition(), new HashMap<>()); + + Mono> addresses = cache.getServerAddressesViaGatewayAsync( + req, createdCollection.resourceId(), partitionKeyRangeIds, false); + + PartitionReplicasAddressesValidator validator = new PartitionReplicasAddressesValidator.Builder() + .withProtocol(protocol) + .replicasOfPartitions(partitionKeyRangeIds) + .build(); + + validateSuccess(addresses, validator, TIMEOUT); + } + + @Test(groups = { "direct" }, dataProvider = "protocolProvider", timeOut = TIMEOUT) + public void getMasterAddressesViaGatewayAsync(Protocol protocol) throws Exception { + Configs configs = ConfigsBuilder.instance().withProtocol(protocol).build(); + // ask gateway for the addresses + URL serviceEndpoint = new URL(TestConfigurations.HOST); + IAuthorizationTokenProvider authorizationTokenProvider = (RxDocumentClientImpl) client; + + GatewayAddressCache cache = new GatewayAddressCache(serviceEndpoint, + protocol, + authorizationTokenProvider, + null, + getHttpClient(configs)); + + RxDocumentServiceRequest req = + RxDocumentServiceRequest.create(OperationType.Create, ResourceType.Database, + "/dbs", + new Database(), new HashMap<>()); + + Mono> addresses = cache.getMasterAddressesViaGatewayAsync(req, ResourceType.Database, + null, "/dbs/", false, false, null); + + PartitionReplicasAddressesValidator validator = new PartitionReplicasAddressesValidator.Builder() + .withProtocol(protocol) + .replicasOfSamePartition() + .build(); + + validateSuccess(addresses, validator, TIMEOUT); + } + + @DataProvider(name = "targetPartitionsKeyRangeAndCollectionLinkParams") + public Object[][] partitionsKeyRangeAndCollectionLinkParams() { + return new Object[][] { + // target partition key range ids, collection link, protocol + { "0", getNameBasedCollectionLink(), Protocol.TCP}, + { "0", getNameBasedCollectionLink(), Protocol.HTTPS}, + + { "1", getNameBasedCollectionLink(), Protocol.HTTPS} , + { "1", getCollectionSelfLink(), Protocol.HTTPS}, + { "3", getNameBasedCollectionLink(), Protocol.HTTPS}, + }; + } + + @Test(groups = { "direct" }, dataProvider = "targetPartitionsKeyRangeAndCollectionLinkParams", timeOut = TIMEOUT) + public void tryGetAddresses_ForDataPartitions(String partitionKeyRangeId, String collectionLink, Protocol protocol) throws Exception { + Configs configs = ConfigsBuilder.instance().withProtocol(protocol).build(); + URL serviceEndpoint = new URL(TestConfigurations.HOST); + IAuthorizationTokenProvider authorizationTokenProvider = (RxDocumentClientImpl) client; + + GatewayAddressCache cache = new GatewayAddressCache(serviceEndpoint, + protocol, + authorizationTokenProvider, + null, + getHttpClient(configs)); + + RxDocumentServiceRequest req = + RxDocumentServiceRequest.create(OperationType.Create, ResourceType.Document, + collectionLink, + new Database(), new HashMap<>()); + + String collectionRid = createdCollection.resourceId(); + + PartitionKeyRangeIdentity partitionKeyRangeIdentity = new PartitionKeyRangeIdentity(collectionRid, partitionKeyRangeId); + boolean forceRefreshPartitionAddresses = false; + Mono addressesInfosFromCacheObs = cache.tryGetAddresses(req, partitionKeyRangeIdentity, forceRefreshPartitionAddresses); + + ArrayList addressInfosFromCache = 
Lists.newArrayList(getSuccessResult(addressesInfosFromCacheObs, TIMEOUT)); + + Mono<List<Address>> masterAddressFromGatewayObs = cache.getServerAddressesViaGatewayAsync(req, + collectionRid, ImmutableList.of(partitionKeyRangeId), false); + List<Address>
expectedAddresses = getSuccessResult(masterAddressFromGatewayObs, TIMEOUT); + + assertSameAs(addressInfosFromCache, expectedAddresses); + } + + @DataProvider(name = "openAsyncTargetAndTargetPartitionsKeyRangeAndCollectionLinkParams") + public Object[][] openAsyncTargetAndPartitionsKeyRangeTargetAndCollectionLinkParams() { + return new Object[][] { + // openAsync target partition key range ids, target partition key range id, collection link + { ImmutableList.of("0", "1"), "0", getNameBasedCollectionLink() }, + { ImmutableList.of("0", "1"), "1", getNameBasedCollectionLink() }, + { ImmutableList.of("0", "1"), "1", getCollectionSelfLink() }, + }; + } + + @Test(groups = { "direct" }, + dataProvider = "openAsyncTargetAndTargetPartitionsKeyRangeAndCollectionLinkParams", + timeOut = TIMEOUT) + public void tryGetAddresses_ForDataPartitions_AddressCachedByOpenAsync_NoHttpRequest( + List allPartitionKeyRangeIds, + String partitionKeyRangeId, String collectionLink) throws Exception { + Configs configs = new Configs(); + HttpClientUnderTestWrapper httpClientWrapper = getHttpClientUnderTestWrapper(configs); + + URL serviceEndpoint = new URL(TestConfigurations.HOST); + IAuthorizationTokenProvider authorizationTokenProvider = (RxDocumentClientImpl) client; + + GatewayAddressCache cache = new GatewayAddressCache(serviceEndpoint, + Protocol.HTTPS, + authorizationTokenProvider, + null, + httpClientWrapper.getSpyHttpClient()); + + String collectionRid = createdCollection.resourceId(); + + List pkriList = allPartitionKeyRangeIds.stream().map( + pkri -> new PartitionKeyRangeIdentity(collectionRid, pkri)).collect(Collectors.toList()); + + cache.openAsync(createdCollection, pkriList).block(); + + assertThat(httpClientWrapper.capturedRequests).asList().hasSize(1); + httpClientWrapper.capturedRequests.clear(); + + RxDocumentServiceRequest req = + RxDocumentServiceRequest.create(OperationType.Create, ResourceType.Document, + collectionLink, + new Database(), new HashMap<>()); + + PartitionKeyRangeIdentity partitionKeyRangeIdentity = new PartitionKeyRangeIdentity(collectionRid, partitionKeyRangeId); + boolean forceRefreshPartitionAddresses = false; + Mono addressesInfosFromCacheObs = cache.tryGetAddresses(req, partitionKeyRangeIdentity, forceRefreshPartitionAddresses); + ArrayList addressInfosFromCache = Lists.newArrayList(getSuccessResult(addressesInfosFromCacheObs, TIMEOUT)); + + // no new request is made + assertThat(httpClientWrapper.capturedRequests) + .describedAs("no http request: addresses already cached by openAsync") + .asList().hasSize(0); + + Mono> masterAddressFromGatewayObs = cache.getServerAddressesViaGatewayAsync(req, + collectionRid, ImmutableList.of(partitionKeyRangeId), false); + List
expectedAddresses = getSuccessResult(masterAddressFromGatewayObs, TIMEOUT); + + assertThat(httpClientWrapper.capturedRequests) + .describedAs("getServerAddressesViaGatewayAsync will read addresses from gateway") + .asList().hasSize(1); + + assertSameAs(addressInfosFromCache, expectedAddresses); + } + + @Test(groups = { "direct" }, + dataProvider = "openAsyncTargetAndTargetPartitionsKeyRangeAndCollectionLinkParams", + timeOut = TIMEOUT) + public void tryGetAddresses_ForDataPartitions_ForceRefresh( + List allPartitionKeyRangeIds, + String partitionKeyRangeId, + String collectionLink) throws Exception { + Configs configs = new Configs(); + HttpClientUnderTestWrapper httpClientWrapper = getHttpClientUnderTestWrapper(configs); + + URL serviceEndpoint = new URL(TestConfigurations.HOST); + IAuthorizationTokenProvider authorizationTokenProvider = (RxDocumentClientImpl) client; + + GatewayAddressCache cache = new GatewayAddressCache(serviceEndpoint, + Protocol.HTTPS, + authorizationTokenProvider, + null, + httpClientWrapper.getSpyHttpClient()); + + String collectionRid = createdCollection.resourceId(); + + List pkriList = allPartitionKeyRangeIds.stream().map( + pkri -> new PartitionKeyRangeIdentity(collectionRid, pkri)).collect(Collectors.toList()); + + cache.openAsync(createdCollection, pkriList).block(); + + assertThat(httpClientWrapper.capturedRequests).asList().hasSize(1); + httpClientWrapper.capturedRequests.clear(); + + RxDocumentServiceRequest req = + RxDocumentServiceRequest.create(OperationType.Create, ResourceType.Document, + collectionLink, + new Database(), new HashMap<>()); + + PartitionKeyRangeIdentity partitionKeyRangeIdentity = new PartitionKeyRangeIdentity(collectionRid, partitionKeyRangeId); + Mono addressesInfosFromCacheObs = cache.tryGetAddresses(req, partitionKeyRangeIdentity, true); + ArrayList addressInfosFromCache = Lists.newArrayList(getSuccessResult(addressesInfosFromCacheObs, TIMEOUT)); + + // no new request is made + assertThat(httpClientWrapper.capturedRequests) + .describedAs("force refresh fetched from gateway") + .asList().hasSize(1); + + Mono> masterAddressFromGatewayObs = cache.getServerAddressesViaGatewayAsync(req, + collectionRid, ImmutableList.of(partitionKeyRangeId), false); + List
expectedAddresses = getSuccessResult(masterAddressFromGatewayObs, TIMEOUT); + + assertThat(httpClientWrapper.capturedRequests) + .describedAs("getServerAddressesViaGatewayAsync will read addresses from gateway") + .asList().hasSize(2); + + assertSameAs(addressInfosFromCache, expectedAddresses); + } + + @Test(groups = { "direct" }, + dataProvider = "openAsyncTargetAndTargetPartitionsKeyRangeAndCollectionLinkParams", + timeOut = TIMEOUT) + public void tryGetAddresses_ForDataPartitions_Suboptimal_Refresh( + List allPartitionKeyRangeIds, + String partitionKeyRangeId, + String collectionLink) throws Exception { + Configs configs = new Configs(); + HttpClientUnderTestWrapper httpClientWrapper = getHttpClientUnderTestWrapper(configs); + + URL serviceEndpoint = new URL(TestConfigurations.HOST); + IAuthorizationTokenProvider authorizationTokenProvider = (RxDocumentClientImpl) client; + + int suboptimalRefreshTime = 2; + + GatewayAddressCache origCache = new GatewayAddressCache(serviceEndpoint, + Protocol.HTTPS, + authorizationTokenProvider, + null, + httpClientWrapper.getSpyHttpClient(), + suboptimalRefreshTime); + + String collectionRid = createdCollection.resourceId(); + + List pkriList = allPartitionKeyRangeIds.stream().map( + pkri -> new PartitionKeyRangeIdentity(collectionRid, pkri)).collect(Collectors.toList()); + + origCache.openAsync(createdCollection, pkriList).block(); + + assertThat(httpClientWrapper.capturedRequests).asList().hasSize(1); + httpClientWrapper.capturedRequests.clear(); + + RxDocumentServiceRequest req = + RxDocumentServiceRequest.create(OperationType.Create, ResourceType.Document, + collectionLink, + new Database(), new HashMap<>()); + + PartitionKeyRangeIdentity partitionKeyRangeIdentity = new PartitionKeyRangeIdentity(collectionRid, partitionKeyRangeId); + Mono addressesInfosFromCacheObs = origCache.tryGetAddresses(req, partitionKeyRangeIdentity, true); + ArrayList addressInfosFromCache = Lists.newArrayList(getSuccessResult(addressesInfosFromCacheObs, TIMEOUT)); + + // no new request is made + assertThat(httpClientWrapper.capturedRequests) + .describedAs("force refresh fetched from gateway") + .asList().hasSize(1); + + GatewayAddressCache spyCache = Mockito.spy(origCache); + + final AtomicInteger fetchCounter = new AtomicInteger(0); + Mockito.doAnswer(new Answer() { + @Override + public Mono> answer(InvocationOnMock invocationOnMock) throws Throwable { + + RxDocumentServiceRequest req = invocationOnMock.getArgumentAt(0, RxDocumentServiceRequest.class); + String collectionRid = invocationOnMock.getArgumentAt(1, String.class); + List partitionKeyRangeIds = invocationOnMock.getArgumentAt(2, List.class); + boolean forceRefresh = invocationOnMock.getArgumentAt(3, Boolean.class); + + int cnt = fetchCounter.getAndIncrement(); + + if (cnt == 0) { + Mono> res = origCache.getServerAddressesViaGatewayAsync(req, + collectionRid, + partitionKeyRangeIds, + forceRefresh); + + // remove one replica + return res.map(list -> removeOneReplica(list)); + } + + return origCache.getServerAddressesViaGatewayAsync(req, + collectionRid, + partitionKeyRangeIds, + forceRefresh); + } + }).when(spyCache).getServerAddressesViaGatewayAsync(Matchers.any(RxDocumentServiceRequest.class), Matchers.anyString(), + Matchers.anyList(), Matchers.anyBoolean()); + + httpClientWrapper.capturedRequests.clear(); + + // force refresh to replace existing with sub-optimal addresses + addressesInfosFromCacheObs = spyCache.tryGetAddresses(req, partitionKeyRangeIdentity, true); + AddressInformation[] suboptimalAddresses = 
getSuccessResult(addressesInfosFromCacheObs, TIMEOUT); + assertThat(httpClientWrapper.capturedRequests) + .describedAs("getServerAddressesViaGatewayAsync will read addresses from gateway") + .asList().hasSize(1); + httpClientWrapper.capturedRequests.clear(); + assertThat(suboptimalAddresses).hasSize(ServiceConfig.SystemReplicationPolicy.MaxReplicaSetSize - 1); + assertThat(fetchCounter.get()).isEqualTo(1); + + // no refresh, use cache + addressesInfosFromCacheObs = spyCache.tryGetAddresses(req, partitionKeyRangeIdentity, false); + suboptimalAddresses = getSuccessResult(addressesInfosFromCacheObs, TIMEOUT); + assertThat(httpClientWrapper.capturedRequests) + .describedAs("getServerAddressesViaGatewayAsync will read addresses from gateway") + .asList().hasSize(0); + assertThat(suboptimalAddresses).hasSize(ServiceConfig.SystemReplicationPolicy.MaxReplicaSetSize - 1); + assertThat(fetchCounter.get()).isEqualTo(1); + + // wait for refresh time + TimeUnit.SECONDS.sleep(suboptimalRefreshTime + 1); + + addressesInfosFromCacheObs = spyCache.tryGetAddresses(req, partitionKeyRangeIdentity, false); + AddressInformation[] addresses = getSuccessResult(addressesInfosFromCacheObs, TIMEOUT); + assertThat(addresses).hasSize(ServiceConfig.SystemReplicationPolicy.MaxReplicaSetSize); + assertThat(httpClientWrapper.capturedRequests) + .describedAs("getServerAddressesViaGatewayAsync will read addresses from gateway") + .asList().hasSize(1); + assertThat(fetchCounter.get()).isEqualTo(2); + } + + @Test(groups = { "direct" }, dataProvider = "protocolProvider",timeOut = TIMEOUT) + public void tryGetAddresses_ForMasterPartition(Protocol protocol) throws Exception { + Configs configs = ConfigsBuilder.instance().withProtocol(protocol).build(); + URL serviceEndpoint = new URL(TestConfigurations.HOST); + IAuthorizationTokenProvider authorizationTokenProvider = (RxDocumentClientImpl) client; + + GatewayAddressCache cache = new GatewayAddressCache(serviceEndpoint, + protocol, + authorizationTokenProvider, + null, + getHttpClient(configs)); + + RxDocumentServiceRequest req = + RxDocumentServiceRequest.create(OperationType.Create, ResourceType.Database, + "/dbs", + new Database(), new HashMap<>()); + + PartitionKeyRangeIdentity partitionKeyRangeIdentity = new PartitionKeyRangeIdentity("M"); + boolean forceRefreshPartitionAddresses = false; + Mono addressesInfosFromCacheObs = cache.tryGetAddresses(req, partitionKeyRangeIdentity, forceRefreshPartitionAddresses); + + ArrayList addressInfosFromCache = Lists.newArrayList(getSuccessResult(addressesInfosFromCacheObs, TIMEOUT)); + + Mono> masterAddressFromGatewayObs = cache.getMasterAddressesViaGatewayAsync(req, ResourceType.Database, + null, "/dbs/", false, false, null); + List
expectedAddresses = getSuccessResult(masterAddressFromGatewayObs, TIMEOUT); + + assertSameAs(addressInfosFromCache, expectedAddresses); + } + + @DataProvider(name = "refreshTime") + public Object[][] refreshTime() { + return new Object[][] { + // refresh time, wait before doing tryGetAddresses + { 60, 1 }, + { 1, 2 }, + }; + } + + @Test(groups = { "direct" }, timeOut = TIMEOUT, dataProvider = "refreshTime") + public void tryGetAddresses_ForMasterPartition_MasterPartitionAddressAlreadyCached_NoNewHttpRequest( + int suboptimalPartitionForceRefreshIntervalInSeconds, + int waitTimeInBetweenAttemptsInSeconds + ) throws Exception { + Configs configs = new Configs(); + HttpClientUnderTestWrapper clientWrapper = getHttpClientUnderTestWrapper(configs); + + URL serviceEndpoint = new URL(TestConfigurations.HOST); + IAuthorizationTokenProvider authorizationTokenProvider = (RxDocumentClientImpl) client; + + + GatewayAddressCache cache = new GatewayAddressCache(serviceEndpoint, + Protocol.HTTPS, + authorizationTokenProvider, + null, + clientWrapper.getSpyHttpClient(), + suboptimalPartitionForceRefreshIntervalInSeconds); + + RxDocumentServiceRequest req = + RxDocumentServiceRequest.create(OperationType.Create, ResourceType.Database, + "/dbs", + new Database(), new HashMap<>()); + + PartitionKeyRangeIdentity partitionKeyRangeIdentity = new PartitionKeyRangeIdentity("M"); + boolean forceRefreshPartitionAddresses = false; + + // request master partition info to ensure it is cached. + AddressInformation[] expectedAddresses = cache.tryGetAddresses(req, + partitionKeyRangeIdentity, + forceRefreshPartitionAddresses) + .block(); + + assertThat(clientWrapper.capturedRequests).asList().hasSize(1); + clientWrapper.capturedRequests.clear(); + + + TimeUnit.SECONDS.sleep(waitTimeInBetweenAttemptsInSeconds); + + Mono addressesObs = cache.tryGetAddresses(req, + partitionKeyRangeIdentity, + forceRefreshPartitionAddresses); + + AddressInformation[] actualAddresses = getSuccessResult(addressesObs, TIMEOUT); + + assertExactlyEqual(actualAddresses, expectedAddresses); + + // the cache address is used. no new http request is sent + assertThat(clientWrapper.capturedRequests).asList().hasSize(0); + } + + @Test(groups = { "direct" }, timeOut = TIMEOUT) + public void tryGetAddresses_ForMasterPartition_ForceRefresh() throws Exception { + Configs configs = new Configs(); + HttpClientUnderTestWrapper clientWrapper = getHttpClientUnderTestWrapper(configs); + + URL serviceEndpoint = new URL(TestConfigurations.HOST); + IAuthorizationTokenProvider authorizationTokenProvider = (RxDocumentClientImpl) client; + + GatewayAddressCache cache = new GatewayAddressCache(serviceEndpoint, + Protocol.HTTPS, + authorizationTokenProvider, + null, + clientWrapper.getSpyHttpClient()); + + RxDocumentServiceRequest req = + RxDocumentServiceRequest.create(OperationType.Create, ResourceType.Database, + "/dbs", + new Database(), new HashMap<>()); + + PartitionKeyRangeIdentity partitionKeyRangeIdentity = new PartitionKeyRangeIdentity("M"); + + // request master partition info to ensure it is cached. 
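+ // the first call below (forceRefresh = false) warms the cache with a single gateway request; the second call uses forceRefresh = true and must hit the gateway again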
+ AddressInformation[] expectedAddresses = cache.tryGetAddresses(req, + partitionKeyRangeIdentity, + false) + .block(); + + assertThat(clientWrapper.capturedRequests).asList().hasSize(1); + clientWrapper.capturedRequests.clear(); + + Mono<AddressInformation[]> addressesObs = cache.tryGetAddresses(req, + partitionKeyRangeIdentity, + true); + + AddressInformation[] actualAddresses = getSuccessResult(addressesObs, TIMEOUT); + + assertExactlyEqual(actualAddresses, expectedAddresses); + + // force refresh bypasses the cached addresses, so exactly one new http request is sent + assertThat(clientWrapper.capturedRequests).asList().hasSize(1); + } + + private static List<Address>
removeOneReplica(List<Address>
addresses) { + addresses.remove(0); + return addresses; + } + + @Test(groups = { "direct" }, timeOut = TIMEOUT) + public void tryGetAddresses_SuboptimalMasterPartition_NotStaleEnough_NoRefresh() throws Exception { + Configs configs = new Configs(); + Instant start = Instant.now(); + HttpClientUnderTestWrapper clientWrapper = getHttpClientUnderTestWrapper(configs); + + URL serviceEndpoint = new URL(TestConfigurations.HOST); + IAuthorizationTokenProvider authorizationTokenProvider = (RxDocumentClientImpl) client; + + int refreshPeriodInSeconds = 10; + + GatewayAddressCache origCache = new GatewayAddressCache(serviceEndpoint, + Protocol.HTTPS, + authorizationTokenProvider, + null, + clientWrapper.getSpyHttpClient(), refreshPeriodInSeconds); + + GatewayAddressCache spyCache = Mockito.spy(origCache); + + final AtomicInteger getMasterAddressesViaGatewayAsyncInvocation = new AtomicInteger(0); + Mockito.doAnswer(new Answer() { + @Override + public Mono> answer(InvocationOnMock invocationOnMock) throws Throwable { + + RxDocumentServiceRequest request = invocationOnMock.getArgumentAt(0, RxDocumentServiceRequest.class); + ResourceType resourceType = invocationOnMock.getArgumentAt(1, ResourceType.class); + String resourceAddress = invocationOnMock.getArgumentAt(2, String.class); + String entryUrl = invocationOnMock.getArgumentAt(3, String.class); + boolean forceRefresh = invocationOnMock.getArgumentAt(4, Boolean.class); + boolean useMasterCollectionResolver = invocationOnMock.getArgumentAt(5, Boolean.class); + + int cnt = getMasterAddressesViaGatewayAsyncInvocation.getAndIncrement(); + + if (cnt == 0) { + Mono> res = origCache.getMasterAddressesViaGatewayAsync( + request, + resourceType, + resourceAddress, + entryUrl, + forceRefresh, + useMasterCollectionResolver, + null); + + // remove one replica + return res.map(list -> removeOneReplica(list)); + } + + return origCache.getMasterAddressesViaGatewayAsync( + request, + resourceType, + resourceAddress, + entryUrl, + forceRefresh, + useMasterCollectionResolver, + null); + } + }).when(spyCache).getMasterAddressesViaGatewayAsync(Matchers.any(RxDocumentServiceRequest.class), Matchers.any(ResourceType.class), Matchers.anyString(), + Matchers.anyString(), Matchers.anyBoolean(), Matchers.anyBoolean(), Matchers.anyMap()); + + + RxDocumentServiceRequest req = + RxDocumentServiceRequest.create(OperationType.Create, ResourceType.Database, + "/dbs", + new Database(), new HashMap<>()); + + PartitionKeyRangeIdentity partitionKeyRangeIdentity = new PartitionKeyRangeIdentity("M"); + + // request master partition info to ensure it is cached. + AddressInformation[] expectedAddresses = spyCache.tryGetAddresses(req, + partitionKeyRangeIdentity, + false) + .block(); + + assertThat(clientWrapper.capturedRequests).asList().hasSize(1); + clientWrapper.capturedRequests.clear(); + + Mono addressesObs = spyCache.tryGetAddresses(req, + partitionKeyRangeIdentity, + false); + + AddressInformation[] actualAddresses = getSuccessResult(addressesObs, TIMEOUT); + + assertExactlyEqual(actualAddresses, expectedAddresses); + + // the cache address is used. 
no new http request is sent + assertThat(clientWrapper.capturedRequests).asList().hasSize(0); + + Instant end = Instant.now(); + assertThat(end.minusSeconds(refreshPeriodInSeconds)).isBefore(start); + } + + @Test(groups = { "direct" }, timeOut = TIMEOUT) + public void tryGetAddresses_SuboptimalMasterPartition_Stale_DoRefresh() throws Exception { + Configs configs = new Configs(); + HttpClientUnderTestWrapper clientWrapper = getHttpClientUnderTestWrapper(configs); + + URL serviceEndpoint = new URL(TestConfigurations.HOST); + IAuthorizationTokenProvider authorizationTokenProvider = (RxDocumentClientImpl) client; + + int refreshPeriodInSeconds = 1; + + GatewayAddressCache origCache = new GatewayAddressCache(serviceEndpoint, + Protocol.HTTPS, + authorizationTokenProvider, + null, + clientWrapper.getSpyHttpClient(), refreshPeriodInSeconds); + + GatewayAddressCache spyCache = Mockito.spy(origCache); + + final AtomicInteger getMasterAddressesViaGatewayAsyncInvocation = new AtomicInteger(0); + Mockito.doAnswer(new Answer() { + @Override + public Mono> answer(InvocationOnMock invocationOnMock) throws Throwable { + + System.out.print("fetch"); + + RxDocumentServiceRequest request = invocationOnMock.getArgumentAt(0, RxDocumentServiceRequest.class); + ResourceType resourceType = invocationOnMock.getArgumentAt(1, ResourceType.class); + String resourceAddress = invocationOnMock.getArgumentAt(2, String.class); + String entryUrl = invocationOnMock.getArgumentAt(3, String.class); + boolean forceRefresh = invocationOnMock.getArgumentAt(4, Boolean.class); + boolean useMasterCollectionResolver = invocationOnMock.getArgumentAt(5, Boolean.class); + + int cnt = getMasterAddressesViaGatewayAsyncInvocation.getAndIncrement(); + + if (cnt == 0) { + Mono> res = origCache.getMasterAddressesViaGatewayAsync( + request, + resourceType, + resourceAddress, + entryUrl, + forceRefresh, + useMasterCollectionResolver, + null); + + // remove one replica + return res.map(list -> removeOneReplica(list)); + } + + return origCache.getMasterAddressesViaGatewayAsync( + request, + resourceType, + resourceAddress, + entryUrl, + forceRefresh, + useMasterCollectionResolver, + null); + } + }).when(spyCache).getMasterAddressesViaGatewayAsync(Matchers.any(RxDocumentServiceRequest.class), Matchers.any(ResourceType.class), Matchers.anyString(), + Matchers.anyString(), Matchers.anyBoolean(), Matchers.anyBoolean(), Matchers.anyMap()); + + RxDocumentServiceRequest req = + RxDocumentServiceRequest.create(OperationType.Create, ResourceType.Database, + "/dbs", + new Database(), new HashMap<>()); + + PartitionKeyRangeIdentity partitionKeyRangeIdentity = new PartitionKeyRangeIdentity("M"); + + // request master partition info to ensure it is cached. 
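+ // the stubbed first fetch drops one replica, so the cached addresses are suboptimal; once the refresh period elapses, the next lookup should go back to the gateway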
+ AddressInformation[] subOptimalAddresses = spyCache.tryGetAddresses(req, + partitionKeyRangeIdentity, + false) + .block(); + + assertThat(getMasterAddressesViaGatewayAsyncInvocation.get()).isEqualTo(1); + assertThat(subOptimalAddresses).hasSize(ServiceConfig.SystemReplicationPolicy.MaxReplicaSetSize - 1); + + Instant start = Instant.now(); + TimeUnit.SECONDS.sleep(refreshPeriodInSeconds + 1); + Instant end = Instant.now(); + assertThat(end.minusSeconds(refreshPeriodInSeconds)).isAfter(start); + + assertThat(clientWrapper.capturedRequests).asList().hasSize(1); + clientWrapper.capturedRequests.clear(); + + Mono addressesObs = spyCache.tryGetAddresses(req, + partitionKeyRangeIdentity, + false); + + + AddressInformation[] actualAddresses = getSuccessResult(addressesObs, TIMEOUT); + // the cache address is used. no new http request is sent + assertThat(clientWrapper.capturedRequests).asList().hasSize(1); + assertThat(getMasterAddressesViaGatewayAsyncInvocation.get()).isEqualTo(2); + assertThat(actualAddresses).hasSize(ServiceConfig.SystemReplicationPolicy.MaxReplicaSetSize); + + List
fetchedAddresses = origCache.getMasterAddressesViaGatewayAsync(req, ResourceType.Database, + null, "/dbs/", false, false, null).block(); + + assertSameAs(ImmutableList.copyOf(actualAddresses), fetchedAddresses); + } + + public static void assertSameAs(List<AddressInformation> actual, List<Address>
expected) { + assertThat(actual).asList().hasSize(expected.size()); + for(int i = 0; i < expected.size(); i++) { + assertEqual(actual.get(i), expected.get(i)); + } + } + + private static void assertEqual(AddressInformation actual, Address expected) { + assertThat(actual.getPhysicalUri()).isEqualTo(expected.getPhyicalUri()); + assertThat(actual.getProtocolScheme()).isEqualTo(expected.getProtocolScheme().toLowerCase()); + assertThat(actual.isPrimary()).isEqualTo(expected.IsPrimary()); + } + + private static void assertEqual(AddressInformation actual, AddressInformation expected) { + assertThat(actual.getPhysicalUri()).isEqualTo(expected.getPhysicalUri()); + assertThat(actual.getProtocolName()).isEqualTo(expected.getProtocolName()); + assertThat(actual.isPrimary()).isEqualTo(expected.isPrimary()); + assertThat(actual.isPublic()).isEqualTo(expected.isPublic()); + } + + public static void assertExactlyEqual(AddressInformation[] actual, AddressInformation[] expected) { + assertExactlyEqual(Arrays.asList(actual), Arrays.asList(expected)); + } + + public static void assertExactlyEqual(List actual, List expected) { + assertThat(actual).asList().hasSize(expected.size()); + for(int i = 0; i < expected.size(); i++) { + assertEqual(actual.get(i), expected.get(i)); + } + } + + public static T getSuccessResult(Mono observable, long timeout) { + TestSubscriber testSubscriber = new TestSubscriber<>(); + observable.subscribe(testSubscriber); + testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS); + testSubscriber.assertNoErrors(); + testSubscriber.assertComplete(); + testSubscriber.assertValueCount(1); + return testSubscriber.values().get(0); + } + + public static void validateSuccess(Mono> observable, + PartitionReplicasAddressesValidator validator, long timeout) { + TestSubscriber> testSubscriber = new TestSubscriber<>(); + observable.subscribe(testSubscriber); + testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS); + testSubscriber.assertNoErrors(); + testSubscriber.assertComplete(); + testSubscriber.assertValueCount(1); + validator.validate(testSubscriber.values().get(0)); + } + + @BeforeClass(groups = { "direct" }, timeOut = SETUP_TIMEOUT) + public void beforeClass() { + client = clientBuilder().build(); + createdDatabase = SHARED_DATABASE; + + RequestOptions options = new RequestOptions(); + options.setOfferThroughput(30000); + createdCollection = createCollection(client, createdDatabase.id(), getCollectionDefinition(), options); + } + + @AfterClass(groups = { "direct" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeDeleteCollection(client, createdCollection); + safeClose(client); + } + + static protected DocumentCollection getCollectionDefinition() { + PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition(); + ArrayList paths = new ArrayList<>(); + paths.add("/mypk"); + partitionKeyDef.paths(paths); + + DocumentCollection collectionDefinition = new DocumentCollection(); + collectionDefinition.id("mycol"); + collectionDefinition.setPartitionKey(partitionKeyDef); + + return collectionDefinition; + } + + private HttpClient getHttpClient(Configs configs) { + return HttpClient.createFixed(new HttpClientConfig(configs)); + } + + private HttpClientUnderTestWrapper getHttpClientUnderTestWrapper(Configs configs) { + HttpClient origHttpClient = getHttpClient(configs); + return new HttpClientUnderTestWrapper(origHttpClient); + } + + public String getNameBasedCollectionLink() { + return "dbs/" + createdDatabase.id() + "/colls/" + 
createdCollection.id(); + } + + public String getCollectionSelfLink() { + return createdCollection.selfLink(); + } + + private Document getDocumentDefinition() { + String uuid = UUID.randomUUID().toString(); + Document doc = new Document(String.format("{ " + + "\"id\": \"%s\", " + + "\"mypk\": \"%s\", " + + "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]" + + "}" + , uuid, uuid)); + return doc; + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/GatewayServiceConfigurationReaderTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/GatewayServiceConfigurationReaderTest.java new file mode 100644 index 0000000000000..99ce070900378 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/GatewayServiceConfigurationReaderTest.java @@ -0,0 +1,174 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.internal.AsyncDocumentClient; +import com.azure.data.cosmos.internal.AsyncDocumentClient.Builder; +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.ConnectionPolicy; +import com.azure.data.cosmos.internal.DatabaseAccount; +import com.azure.data.cosmos.internal.BaseAuthorizationTokenProvider; +import com.azure.data.cosmos.internal.SpyClientUnderTestFactory; +import com.azure.data.cosmos.internal.TestSuiteBase; +import com.azure.data.cosmos.internal.http.HttpClient; +import com.azure.data.cosmos.internal.http.HttpHeaders; +import com.azure.data.cosmos.internal.http.HttpRequest; +import com.azure.data.cosmos.internal.http.HttpResponse; +import com.azure.data.cosmos.internal.TestConfigurations; +import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.ByteBufUtil; +import io.reactivex.subscribers.TestSubscriber; +import org.apache.commons.io.IOUtils; +import org.mockito.Mockito; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.net.URI; +import java.nio.charset.StandardCharsets; +import java.util.concurrent.TimeUnit; + +import static org.assertj.core.api.Assertions.assertThat; + +public class GatewayServiceConfigurationReaderTest extends TestSuiteBase { + + private static final int TIMEOUT = 8000; + private HttpClient mockHttpClient; + private BaseAuthorizationTokenProvider baseAuthorizationTokenProvider; + private ConnectionPolicy connectionPolicy; + private GatewayServiceConfigurationReader mockGatewayServiceConfigurationReader; + private GatewayServiceConfigurationReader gatewayServiceConfigurationReader; + private AsyncDocumentClient client; + private String databaseAccountJson; + private DatabaseAccount expectedDatabaseAccount; + + @Factory(dataProvider = "clientBuilders") + public GatewayServiceConfigurationReaderTest(Builder clientBuilder) { + super(clientBuilder); + } + + @BeforeClass(groups = "simple") + public void setup() throws Exception { + client = clientBuilder().build(); + SpyClientUnderTestFactory.ClientUnderTest clientUnderTest = SpyClientUnderTestFactory.createClientUnderTest(this.clientBuilder()); + HttpClient httpClient = clientUnderTest.getSpyHttpClient(); + baseAuthorizationTokenProvider = new BaseAuthorizationTokenProvider(TestConfigurations.MASTER_KEY); + connectionPolicy = ConnectionPolicy.defaultPolicy(); + mockHttpClient = Mockito.mock(HttpClient.class); + mockGatewayServiceConfigurationReader = new GatewayServiceConfigurationReader(new URI(TestConfigurations.HOST), + false, TestConfigurations.MASTER_KEY, connectionPolicy, baseAuthorizationTokenProvider, mockHttpClient); + + gatewayServiceConfigurationReader = new GatewayServiceConfigurationReader(new URI(TestConfigurations.HOST), + false, + TestConfigurations.MASTER_KEY, + connectionPolicy, + baseAuthorizationTokenProvider, + httpClient); + databaseAccountJson = IOUtils + .toString(getClass().getClassLoader().getResourceAsStream("databaseAccount.json"), "UTF-8"); + expectedDatabaseAccount = new DatabaseAccount(databaseAccountJson); + HttpResponse mockResponse = getMockResponse(databaseAccountJson); + Mockito.when(mockHttpClient.send(Mockito.any(HttpRequest.class))).thenReturn(Mono.just(mockResponse)); + } + + @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = 
true) + public void afterClass() { + safeClose(client); + } + + @Test(groups = "simple") + public void mockInitializeReaderAsync() { + Mono databaseAccount = mockGatewayServiceConfigurationReader.initializeReaderAsync(); + validateSuccess(databaseAccount, expectedDatabaseAccount); + } + + @Test(groups = "simple") + public void mockInitializeReaderAsyncWithResourceToken() throws Exception { + HttpResponse mockResponse = getMockResponse(databaseAccountJson); + Mockito.when(mockHttpClient.send(Mockito.any(HttpRequest.class))).thenReturn(Mono.just(mockResponse)); + + mockGatewayServiceConfigurationReader = new GatewayServiceConfigurationReader(new URI(TestConfigurations.HOST), + true, "SampleResourceToken", connectionPolicy, baseAuthorizationTokenProvider, mockHttpClient); + + Mono databaseAccount = mockGatewayServiceConfigurationReader.initializeReaderAsync(); + validateSuccess(databaseAccount, expectedDatabaseAccount); + } + + @Test(groups = "simple") + public void initializeReaderAsync() { + Mono databaseAccount = gatewayServiceConfigurationReader.initializeReaderAsync(); + validateSuccess(databaseAccount); + } + + public static void validateSuccess(Mono observable) { + TestSubscriber testSubscriber = new TestSubscriber(); + + observable.subscribe(testSubscriber); + testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS); + testSubscriber.assertNoErrors(); + testSubscriber.assertComplete(); + testSubscriber.assertValueCount(1); + DatabaseAccount databaseAccount = testSubscriber.values().get(0); + assertThat(BridgeInternal.getQueryEngineConfiuration(databaseAccount).size() > 0).isTrue(); + assertThat(BridgeInternal.getReplicationPolicy(databaseAccount)).isNotNull(); + assertThat(BridgeInternal.getSystemReplicationPolicy(databaseAccount)).isNotNull(); + } + + public static void validateSuccess(Mono observable, DatabaseAccount expectedDatabaseAccount) { + TestSubscriber testSubscriber = new TestSubscriber(); + + observable.subscribe(testSubscriber); + testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS); + testSubscriber.assertNoErrors(); + testSubscriber.assertComplete(); + testSubscriber.assertValueCount(1); + DatabaseAccount databaseAccount = testSubscriber.values().get(0); + assertThat(databaseAccount.id()).isEqualTo(expectedDatabaseAccount.id()); + assertThat(databaseAccount.getAddressesLink()) + .isEqualTo(expectedDatabaseAccount.getAddressesLink()); + assertThat(databaseAccount.getWritableLocations().iterator().next().getEndpoint()) + .isEqualTo(expectedDatabaseAccount.getWritableLocations().iterator().next().getEndpoint()); + assertThat(BridgeInternal.getSystemReplicationPolicy(databaseAccount).getMaxReplicaSetSize()) + .isEqualTo(BridgeInternal.getSystemReplicationPolicy(expectedDatabaseAccount).getMaxReplicaSetSize()); + assertThat(BridgeInternal.getSystemReplicationPolicy(databaseAccount).getMaxReplicaSetSize()) + .isEqualTo(BridgeInternal.getSystemReplicationPolicy(expectedDatabaseAccount).getMaxReplicaSetSize()); + assertThat(BridgeInternal.getQueryEngineConfiuration(databaseAccount)) + .isEqualTo(BridgeInternal.getQueryEngineConfiuration(expectedDatabaseAccount)); + } + + private HttpResponse getMockResponse(String databaseAccountJson) { + HttpResponse httpResponse = Mockito.mock(HttpResponse.class); + Mockito.doReturn(200).when(httpResponse).statusCode(); + Mockito.doReturn(Flux.just(ByteBufUtil.writeUtf8(ByteBufAllocator.DEFAULT, databaseAccountJson))) + .when(httpResponse).body(); + Mockito.doReturn(Mono.just(databaseAccountJson)) + 
.when(httpResponse).bodyAsString(StandardCharsets.UTF_8); + + Mockito.doReturn(new HttpHeaders()).when(httpResponse).headers(); + return httpResponse; + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/GatewayServiceConfiguratorReaderMock.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/GatewayServiceConfiguratorReaderMock.java new file mode 100644 index 0000000000000..9d97801ce09eb --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/GatewayServiceConfiguratorReaderMock.java @@ -0,0 +1,75 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.internal.DatabaseAccount; +import com.azure.data.cosmos.internal.ReplicationPolicy; +import org.mockito.Mockito; +import reactor.core.publisher.Mono; + +public class GatewayServiceConfiguratorReaderMock { + + public GatewayServiceConfigurationReader gatewayServiceConfigurationReader; + + public static GatewayServiceConfiguratorReaderMock from(ConsistencyLevel accountConsistencyLevel) { + return new GatewayServiceConfiguratorReaderMock(new ReplicationPolicy("{}"), new ReplicationPolicy("{}"), accountConsistencyLevel); + } + + public static GatewayServiceConfiguratorReaderMock from(ConsistencyLevel accountConsistencyLevel, + int systemMaxReplicaCount, + int systemMinReplicaCount, + int userMaxReplicaCount, + int userMinReplicaCount) { + ReplicationPolicy userRP = Mockito.mock(ReplicationPolicy.class); + Mockito.doReturn(userMaxReplicaCount).when(userRP).getMaxReplicaSetSize(); + Mockito.doReturn(userMinReplicaCount).when(userRP).getMinReplicaSetSize(); + + ReplicationPolicy systemRP = Mockito.mock(ReplicationPolicy.class); + Mockito.doReturn(systemMaxReplicaCount).when(systemRP).getMaxReplicaSetSize(); + Mockito.doReturn(systemMinReplicaCount).when(systemRP).getMinReplicaSetSize(); + + return new GatewayServiceConfiguratorReaderMock(userRP, systemRP, accountConsistencyLevel); + } + + public static GatewayServiceConfiguratorReaderMock from(ConsistencyLevel accountConsistencyLevel, int maxReplicaSize, int minReplicaCase) { + ReplicationPolicy rp = Mockito.mock(ReplicationPolicy.class); + Mockito.doReturn(maxReplicaSize).when(rp).getMaxReplicaSetSize(); + Mockito.doReturn(minReplicaCase).when(rp).getMinReplicaSetSize(); + + return new GatewayServiceConfiguratorReaderMock(rp, rp, accountConsistencyLevel); + } + + + public GatewayServiceConfiguratorReaderMock(ReplicationPolicy userReplicationPolicy, + ReplicationPolicy systemReplicationPolicy, + ConsistencyLevel defaultConsistencyLevel) { + this.gatewayServiceConfigurationReader = Mockito.mock(GatewayServiceConfigurationReader.class); + + Mockito.doReturn(Mono.just(Mockito.mock(DatabaseAccount.class))).when(this.gatewayServiceConfigurationReader).initializeReaderAsync(); + Mockito.doReturn(defaultConsistencyLevel).when(this.gatewayServiceConfigurationReader).getDefaultConsistencyLevel(); + Mockito.doReturn(systemReplicationPolicy).when(this.gatewayServiceConfigurationReader).getSystemReplicationPolicy(); + Mockito.doReturn(userReplicationPolicy).when(this.gatewayServiceConfigurationReader).getUserReplicationPolicy(); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/GlobalAddressResolverTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/GlobalAddressResolverTest.java new file mode 100644 index 0000000000000..a6a05b430d380 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/GlobalAddressResolverTest.java @@ -0,0 +1,192 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to 
whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.directconnectivity; + + +import com.azure.data.cosmos.ConnectionPolicy; +import com.azure.data.cosmos.internal.DocumentCollection; +import com.azure.data.cosmos.internal.PartitionKeyRange; +import com.azure.data.cosmos.internal.GlobalEndpointManager; +import com.azure.data.cosmos.internal.IAuthorizationTokenProvider; +import com.azure.data.cosmos.internal.OperationType; +import com.azure.data.cosmos.internal.ResourceType; +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import com.azure.data.cosmos.internal.UserAgentContainer; +import com.azure.data.cosmos.internal.caches.RxCollectionCache; +import com.azure.data.cosmos.internal.caches.RxPartitionKeyRangeCache; +import com.azure.data.cosmos.internal.http.HttpClient; +import com.azure.data.cosmos.internal.routing.CollectionRoutingMap; +import com.azure.data.cosmos.internal.routing.PartitionKeyInternalHelper; +import com.azure.data.cosmos.internal.routing.PartitionKeyRangeIdentity; +import org.apache.commons.collections4.list.UnmodifiableList; +import org.mockito.Matchers; +import org.mockito.Mockito; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; +import reactor.core.publisher.Mono; + +import java.net.URL; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.Callable; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.assertj.core.api.Assertions.assertThat; + +; + +public class GlobalAddressResolverTest { + + private HttpClient httpClient; + private GlobalEndpointManager endpointManager; + private IAuthorizationTokenProvider authorizationTokenProvider; + private UserAgentContainer userAgentContainer; + private RxCollectionCache collectionCache; + private GatewayServiceConfigurationReader serviceConfigReader; + private RxPartitionKeyRangeCache routingMapProvider; + private ConnectionPolicy connectionPolicy; + private URL urlforRead1; + private URL urlforRead2; + private URL urlforRead3; + + private URL urlforWrite1; + private URL urlforWrite2; + private URL urlforWrite3; + + @BeforeClass(groups = "unit") + public void setup() throws Exception { + urlforRead1 = new URL("http://testRead1.com/"); + urlforRead2 = new URL("http://testRead2.com/"); + urlforRead3 = new URL("http://testRead3.com/"); + urlforWrite1 = new URL("http://testWrite1.com/"); + urlforWrite2 = new URL("http://testWrite2.com/"); + urlforWrite3 = new URL("http://testWrite3.com/"); + + connectionPolicy = new ConnectionPolicy(); + connectionPolicy.enableReadRequestsFallback(true); + httpClient = Mockito.mock(HttpClient.class); + endpointManager = Mockito.mock(GlobalEndpointManager.class); + + List readEndPointList = new ArrayList<>(); + 
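// Seed the mocked GlobalEndpointManager with three read endpoints and three write endpoints. + 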
readEndPointList.add(urlforRead1); + readEndPointList.add(urlforRead2); + readEndPointList.add(urlforRead3); + UnmodifiableList readList = new UnmodifiableList(readEndPointList); + + List writeEndPointList = new ArrayList<>(); + writeEndPointList.add(urlforWrite1); + writeEndPointList.add(urlforWrite2); + writeEndPointList.add(urlforWrite3); + UnmodifiableList writeList = new UnmodifiableList(writeEndPointList); + + Mockito.when(endpointManager.getReadEndpoints()).thenReturn(readList); + Mockito.when(endpointManager.getWriteEndpoints()).thenReturn(writeList); + + authorizationTokenProvider = Mockito.mock(IAuthorizationTokenProvider.class); + + DocumentCollection collectionDefinition = new DocumentCollection(); + collectionDefinition.id(UUID.randomUUID().toString()); + collectionCache = Mockito.mock(RxCollectionCache.class); + Mockito.when(collectionCache.resolveCollectionAsync(Matchers.any(RxDocumentServiceRequest.class))).thenReturn(Mono.just(collectionDefinition)); + routingMapProvider = Mockito.mock(RxPartitionKeyRangeCache.class); + userAgentContainer = Mockito.mock(UserAgentContainer.class); + serviceConfigReader = Mockito.mock(GatewayServiceConfigurationReader.class); + + } + + @Test(groups = "unit") + public void resolveAsync() throws Exception { + + GlobalAddressResolver globalAddressResolver = new GlobalAddressResolver(httpClient, endpointManager, Protocol.HTTPS, authorizationTokenProvider, collectionCache, routingMapProvider, + userAgentContainer, + serviceConfigReader, connectionPolicy); + RxDocumentServiceRequest request; + request = RxDocumentServiceRequest.createFromName( + OperationType.Read, + "dbs/db/colls/coll/docs/doc1", + ResourceType.Document); + + Set urlsBeforeResolve = globalAddressResolver.addressCacheByEndpoint.keySet(); + assertThat(urlsBeforeResolve.size()).isEqualTo(5); + assertThat(urlsBeforeResolve.contains(urlforRead3)).isFalse();//Last read will be removed from addressCacheByEndpoint after 5 endpoints + assertThat(urlsBeforeResolve.contains(urlforRead2)).isTrue(); + + URL testUrl = new URL("http://Test.com/"); + Mockito.when(endpointManager.resolveServiceEndpoint(Matchers.any(RxDocumentServiceRequest.class))).thenReturn(testUrl); + globalAddressResolver.resolveAsync(request, true); + Set urlsAfterResolve = globalAddressResolver.addressCacheByEndpoint.keySet(); + assertThat(urlsAfterResolve.size()).isEqualTo(5); + assertThat(urlsAfterResolve.contains(urlforRead2)).isFalse();//Last read will be removed from addressCacheByEndpoint after 5 endpoints + assertThat(urlsBeforeResolve.contains(testUrl)).isTrue();//New endpoint will be added in addressCacheByEndpoint + } + + @Test(groups = "unit") + public void openAsync() throws Exception { + GlobalAddressResolver globalAddressResolver = new GlobalAddressResolver(httpClient, endpointManager, Protocol.HTTPS, authorizationTokenProvider, collectionCache, routingMapProvider, + userAgentContainer, + serviceConfigReader, connectionPolicy); + Map addressCacheByEndpoint = Mockito.spy(globalAddressResolver.addressCacheByEndpoint); + GlobalAddressResolver.EndpointCache endpointCache = new GlobalAddressResolver.EndpointCache(); + GatewayAddressCache gatewayAddressCache = Mockito.mock(GatewayAddressCache.class); + AtomicInteger numberOfTaskCompleted = new AtomicInteger(0); + endpointCache.addressCache = gatewayAddressCache; + globalAddressResolver.addressCacheByEndpoint.clear(); + globalAddressResolver.addressCacheByEndpoint.put(urlforRead1, endpointCache); + globalAddressResolver.addressCacheByEndpoint.put(urlforRead2, 
endpointCache); + + + DocumentCollection documentCollection = new DocumentCollection(); + documentCollection.id("TestColl"); + documentCollection.resourceId("IXYFAOHEBPM="); + CollectionRoutingMap collectionRoutingMap = Mockito.mock(CollectionRoutingMap.class); + PartitionKeyRange range = new PartitionKeyRange("0", PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey, + PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey); + List partitionKeyRanges = new ArrayList<>(); + partitionKeyRanges.add(range); + Mockito.when(collectionRoutingMap.getOrderedPartitionKeyRanges()).thenReturn(partitionKeyRanges); + Mono collectionRoutingMapSingle = Mono.just(collectionRoutingMap); + Mockito.when(routingMapProvider.tryLookupAsync(Mockito.any(), Mockito.any(), Mockito.any())).thenReturn(collectionRoutingMapSingle); + + List ranges = new ArrayList<>(); + for (PartitionKeyRange partitionKeyRange : (List) collectionRoutingMap.getOrderedPartitionKeyRanges()) { + PartitionKeyRangeIdentity partitionKeyRangeIdentity = new PartitionKeyRangeIdentity(documentCollection.resourceId(), partitionKeyRange.id()); + ranges.add(partitionKeyRangeIdentity); + } + + Mono completable = Mono.fromCallable(new Callable() { + @Override + public Void call() throws Exception { + numberOfTaskCompleted.getAndIncrement(); + return null; + } + }); + Mockito.when(gatewayAddressCache.openAsync(documentCollection, ranges)).thenReturn(completable); + + globalAddressResolver.openAsync(documentCollection).block(); + assertThat(numberOfTaskCompleted.get()).isEqualTo(2); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/GoneAndRetryWithRetryPolicyTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/GoneAndRetryWithRetryPolicyTest.java new file mode 100644 index 0000000000000..4601b7df252e1 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/GoneAndRetryWithRetryPolicyTest.java @@ -0,0 +1,161 @@ +/* + * + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.BadRequestException; +import com.azure.data.cosmos.GoneException; +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.IRetryPolicy; +import com.azure.data.cosmos.InvalidPartitionException; +import com.azure.data.cosmos.internal.OperationType; +import com.azure.data.cosmos.PartitionIsMigratingException; +import com.azure.data.cosmos.PartitionKeyRangeIsSplittingException; +import com.azure.data.cosmos.internal.ResourceType; +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import com.azure.data.cosmos.internal.directconnectivity.GoneAndRetryWithRetryPolicy; +import org.testng.annotations.Test; +import reactor.core.publisher.Mono; + +import static org.assertj.core.api.Assertions.assertThat; + +/** + * This test class covers the various exceptions handled by GoneAndRetryWithRetryPolicy. + * + */ +public class GoneAndRetryWithRetryPolicyTest { + protected static final int TIMEOUT = 60000; + + /** + * Retry with GoneException: retries four times and verifies the returned + * ShouldRetryResult after each attempt. + */ + @Test(groups = { "unit" }, timeOut = TIMEOUT) + public void shouldRetryWithGoneException() { + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.Document); + GoneAndRetryWithRetryPolicy goneAndRetryWithRetryPolicy = new GoneAndRetryWithRetryPolicy(request, 30); + Mono singleShouldRetry = goneAndRetryWithRetryPolicy + .shouldRetry(new GoneException()); + IRetryPolicy.ShouldRetryResult shouldRetryResult = singleShouldRetry.block(); + assertThat(shouldRetryResult.shouldRetry).isTrue(); + assertThat(shouldRetryResult.policyArg.getValue0()).isTrue(); + assertThat(shouldRetryResult.policyArg.getValue3()).isEqualTo(1); + assertThat(shouldRetryResult.backOffTime.getSeconds()).isEqualTo(0); + + singleShouldRetry = goneAndRetryWithRetryPolicy.shouldRetry(new GoneException()); + shouldRetryResult = singleShouldRetry.block(); + assertThat(shouldRetryResult.shouldRetry).isTrue(); + assertThat(shouldRetryResult.policyArg.getValue0()).isTrue(); + assertThat(shouldRetryResult.policyArg.getValue3()).isEqualTo(2); + assertThat(shouldRetryResult.backOffTime.getSeconds()).isEqualTo(1); + + singleShouldRetry = goneAndRetryWithRetryPolicy.shouldRetry(new GoneException()); + shouldRetryResult = singleShouldRetry.block(); + assertThat(shouldRetryResult.shouldRetry).isTrue(); + assertThat(shouldRetryResult.policyArg.getValue0()).isTrue(); + assertThat(shouldRetryResult.policyArg.getValue3()).isEqualTo(3); + assertThat(shouldRetryResult.backOffTime.getSeconds()).isEqualTo(2); + + singleShouldRetry = goneAndRetryWithRetryPolicy.shouldRetry(new GoneException()); + shouldRetryResult = singleShouldRetry.block(); + assertThat(shouldRetryResult.shouldRetry).isTrue(); + assertThat(shouldRetryResult.policyArg.getValue0()).isTrue(); + assertThat(shouldRetryResult.policyArg.getValue3()).isEqualTo(4); + assertThat(shouldRetryResult.backOffTime.getSeconds()).isEqualTo(4); + + } + + /** + * Retry with PartitionIsMigratingException + */ + @Test(groups = { "unit" }, timeOut = TIMEOUT) + public void shouldRetryWithPartitionIsMigratingException() { + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.Document); + GoneAndRetryWithRetryPolicy goneAndRetryWithRetryPolicy = new GoneAndRetryWithRetryPolicy(request, 30); + Mono 
singleShouldRetry = goneAndRetryWithRetryPolicy + .shouldRetry(new PartitionIsMigratingException()); + IRetryPolicy.ShouldRetryResult shouldRetryResult = singleShouldRetry.block(); + assertThat(shouldRetryResult.shouldRetry).isTrue(); + assertThat(request.forceCollectionRoutingMapRefresh).isTrue(); + assertThat(shouldRetryResult.policyArg.getValue0()).isTrue(); + } + + /** + * Retry with InvalidPartitionException + */ + @Test(groups = { "unit" }, timeOut = TIMEOUT) + public void shouldRetryWithInvalidPartitionException() { + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.Document); + GoneAndRetryWithRetryPolicy goneAndRetryWithRetryPolicy = new GoneAndRetryWithRetryPolicy(request, 30); + Mono singleShouldRetry = goneAndRetryWithRetryPolicy + .shouldRetry(new InvalidPartitionException()); + IRetryPolicy.ShouldRetryResult shouldRetryResult = singleShouldRetry.block(); + assertThat(shouldRetryResult.shouldRetry).isTrue(); + assertThat(request.requestContext.quorumSelectedLSN).isEqualTo(-1); + assertThat(request.requestContext.resolvedPartitionKeyRange).isNull(); + assertThat(request.requestContext.globalCommittedSelectedLSN).isEqualTo(-1); + assertThat(shouldRetryResult.policyArg.getValue0()).isFalse(); + + goneAndRetryWithRetryPolicy.shouldRetry(new InvalidPartitionException()); + // It will retry max till 3 attempts + shouldRetryResult = goneAndRetryWithRetryPolicy.shouldRetry(new InvalidPartitionException()).block(); + assertThat(shouldRetryResult.shouldRetry).isFalse(); + CosmosClientException clientException = (CosmosClientException) shouldRetryResult.exception; + assertThat(clientException.statusCode()).isEqualTo(HttpConstants.StatusCodes.SERVICE_UNAVAILABLE); + + } + + /** + * Retry with PartitionKeyRangeIsSplittingException + */ + @Test(groups = { "unit" }, timeOut = TIMEOUT) + public void shouldRetryWithPartitionKeyRangeIsSplittingException() { + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.Document); + GoneAndRetryWithRetryPolicy goneAndRetryWithRetryPolicy = new GoneAndRetryWithRetryPolicy(request, 30); + Mono singleShouldRetry = goneAndRetryWithRetryPolicy + .shouldRetry(new PartitionKeyRangeIsSplittingException()); + IRetryPolicy.ShouldRetryResult shouldRetryResult = singleShouldRetry.block(); + assertThat(shouldRetryResult.shouldRetry).isTrue(); + assertThat(request.forcePartitionKeyRangeRefresh).isTrue(); + assertThat(request.requestContext.resolvedPartitionKeyRange).isNull(); + assertThat(request.requestContext.quorumSelectedLSN).isEqualTo(-1); + assertThat(shouldRetryResult.policyArg.getValue0()).isFalse(); + + } + + /** + * No retry on bad request exception + */ + @Test(groups = { "unit" }, timeOut = TIMEOUT) + public void shouldRetryWithGenericException() { + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.Document); + GoneAndRetryWithRetryPolicy goneAndRetryWithRetryPolicy = new GoneAndRetryWithRetryPolicy(request, 30); + Mono singleShouldRetry = goneAndRetryWithRetryPolicy + .shouldRetry(new BadRequestException()); + IRetryPolicy.ShouldRetryResult shouldRetryResult = singleShouldRetry.block(); + assertThat(shouldRetryResult.shouldRetry).isFalse(); + } + +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/HttpClientMockWrapper.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/HttpClientMockWrapper.java new file mode 100644 index 
0000000000000..2f19b24068f02 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/HttpClientMockWrapper.java @@ -0,0 +1,181 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.internal.directconnectivity.WFConstants; +import com.azure.data.cosmos.internal.http.HttpClient; +import com.azure.data.cosmos.internal.http.HttpHeaders; +import com.azure.data.cosmos.internal.http.HttpRequest; +import com.azure.data.cosmos.internal.http.HttpResponse; +import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.ByteBufUtil; +import org.mockito.Mockito; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.nio.charset.StandardCharsets; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +; + +public class HttpClientMockWrapper { + public static HttpClientBehaviourBuilder httpClientBehaviourBuilder() { + return new HttpClientBehaviourBuilder(); + } + + public static class HttpClientBehaviourBuilder { + private int status; + private String content; + private HttpHeaders httpHeaders = new HttpHeaders(); + private Exception networkFailure; + + public HttpClientBehaviourBuilder withNetworkFailure(Exception networkFailure) { + this.networkFailure = networkFailure; + return this; + } + + public HttpClientBehaviourBuilder withStatus(int status) { + this.status = status; + return this; + } + + public HttpClientBehaviourBuilder withHeaders(HttpHeaders httpHeaders) { + this.httpHeaders = httpHeaders; + return this; + } + + public HttpClientBehaviourBuilder withHeaders(String... 
pairs) { + if (pairs.length % 2 != 0) { + throw new IllegalArgumentException(); + } + + for(int i = 0; i < pairs.length/ 2; i++) { + this.httpHeaders.set(pairs[2*i], pairs[2*i +1]); + } + + return this; + } + + public HttpClientBehaviourBuilder withContent(String content) { + this.content = content; + return this; + } + + public HttpClientBehaviourBuilder withHeaderLSN(long lsn) { + this.httpHeaders.set(WFConstants.BackendHeaders.LSN, Long.toString(lsn)); + return this; + } + + public HttpClientBehaviourBuilder withHeaderPartitionKeyRangeId(String partitionKeyRangeId) { + this.httpHeaders.set(WFConstants.BackendHeaders.PARTITION_KEY_RANGE_ID, partitionKeyRangeId); + return this; + } + + public HttpClientBehaviourBuilder withHeaderSubStatusCode(int subStatusCode) { + this.httpHeaders.set(WFConstants.BackendHeaders.SUB_STATUS, Integer.toString(subStatusCode)); + return this; + } + + public HttpResponse asHttpResponse() { + if (this.networkFailure != null) { + return null; + } + + HttpResponse resp = Mockito.mock(HttpResponse.class); + Mockito.doReturn(this.status).when(resp).statusCode(); + Mockito.doReturn(Flux.just(ByteBufUtil.writeUtf8(ByteBufAllocator.DEFAULT, this.content))).when(resp).body(); + Mockito.doReturn(Mono.just(this.content)).when(resp).bodyAsString(StandardCharsets.UTF_8); + Mockito.doReturn(this.httpHeaders).when(resp).headers(); + return resp; + } + + public Exception asNetworkFailure() { + return this.networkFailure; + } + + @Override + public String toString() { + return "HttpClientBehaviourBuilder{" + + "status=" + status + + ", content='" + content + '\'' + + ", httpHeaders=" + httpHeaders + + ", networkFailure=" + networkFailure + + '}'; + } + } + + private final HttpClient httpClient; + private final List requests = Collections.synchronizedList(new ArrayList<>()); + + public HttpClientMockWrapper(long responseAfterMillis, HttpResponse httpResponse) { + this(responseAfterMillis, httpResponse, null); + } + + private static Mono httpResponseOrException(HttpResponse httpResponse, Exception e) { + assert ((httpResponse != null && e == null) || (httpResponse == null && e != null)); + return httpResponse != null ? 
Mono.just(httpResponse) : Mono.error(e); + } + + public HttpClientMockWrapper(long responseAfterMillis, Exception e) { + this(responseAfterMillis, null, e); + } + + public HttpClientMockWrapper(HttpResponse httpResponse) { + this(0, httpResponse); + } + + private HttpClientMockWrapper(long responseAfterMillis, final HttpResponse httpResponse, final Exception e) { + httpClient = Mockito.mock(HttpClient.class); + assert httpResponse == null || e == null; + + Mockito.doAnswer(invocationOnMock -> { + HttpRequest httpRequest = invocationOnMock.getArgumentAt(0, HttpRequest.class); + requests.add(httpRequest); + if (responseAfterMillis <= 0) { + return httpResponseOrException(httpResponse, e); + } else { + return Mono.delay(Duration.ofMillis(responseAfterMillis)).flatMap(t -> httpResponseOrException(httpResponse, e)); + } + }).when(httpClient).send(Mockito.any(HttpRequest.class)); + } + + public HttpClientMockWrapper(HttpClientBehaviourBuilder builder) { + this(0, builder.asHttpResponse(), builder.asNetworkFailure()); + } + + public HttpClientMockWrapper(Exception e) { + this(0, e); + } + + public HttpClient getClient() { + return httpClient; + } + + public List getCapturedInvocation() { + return requests; + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/HttpTransportClientTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/HttpTransportClientTest.java new file mode 100644 index 0000000000000..517a3fca3fb41 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/HttpTransportClientTest.java @@ -0,0 +1,653 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.BadRequestException; +import com.azure.data.cosmos.ConflictException; +import com.azure.data.cosmos.ForbiddenException; +import com.azure.data.cosmos.GoneException; +import com.azure.data.cosmos.LockedException; +import com.azure.data.cosmos.MethodNotAllowedException; +import com.azure.data.cosmos.PartitionKeyRangeGoneException; +import com.azure.data.cosmos.PreconditionFailedException; +import com.azure.data.cosmos.RequestEntityTooLargeException; +import com.azure.data.cosmos.RequestRateTooLargeException; +import com.azure.data.cosmos.RequestTimeoutException; +import com.azure.data.cosmos.RetryWithException; +import com.azure.data.cosmos.ServiceUnavailableException; +import com.azure.data.cosmos.UnauthorizedException; +import com.azure.data.cosmos.internal.directconnectivity.HttpTransportClient; +import com.azure.data.cosmos.internal.directconnectivity.HttpUtils; +import com.azure.data.cosmos.internal.Configs; +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.InternalServerErrorException; +import com.azure.data.cosmos.InvalidPartitionException; +import com.azure.data.cosmos.NotFoundException; +import com.azure.data.cosmos.internal.OperationType; +import com.azure.data.cosmos.PartitionIsMigratingException; +import com.azure.data.cosmos.PartitionKeyRangeIsSplittingException; +import com.azure.data.cosmos.internal.ResourceType; +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import com.azure.data.cosmos.internal.UserAgentContainer; +import com.azure.data.cosmos.internal.directconnectivity.HttpTransportClient; +import com.azure.data.cosmos.internal.directconnectivity.HttpUtils; +import com.azure.data.cosmos.internal.http.HttpClient; +import com.azure.data.cosmos.internal.http.HttpHeaders; +import com.azure.data.cosmos.internal.http.HttpRequest; +import com.azure.data.cosmos.internal.http.HttpResponse; +import com.azure.data.cosmos.internal.FailureValidator; +import io.netty.channel.ConnectTimeoutException; +import io.reactivex.subscribers.TestSubscriber; +import org.assertj.core.api.Assertions; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; +import reactor.core.publisher.Mono; + +import java.net.URI; +import java.net.UnknownHostException; +import java.util.HashMap; +import java.util.concurrent.TimeUnit; + +import static org.assertj.core.api.AssertionsForClassTypes.assertThat; + +; + +/** + * Tests validating {@link HttpTransportClient} + */ +public class HttpTransportClientTest { + private final static Configs configs = new Configs(); + private final static int TIMEOUT = 1000; + + private final URI physicalAddress = URI.create( + "https://by4prdddc03-docdb-1.documents.azure.com:9056" + + "/apps/b76af614-5421-4318-4c9e-33056ff5a2bf/services/e7c8d429-c379-40c9-9486-65b89b70be2f" + + "/partitions/5f5b8766-3bdf-4713-b85a-a55ac2ccd62c/replicas/131828696163674404p/"); + + private final long lsn = 5; + private final String partitionKeyRangeId = "3"; + + @Test(groups = "unit") + public void getResourceFeedUri_Document() throws Exception { + RxDocumentServiceRequest req = RxDocumentServiceRequest.createFromName( + OperationType.Create, "dbs/db/colls/col", ResourceType.Document); + URI res = HttpTransportClient.getResourceFeedUri(req.getResourceType(), physicalAddress, req); + assertThat(res.toString()).isEqualTo(physicalAddress.toString() + HttpUtils.urlEncode("dbs/db/colls/col/docs")); + } + + @Test(groups = "unit") + 
public void getResourceFeedUri_Attachment() throws Exception { + RxDocumentServiceRequest req = RxDocumentServiceRequest.createFromName( + OperationType.Create, "dbs/db/colls/col", ResourceType.Attachment); + URI res = HttpTransportClient.getResourceFeedUri(req.getResourceType(), physicalAddress, req); + assertThat(res.toString()).isEqualTo(physicalAddress.toString() + HttpUtils.urlEncode("dbs/db/colls/col/attachments")); + } + + @Test(groups = "unit") + public void getResourceFeedUri_Collection() throws Exception { + RxDocumentServiceRequest req = RxDocumentServiceRequest.createFromName( + OperationType.Create, "dbs/db", ResourceType.DocumentCollection); + URI res = HttpTransportClient.getResourceFeedUri(req.getResourceType(), physicalAddress, req); + assertThat(res.toString()).isEqualTo(physicalAddress.toString() + HttpUtils.urlEncode("dbs/db/colls")); + } + + @Test(groups = "unit") + public void getResourceFeedUri_Conflict() throws Exception { + RxDocumentServiceRequest req = RxDocumentServiceRequest.createFromName( + OperationType.Create, "/dbs/db/colls/col", ResourceType.Conflict); + URI res = HttpTransportClient.getResourceFeedUri(req.getResourceType(), physicalAddress, req); + assertThat(res.toString()).isEqualTo(physicalAddress.toString() + HttpUtils.urlEncode("dbs/db/colls/col/conflicts")); + } + + @Test(groups = "unit") + public void getResourceFeedUri_Database() throws Exception { + RxDocumentServiceRequest req = RxDocumentServiceRequest.createFromName( + OperationType.Create, "/", ResourceType.Database); + URI res = HttpTransportClient.getResourceFeedUri(req.getResourceType(), physicalAddress, req); + assertThat(res.toString()).isEqualTo(physicalAddress.toString() + "dbs"); + } + + public static HttpTransportClient getHttpTransportClientUnderTest(int requestTimeout, + UserAgentContainer userAgent, + HttpClient httpClient) { + class HttpTransportClientUnderTest extends HttpTransportClient { + public HttpTransportClientUnderTest(int requestTimeout, UserAgentContainer userAgent) { + super(configs, requestTimeout, userAgent); + } + + @Override + HttpClient createHttpClient(int requestTimeout) { + return httpClient; + } + } + + return new HttpTransportClientUnderTest(requestTimeout, userAgent); + } + + @Test(groups = "unit") + public void validateDefaultHeaders() { + HttpResponse mockedResponse = new HttpClientMockWrapper.HttpClientBehaviourBuilder() + .withContent("").withStatus(200) + .withHeaders(new HttpHeaders()) + .asHttpResponse(); + HttpClientMockWrapper httpClientMockWrapper = new HttpClientMockWrapper(mockedResponse); + + UserAgentContainer userAgentContainer = new UserAgentContainer(); + userAgentContainer.setSuffix("i am suffix"); + + HttpTransportClient transportClient = getHttpTransportClientUnderTest(100, + userAgentContainer, + httpClientMockWrapper.getClient()); + + RxDocumentServiceRequest request = RxDocumentServiceRequest.createFromName( + OperationType.Create, "dbs/db/colls/col", ResourceType.Document); + request.setContentBytes(new byte[0]); + + transportClient.invokeStoreAsync(physicalAddress, request).block(); + + assertThat(httpClientMockWrapper.getCapturedInvocation()).asList().hasSize(1); + HttpRequest httpRequest = httpClientMockWrapper.getCapturedInvocation().get(0); + + assertThat(httpRequest.headers().value(HttpConstants.HttpHeaders.USER_AGENT)).endsWith("i am suffix"); + assertThat(httpRequest.headers().value(HttpConstants.HttpHeaders.CACHE_CONTROL)).isEqualTo("no-cache"); + 
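// The default Accept and API version headers are also expected on every outgoing request. + 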
assertThat(httpRequest.headers().value(HttpConstants.HttpHeaders.ACCEPT)).isEqualTo("application/json"); + assertThat(httpRequest.headers().value(HttpConstants.HttpHeaders.VERSION)).isEqualTo(HttpConstants.Versions.CURRENT_VERSION); + + } + + @DataProvider(name = "fromMockedHttpResponseToExpectedDocumentClientException") + public Object[][] fromMockedHttpResponseToExpectedDocumentClientException() { + return new Object[][]{ + { + HttpClientMockWrapper. + httpClientBehaviourBuilder() + .withContent("").withStatus(401) + .withHeaderLSN(lsn) + .withHeaderPartitionKeyRangeId(partitionKeyRangeId), + + FailureValidator.builder() + .instanceOf(UnauthorizedException.class) + .resourceAddress("dbs/db/colls/col") + .lsn(lsn) + .partitionKeyRangeId(partitionKeyRangeId) + }, + { + HttpClientMockWrapper. + httpClientBehaviourBuilder() + .withContent("").withStatus(403) + .withHeaderLSN(lsn) + .withHeaderPartitionKeyRangeId(partitionKeyRangeId), + + FailureValidator.builder() + .instanceOf(ForbiddenException.class) + .resourceAddress("dbs/db/colls/col") + .lsn(lsn) + .partitionKeyRangeId(partitionKeyRangeId) + }, + { + HttpClientMockWrapper. + httpClientBehaviourBuilder() + .withContent("").withStatus(404) + .withHeaderLSN(lsn) + .withHeaderPartitionKeyRangeId(partitionKeyRangeId), + + FailureValidator.builder() + .instanceOf(NotFoundException.class) + .resourceAddress("dbs/db/colls/col") + .lsn(lsn) + .partitionKeyRangeId(partitionKeyRangeId) + + }, + { + HttpClientMockWrapper. + httpClientBehaviourBuilder() + .withContent("").withStatus(404) + .withHeaderLSN(lsn) + .withHeaderPartitionKeyRangeId(partitionKeyRangeId) + .withHeaders(HttpConstants.HttpHeaders.CONTENT_TYPE, "text/html"), + + FailureValidator.builder() + .instanceOf(GoneException.class) + .resourceAddress("dbs/db/colls/col") + .lsn(lsn) + .partitionKeyRangeId(partitionKeyRangeId) + + }, + { + HttpClientMockWrapper. + httpClientBehaviourBuilder() + .withContent("").withStatus(400) + .withHeaderLSN(lsn) + .withHeaderPartitionKeyRangeId(partitionKeyRangeId), + + FailureValidator.builder() + .instanceOf(BadRequestException.class) + .resourceAddress("dbs/db/colls/col") + .lsn(lsn) + .partitionKeyRangeId(partitionKeyRangeId) + }, + { + HttpClientMockWrapper. + httpClientBehaviourBuilder() + .withContent("").withStatus(405) + .withHeaderLSN(lsn) + .withHeaderPartitionKeyRangeId(partitionKeyRangeId), + + FailureValidator.builder() + .instanceOf(MethodNotAllowedException.class) + .resourceAddress("dbs/db/colls/col") + .lsn(lsn) + .partitionKeyRangeId(partitionKeyRangeId) + }, + { + HttpClientMockWrapper. + httpClientBehaviourBuilder() + .withContent("").withStatus(409) + .withHeaderLSN(lsn) + .withHeaderPartitionKeyRangeId(partitionKeyRangeId), + + FailureValidator.builder() + .instanceOf(ConflictException.class) + .resourceAddress("dbs/db/colls/col") + .lsn(lsn) + .partitionKeyRangeId(partitionKeyRangeId) + }, + { + HttpClientMockWrapper. + httpClientBehaviourBuilder() + .withContent("").withStatus(412) + .withHeaderLSN(lsn) + .withHeaderPartitionKeyRangeId(partitionKeyRangeId), + + FailureValidator.builder() + .instanceOf(PreconditionFailedException.class) + .resourceAddress("dbs/db/colls/col") + .lsn(lsn) + .partitionKeyRangeId(partitionKeyRangeId) + }, + { + HttpClientMockWrapper. 
+ httpClientBehaviourBuilder() + .withContent("").withStatus(412) + .withHeaderLSN(lsn) + .withHeaderPartitionKeyRangeId(partitionKeyRangeId), + + FailureValidator.builder() + .instanceOf(PreconditionFailedException.class) + .resourceAddress("dbs/db/colls/col") + .lsn(lsn) + .partitionKeyRangeId(partitionKeyRangeId) + }, + { + HttpClientMockWrapper. + httpClientBehaviourBuilder() + .withContent("").withStatus(413) + .withHeaderLSN(lsn) + .withHeaderPartitionKeyRangeId(partitionKeyRangeId), + + FailureValidator.builder() + .instanceOf(RequestEntityTooLargeException.class) + .resourceAddress("dbs/db/colls/col") + .lsn(lsn) + .partitionKeyRangeId(partitionKeyRangeId) + }, + { + HttpClientMockWrapper. + httpClientBehaviourBuilder() + .withContent("").withStatus(423) + .withHeaderLSN(lsn) + .withHeaderPartitionKeyRangeId(partitionKeyRangeId), + + FailureValidator.builder() + .instanceOf(LockedException.class) + .resourceAddress("dbs/db/colls/col") + .lsn(lsn) + .partitionKeyRangeId(partitionKeyRangeId) + }, + { + HttpClientMockWrapper. + httpClientBehaviourBuilder() + .withContent("").withStatus(503) + .withHeaderLSN(lsn) + .withHeaderPartitionKeyRangeId(partitionKeyRangeId), + + FailureValidator.builder() + .instanceOf(ServiceUnavailableException.class) + .resourceAddress("dbs/db/colls/col") + .lsn(lsn) + .partitionKeyRangeId(partitionKeyRangeId) + }, + { + HttpClientMockWrapper. + httpClientBehaviourBuilder() + .withContent("").withStatus(408) + .withHeaderLSN(lsn) + .withHeaderPartitionKeyRangeId(partitionKeyRangeId), + + FailureValidator.builder() + .instanceOf(RequestTimeoutException.class) + .resourceAddress("dbs/db/colls/col") + .lsn(lsn) + .partitionKeyRangeId(partitionKeyRangeId) + }, + { + HttpClientMockWrapper. + httpClientBehaviourBuilder() + .withContent("").withStatus(449) + .withHeaderLSN(lsn) + .withHeaderPartitionKeyRangeId(partitionKeyRangeId), + + FailureValidator.builder() + .instanceOf(RetryWithException.class) + .resourceAddress("dbs/db/colls/col") + .lsn(lsn) + .partitionKeyRangeId(partitionKeyRangeId) + }, + { + HttpClientMockWrapper. + httpClientBehaviourBuilder() + .withContent("").withStatus(429) + .withHeaderLSN(lsn) + .withHeaderPartitionKeyRangeId(partitionKeyRangeId), + + FailureValidator.builder() + .instanceOf(RequestRateTooLargeException.class) + .resourceAddress("dbs/db/colls/col") + .lsn(lsn) + .partitionKeyRangeId(partitionKeyRangeId) + }, + { + HttpClientMockWrapper. + httpClientBehaviourBuilder() + .withContent("").withStatus(500) + .withHeaderLSN(lsn) + .withHeaderPartitionKeyRangeId(partitionKeyRangeId), + + FailureValidator.builder() + .instanceOf(InternalServerErrorException.class) + .resourceAddress("dbs/db/colls/col") + .lsn(lsn) + .partitionKeyRangeId(partitionKeyRangeId) + }, + { + HttpClientMockWrapper. + httpClientBehaviourBuilder() + .withContent("").withStatus(410) + .withHeaderLSN(lsn) + .withHeaderPartitionKeyRangeId(partitionKeyRangeId) + .withHeaderSubStatusCode(HttpConstants.SubStatusCodes.NAME_CACHE_IS_STALE), + + FailureValidator.builder() + .instanceOf(InvalidPartitionException.class) + .resourceAddress("dbs/db/colls/col") + .lsn(lsn) + .partitionKeyRangeId(partitionKeyRangeId) + }, + { + HttpClientMockWrapper. 
+ httpClientBehaviourBuilder() + .withContent("").withStatus(410) + .withHeaderLSN(lsn) + .withHeaderPartitionKeyRangeId(partitionKeyRangeId) + .withHeaderSubStatusCode(HttpConstants.SubStatusCodes.PARTITION_KEY_RANGE_GONE), + + FailureValidator.builder() + .instanceOf(PartitionKeyRangeGoneException.class) + .resourceAddress("dbs/db/colls/col") + .lsn(lsn) + .partitionKeyRangeId(partitionKeyRangeId) + }, + { + HttpClientMockWrapper. + httpClientBehaviourBuilder() + .withContent("").withStatus(410) + .withHeaderLSN(lsn) + .withHeaderPartitionKeyRangeId(partitionKeyRangeId) + .withHeaderSubStatusCode(HttpConstants.SubStatusCodes.COMPLETING_SPLIT), + + FailureValidator.builder() + .instanceOf(PartitionKeyRangeIsSplittingException.class) + .resourceAddress("dbs/db/colls/col") + .lsn(lsn) + .partitionKeyRangeId(partitionKeyRangeId) + }, + { + HttpClientMockWrapper. + httpClientBehaviourBuilder() + .withContent("").withStatus(410) + .withHeaderLSN(lsn) + .withHeaderPartitionKeyRangeId(partitionKeyRangeId) + .withHeaderSubStatusCode(HttpConstants.SubStatusCodes.COMPLETING_PARTITION_MIGRATION), + + FailureValidator.builder() + .instanceOf(PartitionIsMigratingException.class) + .resourceAddress("dbs/db/colls/col") + .lsn(lsn) + .partitionKeyRangeId(partitionKeyRangeId) + }, + { + HttpClientMockWrapper. + httpClientBehaviourBuilder() + .withContent("").withStatus(410) + .withHeaderLSN(lsn) + .withHeaderPartitionKeyRangeId(partitionKeyRangeId) + .withHeaderSubStatusCode(0), + + FailureValidator.builder() + .instanceOf(GoneException.class) + .resourceAddress("dbs/db/colls/col") + .lsn(lsn) + .partitionKeyRangeId(partitionKeyRangeId) + }, + }; + } + + /** + * Validates the error handling behaviour of HttpTransportClient for https status codes >= 400 + * @param mockedResponseBuilder + * @param failureValidatorBuilder + */ + @Test(groups = "unit", dataProvider = "fromMockedHttpResponseToExpectedDocumentClientException") + public void failuresWithHttpStatusCodes(HttpClientMockWrapper.HttpClientBehaviourBuilder mockedResponseBuilder, + FailureValidator.Builder failureValidatorBuilder) { + HttpClientMockWrapper httpClientMockWrapper = new HttpClientMockWrapper(mockedResponseBuilder); + UserAgentContainer userAgentContainer = new UserAgentContainer(); + HttpTransportClient transportClient = getHttpTransportClientUnderTest( + 100, + userAgentContainer, + httpClientMockWrapper.getClient()); + RxDocumentServiceRequest request = RxDocumentServiceRequest.createFromName( + OperationType.Create, "dbs/db/colls/col", ResourceType.Document); + request.setContentBytes(new byte[0]); + + Mono storeResp = transportClient.invokeStoreAsync( + physicalAddress, + request); + + validateFailure(storeResp, failureValidatorBuilder.build()); + } + + @DataProvider(name = "fromMockedNetworkFailureToExpectedDocumentClientException") + public Object[][] fromMockedNetworkFailureToExpectedDocumentClientException() { + return new Object[][]{ + // create request, retriable network exception + { + createRequestFromName( + OperationType.Create, "dbs/db/colls/col", ResourceType.Document), + + HttpClientMockWrapper. + httpClientBehaviourBuilder() + .withNetworkFailure(new UnknownHostException()), + + FailureValidator.builder() + .instanceOf(GoneException.class) + }, + + // create request, retriable network exception + { + createRequestFromName( + OperationType.Create, "dbs/db/colls/col", ResourceType.Document), + + HttpClientMockWrapper. 
+ httpClientBehaviourBuilder() + .withNetworkFailure(new UnknownHostException()), + + FailureValidator.builder() + .instanceOf(GoneException.class) + }, + + // create request, retriable network exception + { + createRequestFromName( + OperationType.Create, "dbs/db/colls/col", ResourceType.Document), + + HttpClientMockWrapper. + httpClientBehaviourBuilder() + .withNetworkFailure(new ConnectTimeoutException()), + + FailureValidator.builder() + .instanceOf(GoneException.class) + }, + + // read request, retriable network exception + { + createRequestFromName( + OperationType.Read, "dbs/db/colls/col", ResourceType.Document), + + HttpClientMockWrapper. + httpClientBehaviourBuilder() + .withNetworkFailure(new ConnectTimeoutException()), + + FailureValidator.builder() + .instanceOf(GoneException.class) + }, + + // create request, non-retriable network exception + { + createRequestFromName( + OperationType.Create, "dbs/db/colls/col", ResourceType.Document), + + HttpClientMockWrapper. + httpClientBehaviourBuilder() + .withNetworkFailure(new RuntimeException()), + + FailureValidator.builder() + .instanceOf(ServiceUnavailableException.class) + }, + + // read request, non-retriable network exception + { + createRequestFromName( + OperationType.Read, "dbs/db/colls/col", ResourceType.Document), + + HttpClientMockWrapper. + httpClientBehaviourBuilder() + .withNetworkFailure(new RuntimeException()), + + FailureValidator.builder() + .instanceOf(GoneException.class) + }, + }; + } + + /** + * Validates the error handling behaviour of HttpTransportClient for network failures from which http status codes + * cannot be derived. For example Socket Connection failure. + * @param request + * @param mockedResponseBuilder + * @param failureValidatorBuilder + */ + @Test(groups = "unit", dataProvider = "fromMockedNetworkFailureToExpectedDocumentClientException") + public void networkFailures(RxDocumentServiceRequest request, + HttpClientMockWrapper.HttpClientBehaviourBuilder mockedResponseBuilder, + FailureValidator.Builder failureValidatorBuilder) { + HttpClientMockWrapper httpClientMockWrapper = new HttpClientMockWrapper(mockedResponseBuilder); + UserAgentContainer userAgentContainer = new UserAgentContainer(); + HttpTransportClient transportClient = getHttpTransportClientUnderTest( + 100, + userAgentContainer, + httpClientMockWrapper.getClient()); + + Mono storeResp = transportClient.invokeStoreAsync( + physicalAddress, + request); + + validateFailure(storeResp, failureValidatorBuilder.build()); + } + + private static RxDocumentServiceRequest createRequestFromName( + OperationType operationType, + String resourceFullName, + ResourceType resourceType) { + return createRequestFromName(operationType, resourceFullName, resourceType, new byte[0]); + } + + private static RxDocumentServiceRequest createRequestFromName( + OperationType operationType, + String resourceFullName, + ResourceType resourceType, + byte[] content) { + RxDocumentServiceRequest req = RxDocumentServiceRequest.create( + operationType, + resourceType, + resourceFullName, + new HashMap<>()); + + req.setContentBytes(content); + return req; + } + + public void validateSuccess(Mono single, StoreResponseValidator validator) { + validateSuccess(single, validator, TIMEOUT); + } + + public static void validateSuccess(Mono single, + StoreResponseValidator validator, long timeout) { + + TestSubscriber testSubscriber = new TestSubscriber<>(); + single.subscribe(testSubscriber); + testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS); + 
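// A successful invocation is expected to emit exactly one StoreResponse and then complete. + 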
testSubscriber.assertNoErrors(); + testSubscriber.assertComplete(); + testSubscriber.assertValueCount(1); + validator.validate(testSubscriber.values().get(0)); + } + + public void validateFailure(Mono single, + FailureValidator validator) { + validateFailure(single, validator, TIMEOUT); + } + + public static void validateFailure(Mono single, + FailureValidator validator, long timeout) { + + TestSubscriber testSubscriber = new TestSubscriber<>(); + single.subscribe(testSubscriber); + testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS); + testSubscriber.assertNotComplete(); + testSubscriber.assertTerminated(); + Assertions.assertThat(testSubscriber.errorCount()).isEqualTo(1); + validator.validate(testSubscriber.errors().get(0)); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/HttpUtilsTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/HttpUtilsTest.java new file mode 100644 index 0000000000000..9a3e804c3f799 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/HttpUtilsTest.java @@ -0,0 +1,64 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2019 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.internal.directconnectivity.HttpUtils; +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.http.HttpHeaders; +import com.azure.data.cosmos.internal.http.HttpResponse; +import org.mockito.Mockito; +import org.testng.annotations.Test; + +import java.util.List; +import java.util.Map.Entry; +import java.util.Set; + +import static org.assertj.core.api.Assertions.assertThat; + +public class HttpUtilsTest { + + private static final String OWNER_FULL_NAME_VALUE = "dbs/RxJava.SDKTest.SharedDatabase_20190304T121302_iZc/colls/+%20-_,:.%7C~b2d67001-9000-454e-a140-abceb1756c48%20+-_,:.%7C~"; + + @Test(groups = { "unit" }) + public void verifyConversionOfHttpResponseHeadersToMap() { + HttpHeaders headersMap = new HttpHeaders(1); + headersMap.set(HttpConstants.HttpHeaders.OWNER_FULL_NAME, OWNER_FULL_NAME_VALUE); + + HttpResponse httpResponse = Mockito.mock(HttpResponse.class); + Mockito.when(httpResponse.headers()).thenReturn(headersMap); + HttpHeaders httpResponseHeaders = httpResponse.headers(); + Set<Entry<String, String>> resultHeadersSet = HttpUtils.asMap(httpResponseHeaders).entrySet(); + + assertThat(resultHeadersSet.size()).isEqualTo(1); + Entry<String, String> entry = resultHeadersSet.iterator().next(); + assertThat(entry.getKey()).isEqualTo(HttpConstants.HttpHeaders.OWNER_FULL_NAME); + assertThat(entry.getValue()).isEqualTo(HttpUtils.urlDecode(OWNER_FULL_NAME_VALUE)); + + List<Entry<String, String>> resultHeadersList = HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()); + assertThat(resultHeadersList.size()).isEqualTo(1); + entry = resultHeadersList.iterator().next(); + assertThat(entry.getKey()).isEqualTo(HttpConstants.HttpHeaders.OWNER_FULL_NAME); + assertThat(entry.getValue()).isEqualTo(HttpUtils.urlDecode(OWNER_FULL_NAME_VALUE)); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/MultiStoreResultValidator.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/MultiStoreResultValidator.java new file mode 100644 index 0000000000000..fe1652ca17eaf --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/MultiStoreResultValidator.java @@ -0,0 +1,176 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */
+
+package com.azure.data.cosmos.internal.directconnectivity;
+
+import com.azure.data.cosmos.CosmosClientException;
+import com.azure.data.cosmos.internal.FailureValidator;
+import com.google.common.base.Predicates;
+import org.apache.commons.lang3.mutable.MutableObject;
+import org.assertj.core.description.Description;
+import org.assertj.core.description.TextDescription;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.BiFunction;
+import java.util.function.Predicate;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.AssertionsForClassTypes.fail;
+
+/**
+ * This is meant to be used when reading multiple replicas for the same item.
+ */
+public interface MultiStoreResultValidator {
+
+    static Builder create() {
+        return new Builder();
+    }
+
+    void validate(List<StoreResult> storeResults);
+
+    class Builder {
+        private List<MultiStoreResultValidator> validators = new ArrayList<>();
+
+        public MultiStoreResultValidator build() {
+            return new MultiStoreResultValidator() {
+
+                @SuppressWarnings({"rawtypes", "unchecked"})
+                @Override
+                public void validate(List<StoreResult> storeResults) {
+                    for (MultiStoreResultValidator validator : validators) {
+                        validator.validate(storeResults);
+                    }
+                }
+            };
+        }
+
+        public Builder validateEachWith(StoreResultValidator storeResultValidator) {
+            validators.add(new MultiStoreResultValidator() {
+
+                @Override
+                public void validate(List<StoreResult> storeResults) {
+                    for(StoreResult srr: storeResults) {
+                        storeResultValidator.validate(srr);
+                    }
+                }
+            });
+            return this;
+        }
+
+        public Builder validateEachWith(StoreResponseValidator storeResponseValidator) {
+            validators.add(new MultiStoreResultValidator() {
+
+                @Override
+                public void validate(List<StoreResult> storeResults) {
+                    for(StoreResult srr: storeResults) {
+                        try {
+                            storeResponseValidator.validate(srr.toResponse());
+                        } catch (CosmosClientException e) {
+                            fail(e.getMessage());
+                        }
+                    }
+                }
+            });
+            return this;
+        }
+
+        public Builder withMinimumLSN(long minimumLSN) {
+            this.validateEachWith(StoreResultValidator.create().withMinLSN(minimumLSN).build());
+            return this;
+        }
+
+        public <T> Builder withAggregate(BiFunction<StoreResult, T, T> aggregator,
+                                         T initialValue,
+                                         Predicate<T> finalValuePredicate,
+                                         Description description) {
+            MutableObject<T> total = new MutableObject<>(initialValue);
+            validators.add(new MultiStoreResultValidator() {
+
+                @Override
+                public void validate(List<StoreResult> storeResults) {
+                    for(StoreResult srr: storeResults) {
+                        total.setValue(aggregator.apply(srr, total.getValue()));
+                    }
+
+                    assertThat(finalValuePredicate.test(total.getValue()))
+                            .describedAs(Description.mostRelevantDescription(description,
+                                    String.format("actual value %s.",
+                                            total.getValue().toString())))
+                            .isTrue();
+                }
+            });
+            return this;
+        }
+
+        public Builder withTotalRequestCharge(double totalExpectedRC) {
+            this.withAggregate((srr, v) -> srr.requestCharge + v.doubleValue(),
+                    0d,
+                    Predicates.equalTo(totalExpectedRC),
+                    new TextDescription("total request charge is expected to be %f", totalExpectedRC));
+            return this;
+        }
+
+        public Builder withNonZeroRequestCharge() {
+
+            this.withAggregate((srr, v) -> srr.requestCharge + v.doubleValue(),
+                    0d,
+                    aDouble -> aDouble > 0,
+                    new TextDescription("total request charge expected to be greater than 0"));
+            return this;
+        }
+
+        public Builder validateEachWith(FailureValidator failureValidator) {
+            validators.add(new MultiStoreResultValidator() {
+
+                @Override
+                public void validate(List<StoreResult> storeResults) {
+                    for(StoreResult srr: storeResults) {
+                        try {
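+                            // hand each replica result's stored exception to the failure validator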
failureValidator.validate(srr.getException()); + } catch (CosmosClientException e) { + fail(e.getMessage()); + } + } + } + }); + return this; + } + + public Builder noFailure() { + this.validateEachWith(StoreResultValidator.create().isValid().noException().build()); + return this; + } + + public Builder withSize(int expectedNumber) { + validators.add(new MultiStoreResultValidator() { + + @Override + public void validate(List storeResults) { + assertThat(storeResults).hasSize(expectedNumber); + } + }); + return this; + } + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/MurmurHash3_32Test.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/MurmurHash3_32Test.java new file mode 100644 index 0000000000000..05cd764baa415 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/MurmurHash3_32Test.java @@ -0,0 +1,115 @@ +/* + * + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.internal.routing.MurmurHash3_32; +import com.google.common.hash.HashFunction; +import com.google.common.hash.Hashing; +import org.apache.commons.lang3.RandomUtils; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; + +import java.io.UnsupportedEncodingException; +import java.nio.charset.Charset; + +import static org.assertj.core.api.Assertions.assertThat; + +/** + * validates {@link MurmurHash3_32} against Google's murmur3_32 implementation. 
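+ * <p>
+ * For illustration, every case below follows the same cross-check (the seed 0, the
+ * {@code hash(byte[], length, seed)} call and the Guava reference are exactly what the
+ * tests use; only the sample input is chosen arbitrarily from the cases):
+ * <pre>{@code
+ * byte[] bytes = "test".getBytes(Charset.forName("UTF-8"));
+ * int actual = new MurmurHash3_32().hash(bytes, bytes.length, 0);   // SDK implementation under test
+ * int expected = Hashing.murmur3_32(0).hashBytes(bytes).asInt();    // Google Guava reference
+ * assertThat(actual).isEqualTo(expected);
+ * }</pre>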
+ */ +public class MurmurHash3_32Test { + + private MurmurHash3_32 murmurHash3_32; + + @BeforeClass(groups = "unit") + public void setup() { + murmurHash3_32 = new MurmurHash3_32(); + } + + @Test(groups = "unit") + public void murmurHash3_32_EmptyByteArray() { + byte[] byteArray = new byte[0]; + int actualHash = murmurHash3_32.hash(byteArray, byteArray.length, 0); + + HashFunction googleMurmur3_32 = Hashing.murmur3_32(0); + int expectedHash = googleMurmur3_32.hashBytes(byteArray).asInt(); + + assertThat(actualHash).isEqualTo(expectedHash); + } + + @Test(groups = "unit") + public void murmurHash3_32_String() { + byte[] byteArray = new String("test").getBytes(Charset.forName("UTF-8")); + int actualHash = murmurHash3_32.hash(byteArray, byteArray.length, 0); + + HashFunction googleMurmur3_32 = Hashing.murmur3_32(0); + int expectedHash = googleMurmur3_32.hashBytes(byteArray).asInt(); + + assertThat(actualHash).isEqualTo(expectedHash); + } + + @Test(groups = "unit") + public void murmurHash3_32_NonLatin() throws UnsupportedEncodingException { + String nonLatin = "абвгдеёжзийклмнопрстуфхцчшщъыьэюяабвгдеёжзийклмнопрстуфхцчшщъыьэюяабвгдеёжзийклмнопрстуфхцчшщъыьэюяабвгдеёжзийклмнопрстуфхцчшщъыьэюя"; + for(int i = 0; i < nonLatin.length() + 1; i++) { + byte[] byteArray = nonLatin.substring(0, i).getBytes("UTF-8"); + int actualHash = murmurHash3_32.hash(byteArray, byteArray.length, 0); + + HashFunction googleMurmur3_32 = Hashing.murmur3_32(0); + int expectedHash = googleMurmur3_32.hashBytes(byteArray).asInt(); + + assertThat(actualHash).isEqualTo(expectedHash); + } + } + + @Test(groups = "unit") + public void murmurHash3_32_ZeroByteArray() { + byte[] byteArray = new byte[3]; + int actualHash = murmurHash3_32.hash(byteArray, byteArray.length, 0); + + HashFunction googleMurmur3_32 = Hashing.murmur3_32(0); + int expectedHash = googleMurmur3_32.hashBytes(byteArray).asInt(); + + assertThat(actualHash).isEqualTo(expectedHash); + } + + @Test(groups = "unit") + public void murmurHash3_32_RandomBytesOfAllSizes() { + for(int i = 0; i < 1000; i++) { + byte[] byteArray = randomBytes(i); + + int actualHash = murmurHash3_32.hash(byteArray, byteArray.length, 0); + + HashFunction googleMurmur3_32 = Hashing.murmur3_32(0); + int expectedHash = googleMurmur3_32.hashBytes(byteArray).asInt(); + + assertThat(actualHash).isEqualTo(expectedHash); + } + } + + private byte[] randomBytes(int count) { + return RandomUtils.nextBytes(count); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/PartitionKeyInternalTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/PartitionKeyInternalTest.java new file mode 100644 index 0000000000000..b93d46d986bde --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/PartitionKeyInternalTest.java @@ -0,0 +1,473 @@ +/* + * + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and permission notice shall be included in all + * copies or substantial portions of the 
Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.CommonsBridgeInternal; +import com.azure.data.cosmos.PartitionKeyDefinition; +import com.azure.data.cosmos.PartitionKind; +import com.azure.data.cosmos.internal.Undefined; +import com.azure.data.cosmos.internal.RMResources; +import com.azure.data.cosmos.internal.routing.PartitionKeyInternal; +import com.azure.data.cosmos.internal.routing.PartitionKeyInternalHelper; +import com.azure.data.cosmos.internal.routing.PartitionKeyInternalUtils; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; + +import java.util.ArrayList; +import java.util.function.BiFunction; + +import static org.assertj.core.api.AssertionsForClassTypes.assertThat; +import static org.assertj.core.api.AssertionsForClassTypes.fail; + +public class PartitionKeyInternalTest { + + /** + * Tests serialization of empty partition key. + */ + @Test(groups="unit") + public void emptyPartitionKey() { + String json = "[]"; + PartitionKeyInternal partitionKey = PartitionKeyInternal.fromJsonString(json); + assertThat(partitionKey).isEqualTo(PartitionKeyInternal.getEmpty()); + assertThat(partitionKey.toJson()).isEqualTo("[]"); + } + + /** + * Tests serialization of various types. 
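+     * <p>
+     * As the assertions below show, a JSON {@code {}} component deserializes to
+     * {@link Undefined} and integral numbers are re-serialized as doubles, e.g.
+     * <pre>{@code
+     * PartitionKeyInternal.fromJsonString("[\"aa\", null, true, false, {}, 5, 5.5]").toJson()
+     * // returns "[\"aa\",null,true,false,{},5.0,5.5]"
+     * }</pre>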
+ */ + @Test(groups="unit") + public void variousTypes() { + String json = "[\"aa\", null, true, false, {}, 5, 5.5]"; + PartitionKeyInternal partitionKey = PartitionKeyInternal.fromJsonString(json); + assertThat(partitionKey).isEqualTo( + PartitionKeyInternal.fromObjectArray( + Lists.newArrayList(new Object[]{"aa", null, true, false, Undefined.Value(), 5, 5.5}), true)); + + assertThat(partitionKey.toJson()).isEqualTo("[\"aa\",null,true,false,{},5.0,5.5]"); + } + + /** + * Tests deserialization of empty string + */ + @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class) + public void deserializeEmptyString() { + PartitionKeyInternal.fromJsonString(""); + } + + /** + * Tests deserialization of null + */ + @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class) + public void deserializeNull() { + PartitionKeyInternal.fromJsonString(null); + } + + /** + * Tests deserialization of invalid partition key + */ + @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class) + public void invalidString() { + PartitionKeyInternal.fromJsonString("[aa]"); + } + + + /** + * Tests deserialization of invalid partition key + */ + @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class) + public void invalidNumber() { + PartitionKeyInternal.fromJsonString("[1.a]"); + } + + /** + * Tests deserialization of invalid partition key + */ + @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class) + public void missingBraces() { + PartitionKeyInternal.fromJsonString("[{]"); + } + + /** + * Missing Value + */ + @Test(groups = "unit") + public void missingValue() { + try { + PartitionKeyInternal.fromJsonString(""); + fail("should throw"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage()).isEqualTo( + String.format( + RMResources.UnableToDeserializePartitionKeyValue, "")); + } + } + + /** + * Tests serialization of infinity value. + */ + @Test(groups = "unit") + public void maxValue() { + String json = "\"Infinity\""; + PartitionKeyInternal partitionKey = PartitionKeyInternal.fromJsonString(json); + assertThat(partitionKey).isEqualTo(PartitionKeyInternal.ExclusiveMaximum); + } + + /** + * Tests serialization of minimum value. + */ + @Test(groups = "unit") + public void minValue() { + String json = "[]"; + PartitionKeyInternal partitionKey = PartitionKeyInternal.fromJsonString(json); + assertThat(partitionKey).isEqualTo(PartitionKeyInternal.InclusiveMinimum); + } + + /** + * Tests serialization of undefined value. + */ + @Test(groups = "unit") + public void undefinedValue() { + String json = "[]"; + PartitionKeyInternal partitionKey = PartitionKeyInternal.fromJsonString(json); + assertThat(partitionKey).isEqualTo(PartitionKeyInternal.Empty); + } + + /** + * Tests JsonConvert.DefaultSettings that could cause indentation. + */ + @Test(groups="unit") + public void jsonConvertDefaultSettings() { + String json = "[123.0]"; + PartitionKeyInternal partitionKey = PartitionKeyInternal.fromJsonString(json); + assertThat(partitionKey.toJson()).isEqualTo(json); + } + + /** + * Tests unicode characters in partition key + */ + @Test(groups="unit") + public void unicodeCharacters() { + String json = "[\"电脑\"]"; + PartitionKeyInternal partitionKey = PartitionKeyInternal.fromJsonString(json); + assertThat(partitionKey.toJson()).isEqualTo("[\"\u7535\u8111\"]"); + } + + /** + * Tests partition key value comparisons. 
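+     * <p>
+     * The cases below pin down the cross-type ordering used by the comparator:
+     * empty key &lt; undefined ({@code {}}) &lt; null &lt; false &lt; true &lt; numbers &lt; strings &lt; "Infinity" (the exclusive maximum).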
+ */ + @Test(groups="unit") + public void comparison() { + verifyComparison("[]", "[]", 0); + verifyComparison("[]", "[{}]", -1); + verifyComparison("[]", "[false]", -1); + verifyComparison("[]", "[true]", -1); + verifyComparison("[]", "[null]", -1); + verifyComparison("[]", "[2]", -1); + verifyComparison("[]", "[\"aa\"]", -1); + verifyComparison("[]", "\"Infinity\"", -1); + + verifyComparison("[{}]", "[]", 1); + verifyComparison("[{}]", "[{}]", 0); + verifyComparison("[{}]", "[false]", -1); + verifyComparison("[{}]", "[true]", -1); + verifyComparison("[{}]", "[null]", -1); + verifyComparison("[{}]", "[2]", -1); + verifyComparison("[{}]", "[\"aa\"]", -1); + verifyComparison("[{}]", "\"Infinity\"", -1); + + verifyComparison("[false]", "[]", 1); + verifyComparison("[false]", "[{}]", 1); + verifyComparison("[false]", "[null]", 1); + verifyComparison("[false]", "[false]", 0); + verifyComparison("[false]", "[true]", -1); + verifyComparison("[false]", "[2]", -1); + verifyComparison("[false]", "[\"aa\"]", -1); + verifyComparison("[false]", "\"Infinity\"", -1); + + verifyComparison("[true]", "[]", 1); + verifyComparison("[true]", "[{}]", 1); + verifyComparison("[true]", "[null]", 1); + verifyComparison("[true]", "[false]", 1); + verifyComparison("[true]", "[true]", 0); + verifyComparison("[true]", "[2]", -1); + verifyComparison("[true]", "[\"aa\"]", -1); + verifyComparison("[true]", "\"Infinity\"", -1); + + verifyComparison("[null]", "[]", 1); + verifyComparison("[null]", "[{}]", 1); + verifyComparison("[null]", "[null]", 0); + verifyComparison("[null]", "[false]", -1); + verifyComparison("[null]", "[true]", -1); + verifyComparison("[null]", "[2]", -1); + verifyComparison("[null]", "[\"aa\"]", -1); + verifyComparison("[null]", "\"Infinity\"", -1); + + verifyComparison("[2]", "[]", 1); + verifyComparison("[2]", "[{}]", 1); + verifyComparison("[2]", "[null]", 1); + verifyComparison("[2]", "[false]", 1); + verifyComparison("[2]", "[true]", 1); + verifyComparison("[1]", "[2]", -1); + verifyComparison("[2]", "[2]", 0); + verifyComparison("[3]", "[2]", 1); + verifyComparison("[2.1234344]", "[2]", 1); + verifyComparison("[2]", "[\"aa\"]", -1); + verifyComparison("[2]", "\"Infinity\"", -1); + + verifyComparison("[\"aa\"]", "[]", 1); + verifyComparison("[\"aa\"]", "[{}]", 1); + verifyComparison("[\"aa\"]", "[null]", 1); + verifyComparison("[\"aa\"]", "[false]", 1); + verifyComparison("[\"aa\"]", "[true]", 1); + verifyComparison("[\"aa\"]", "[2]", 1); + verifyComparison("[\"\"]", "[\"aa\"]", -1); + verifyComparison("[\"aa\"]", "[\"aa\"]", 0); + verifyComparison("[\"b\"]", "[\"aa\"]", 1); + verifyComparison("[\"aa\"]", "\"Infinity\"", -1); + + verifyComparison("\"Infinity\"", "[]", 1); + verifyComparison("\"Infinity\"", "[{}]", 1); + verifyComparison("\"Infinity\"", "[null]", 1); + verifyComparison("\"Infinity\"", "[false]", 1); + verifyComparison("\"Infinity\"", "[true]", 1); + verifyComparison("\"Infinity\"", "[2]", 1); + verifyComparison("\"Infinity\"", "[\"aa\"]", 1); + verifyComparison("\"Infinity\"", "\"Infinity\"", 0); + } + + /** + * Tests that invalid partition key value will throw an exception. + */ + @Test(groups = "unit", expectedExceptions = IllegalArgumentException.class) + public void invalidPartitionKeyValue() { + PartitionKeyInternal.fromObjectArray( + Lists.newArrayList(new Object[]{2, true, new StringBuilder()}), true); + } + + /** + * Tests {@link PartitionKeyInternal#contains(PartitionKeyInternal)} method. 
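+     * <p>
+     * Containment is a prefix relation over the key components: the empty key contains every
+     * key, and a single-component key contains any multi-component key that starts with the
+     * same value, for example:
+     * <pre>{@code
+     * PartitionKeyInternal.fromJsonString("[2]").contains(PartitionKeyInternal.fromJsonString("[2, \"USA\"]"));   // true
+     * PartitionKeyInternal.fromJsonString("[1]").contains(PartitionKeyInternal.fromJsonString("[2, \"USA\"]"));   // false
+     * }</pre>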
+ */ + @Test(groups="unit") + public void contains() { + BiFunction verifyContains = (parentPartitionKey, childPartitionKey) -> + PartitionKeyInternal.fromJsonString(parentPartitionKey) + .contains(PartitionKeyInternal.fromJsonString(childPartitionKey)); + + assertThat(verifyContains.apply("[]", "[]")).isTrue(); + assertThat(verifyContains.apply("[]", "[{}]")).isTrue(); + assertThat(verifyContains.apply("[]", "[null]")).isTrue(); + assertThat(verifyContains.apply("[]", "[true]")).isTrue(); + assertThat(verifyContains.apply("[]", "[false]")).isTrue(); + assertThat(verifyContains.apply("[]", "[2]")).isTrue(); + assertThat(verifyContains.apply("[]", "[\"fdfd\"]")).isTrue(); + + assertThat(verifyContains.apply("[2]", "[]")).isFalse(); + assertThat(verifyContains.apply("[2]", "[2]")).isTrue(); + assertThat(verifyContains.apply("[2]", "[2, \"USA\"]")).isTrue(); + assertThat(verifyContains.apply("[1]", "[2, \"USA\"]")).isFalse(); + } + + @Test(groups="unit") + public void invalidPartitionKeyValueNonStrict() { + assertThat(PartitionKeyInternal.fromObjectArray(new Object[]{2, true, Undefined.Value()}, true)) + .isEqualTo( + PartitionKeyInternal.fromObjectArray(new Object[]{2, true, new StringBuilder()}, false)); + } + + /** + * Tests constructing effective partition key value. + */ + @Test(groups="unit") + public void hashEffectivePartitionKey() { + + assertThat(PartitionKeyInternalHelper.getEffectivePartitionKeyString(PartitionKeyInternal.InclusiveMinimum, new PartitionKeyDefinition())) + .isEqualTo(PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey); + + assertThat( + PartitionKeyInternalHelper.getEffectivePartitionKeyString(PartitionKeyInternal.ExclusiveMaximum, new PartitionKeyDefinition())) + .isEqualTo(PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey); + + PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition(); + partitionKeyDefinition.paths(Lists.newArrayList("/A", "/B", "/C", "/E", "/F", "/G")); + + PartitionKeyInternal partitionKey = PartitionKeyInternal.fromObjectArray( + new Object[]{2, true, false, null, Undefined.Value(), "Привет!"}, true); + String effectivePartitionKey = PartitionKeyInternalHelper.getEffectivePartitionKeyString(partitionKey, partitionKeyDefinition); + + assertThat(effectivePartitionKey).isEqualTo("05C1D19581B37C05C0000302010008D1A0D281D1B9D1B3D1B6D2832200"); + } + + @DataProvider(name = "v2ParamProvider") + public Object[][] v2ParamProvider() { + return new Object[][] { + {"[5.0]", "19C08621B135968252FB34B4CF66F811"}, + { "[5.12312419050912359123]", "0EF2E2D82460884AF0F6440BE4F726A8"}, + {"[\"redmond\"]", "22E342F38A486A088463DFF7838A5963"}, + {"[true]", "0E711127C5B5A8E4726AC6DD306A3E59"}, + {"[false]", "2FE1BE91E90A3439635E0E9E37361EF2"}, + {"[]", ""}, + {"[null]", "378867E4430E67857ACE5C908374FE16"}, + {"[{}]", "11622DAA78F835834610ABE56EFF5CB5"}, + {"[5.0, \"redmond\", true, null]", "3032DECBE2AB1768D8E0AEDEA35881DF"}, + {"[\"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\"]", + "36375D21568760E891C9CB7002D5E059"}, + }; + } + + /** + * Tests binary encoding of partition key + */ + @Test(groups="unit", dataProvider = "v2ParamProvider") + public void partitionKeyBinaryEncodingV2(String partitionKeyRangeJson, String expectedHexEncoding) { + validateEffectivePartitionKeyV2(partitionKeyRangeJson, expectedHexEncoding); + } + + /** + * Tests that effective partition key produced by us and the backend is the same. 
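+     * <p>
+     * The expected values below exercise both encodings: the legacy (V1) encoding appears to
+     * embed the truncated key bytes directly, which is why the Latin-string value stops
+     * changing once the component reaches 100 characters, while the V2 encoding is a
+     * fixed-length hash, so every length yields a distinct value.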
+ */ + @Test(groups="unit") + public void managedNativeCompatibility() { + PartitionKeyInternal partitionKey = + PartitionKeyInternal.fromJsonString("[\"по-русски\",null,true,false,{},5.5]"); + + PartitionKeyDefinition pkDefinition = new PartitionKeyDefinition(); + pkDefinition.paths(ImmutableList.of("/field1", "/field2", "/field3", "/field4", "/field5", "/field6")); + + String effectivePartitionKey = PartitionKeyInternalHelper.getEffectivePartitionKeyString(partitionKey, pkDefinition); + assertThat("05C1D39FA55F0408D1C0D1BF2ED281D284D282D282D1BBD1B9000103020005C016").isEqualTo(effectivePartitionKey); + + String latin = "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz"; + String nonLatin = "абвгдеёжзийклмнопрстуфхцчшщъыьэюяабвгдеёжзийклмнопрстуфхцчшщъыьэюяабвгдеёжзийклмнопрстуфхцчшщъыьэюяабвгдеёжзийклмнопрстуфхцчшщъыьэюя"; + + verifyEffectivePartitionKeyEncoding(latin, 99, "05C19B2DC38FC00862636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F7071727374757600", false); + verifyEffectivePartitionKeyEncoding(latin, 99, "072D8FA3228DD2A6C0A7129C845700E6", true); + + verifyEffectivePartitionKeyEncoding(latin, 100, "05C1DD5D8149640862636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767700", false); + verifyEffectivePartitionKeyEncoding(latin, 100, "023D5F0B62EBEF22A43564F267193B4D", true); + + verifyEffectivePartitionKeyEncoding(latin, 101, "05C1DD5D8149640862636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767700", false); + verifyEffectivePartitionKeyEncoding(latin, 101, "357D83181DB32D35F58CDA3C9F2E0742", true); + + verifyEffectivePartitionKeyEncoding(latin, 102, "05C1DD5D8149640862636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767700", false); + verifyEffectivePartitionKeyEncoding(latin, 102, "12B320F72959AB449FD8E090C6B23B88", true); + + verifyEffectivePartitionKeyEncoding(latin, 103, "05C1DD5D8149640862636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767700", false); + verifyEffectivePartitionKeyEncoding(latin, 103, "25FD21A31C69A8C8AD994F7FAC2B2B9F", true); + + verifyEffectivePartitionKeyEncoding(latin, 104, "05C1DD5D8149640862636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767700", false); + verifyEffectivePartitionKeyEncoding(latin, 104, "1DC6FB1CF6E1228C506AA6C8735023C4", true); + + verifyEffectivePartitionKeyEncoding(latin, 105, "05C1DD5D8149640862636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767700", false); + verifyEffectivePartitionKeyEncoding(latin, 105, "308E1E7870956CE5D9BDAD01200E09BD", true); + + 
verifyEffectivePartitionKeyEncoding(latin, 106, "05C1DD5D8149640862636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767700", false); + verifyEffectivePartitionKeyEncoding(latin, 106, "362E21ABDEA7179DBDF7BF549DD8303B", true); + + verifyEffectivePartitionKeyEncoding(latin, 107, "05C1DD5D8149640862636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767700", false); + verifyEffectivePartitionKeyEncoding(latin, 107, "1EBE932ECEFA4F53CE339D31B6BF53FD", true); + + verifyEffectivePartitionKeyEncoding(latin, 108, "05C1DD5D8149640862636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767700", false); + verifyEffectivePartitionKeyEncoding(latin, 108, "3BFA3A6E9CBABA0EF756AEDEC66B1B3C", true); + + verifyEffectivePartitionKeyEncoding(latin, 109, "05C1DD5D8149640862636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767700", false); + verifyEffectivePartitionKeyEncoding(latin, 109, "2880BF78DE0CE2CD1B0120EDA22601C4", true); + + verifyEffectivePartitionKeyEncoding(latin, 110, "05C1DD5D8149640862636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767700", false); + verifyEffectivePartitionKeyEncoding(latin, 110, "1F3577D1D9CA7FC56100AED11F4DC646", true); + + verifyEffectivePartitionKeyEncoding(latin, 111, "05C1DD5D8149640862636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767700", false); + verifyEffectivePartitionKeyEncoding(latin, 111, "205A9EB61F3B063E61C6ED655C9220E6", true); + + verifyEffectivePartitionKeyEncoding(latin, 112, "05C1DD5D8149640862636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767700", false); + verifyEffectivePartitionKeyEncoding(latin, 112, "1152A43F1A852AFDDD4518C9CDD48616", true); + + verifyEffectivePartitionKeyEncoding(latin, 113, "05C1DD5D8149640862636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767700", false); + verifyEffectivePartitionKeyEncoding(latin, 113, "38E2EB2EF54012B5CA40CDA34F1C7736", true); + + verifyEffectivePartitionKeyEncoding(latin, 114, "05C1DD5D8149640862636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767700", false); + verifyEffectivePartitionKeyEncoding(latin, 114, "19BCC416843B9085DBBC18E8C7C80D72", true); + + verifyEffectivePartitionKeyEncoding(latin, 115, 
"05C1DD5D8149640862636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767700", false); + verifyEffectivePartitionKeyEncoding(latin, 115, "03F1BB89FD8E9747B047281E80FA2E84", true); + + verifyEffectivePartitionKeyEncoding(latin, 116, "05C1DD5D8149640862636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767778797A7B62636465666768696A6B6C6D6E6F707172737475767700", false); + verifyEffectivePartitionKeyEncoding(latin, 116, "2BA0757B833F3922A3CBBB6DDA3803B4", true); + + verifyEffectivePartitionKeyEncoding(nonLatin, 49, "05C1C1BD37FE08D1B1D1B2D1B3D1B4D1B5D1B6D292D1B7D1B8D1B9D1BAD1BBD1BCD1BDD1BED1BFD1C0D281D282D283D284D285D286D287D288D289D28AD28BD28CD28DD28ED28FD290D1B1D1B2D1B3D1B4D1B5D1B6D292D1B7D1B8D1B9D1BAD1BBD1BCD1BDD1BED1BF00", false); + verifyEffectivePartitionKeyEncoding(nonLatin, 49, "3742C1AF65AFA809282539F4BCDF2F6F", true); + + verifyEffectivePartitionKeyEncoding(nonLatin, 50, "05C1B339EF472008D1B1D1B2D1B3D1B4D1B5D1B6D292D1B7D1B8D1B9D1BAD1BBD1BCD1BDD1BED1BFD1C0D281D282D283D284D285D286D287D288D289D28AD28BD28CD28DD28ED28FD290D1B1D1B2D1B3D1B4D1B5D1B6D292D1B7D1B8D1B9D1BAD1BBD1BCD1BDD1BED1BFD1C000", false); + verifyEffectivePartitionKeyEncoding(nonLatin, 50, "399CF1F141E066E09CC7557EA7F0977A", true); + + verifyEffectivePartitionKeyEncoding(nonLatin, 51, "05C1EB1F29DBFA08D1B1D1B2D1B3D1B4D1B5D1B6D292D1B7D1B8D1B9D1BAD1BBD1BCD1BDD1BED1BFD1C0D281D282D283D284D285D286D287D288D289D28AD28BD28CD28DD28ED28FD290D1B1D1B2D1B3D1B4D1B5D1B6D292D1B7D1B8D1B9D1BAD1BBD1BCD1BDD1BED1BFD1C0D2", false); + verifyEffectivePartitionKeyEncoding(nonLatin, 51, "2D63C2F5FDAC6EFE5660CD509A723A90", true); + + verifyEffectivePartitionKeyEncoding(nonLatin, 99, "05C1E72F79C71608D1B1D1B2D1B3D1B4D1B5D1B6D292D1B7D1B8D1B9D1BAD1BBD1BCD1BDD1BED1BFD1C0D281D282D283D284D285D286D287D288D289D28AD28BD28CD28DD28ED28FD290D1B1D1B2D1B3D1B4D1B5D1B6D292D1B7D1B8D1B9D1BAD1BBD1BCD1BDD1BED1BFD1C0D2", false); + verifyEffectivePartitionKeyEncoding(nonLatin, 99, "1E9836D9BCB67FDB2B5C984BD40AFAF9", true); + + verifyEffectivePartitionKeyEncoding(nonLatin, 100, "05C1E3653D9F3E08D1B1D1B2D1B3D1B4D1B5D1B6D292D1B7D1B8D1B9D1BAD1BBD1BCD1BDD1BED1BFD1C0D281D282D283D284D285D286D287D288D289D28AD28BD28CD28DD28ED28FD290D1B1D1B2D1B3D1B4D1B5D1B6D292D1B7D1B8D1B9D1BAD1BBD1BCD1BDD1BED1BFD1C0D2", false); + verifyEffectivePartitionKeyEncoding(nonLatin, 100, "16102F19448867537E51BB4377962AF9", true); + + verifyEffectivePartitionKeyEncoding(nonLatin, 101, "05C1E3653D9F3E08D1B1D1B2D1B3D1B4D1B5D1B6D292D1B7D1B8D1B9D1BAD1BBD1BCD1BDD1BED1BFD1C0D281D282D283D284D285D286D287D288D289D28AD28BD28CD28DD28ED28FD290D1B1D1B2D1B3D1B4D1B5D1B6D292D1B7D1B8D1B9D1BAD1BBD1BCD1BDD1BED1BFD1C0D2", false); + verifyEffectivePartitionKeyEncoding(nonLatin, 101, "0B6D25D07748AB9CA0F523D4BAD146C8", true); + } + + private static void validateEffectivePartitionKeyV2(String partitionKeyRangeJson, String expectedHexEncoding) { + PartitionKeyInternal partitionKey = PartitionKeyInternal.fromJsonString(partitionKeyRangeJson); + + PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition(); + partitionKeyDefinition.kind(PartitionKind.HASH); + CommonsBridgeInternal.setV2(partitionKeyDefinition); + ArrayList paths = new ArrayList(); + for (int i = 0; i < partitionKey.getComponents().size(); i++) { + paths.add("/path" + i); + } + + if (paths.size() > 0) { + 
partitionKeyDefinition.paths(paths); + } + + String hexEncodedEffectivePartitionKey = PartitionKeyInternalHelper.getEffectivePartitionKeyString(partitionKey, partitionKeyDefinition); + assertThat(hexEncodedEffectivePartitionKey).isEqualTo(expectedHexEncoding); + } + + private void verifyComparison(String leftKey, String rightKey, int result) { + assertThat(PartitionKeyInternal.fromJsonString(leftKey). + compareTo(PartitionKeyInternal.fromJsonString(rightKey))).isEqualTo(result); + } + + private static void verifyEffectivePartitionKeyEncoding(String buffer, int length, String expectedValue, boolean v2) { + PartitionKeyDefinition pkDefinition = new PartitionKeyDefinition(); + pkDefinition.paths(ImmutableList.of("/field1")); + if (v2) { + CommonsBridgeInternal.setV2(pkDefinition); + } + + PartitionKeyInternal pk = PartitionKeyInternalUtils.createPartitionKeyInternal(buffer.substring(0, length)); + assertThat(PartitionKeyInternalHelper.getEffectivePartitionKeyString(pk, pkDefinition)).isEqualTo(expectedValue); + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/PartitionKeyTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/PartitionKeyTest.java new file mode 100644 index 0000000000000..d793868d99213 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/PartitionKeyTest.java @@ -0,0 +1,102 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.PartitionKey; +import com.azure.data.cosmos.PartitionKeyDefinition; +import com.azure.data.cosmos.internal.Undefined; +import com.azure.data.cosmos.internal.RMResources; +import com.azure.data.cosmos.internal.routing.PartitionKeyInternalHelper; +import com.google.common.collect.ImmutableList; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; + +import static org.assertj.core.api.AssertionsForClassTypes.assertThat; +import static org.assertj.core.api.AssertionsForClassTypes.fail; + +public class PartitionKeyTest { + + @DataProvider(name = "paramProvider") + public Object[][] paramProvider() { + return new Object[][] { + { Undefined.Value(), "[{}]" }, + { null, "[null]"}, + { false, "[false]"}, + { true, "[true]"}, + { 123.456, "[123.456]"}, + { 5, "[5.0]"}, + { "PartitionKeyValue", "[\"PartitionKeyValue\"]"}, + }; + } + + /** + * Simple test for @{@link PartitionKey}. + */ + @Test(groups = "unit", dataProvider = "paramProvider") + public void partitionKey(Object partitionKey, String partitionKeyAsJson) { + assertThat(new PartitionKey(partitionKey).toString()).isEqualTo(partitionKeyAsJson); + } + + /** + * Test equals override for @{@link PartitionKey} + */ + @Test(groups = "unit", dataProvider = "paramProvider") + public void partitionKeyCompare(Object partitionKey, String partitionKeyAsJson) { + assertThat(new PartitionKey(partitionKey)).isEqualTo(PartitionKey.fromJsonString(partitionKeyAsJson)); + } + + /** + * too few partition key values. + */ + @Test(groups = "unit") + public void tooFewPartitionKeyComponents() { + PartitionKeyDefinition pkd = new PartitionKeyDefinition(); + pkd.paths(ImmutableList.of("/pk1", "/pk2")); + PartitionKey pk = PartitionKey.fromJsonString("[\"PartitionKeyValue\"]"); + + try { + PartitionKeyInternalHelper.getEffectivePartitionKeyString(pk.getInternalPartitionKey(), pkd); + fail("should throw"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage()).isEqualTo(RMResources.TooFewPartitionKeyComponents); + } + } + + /** + * too many partition key values. 
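+     * <p>
+     * The number of key components must match the number of paths in the
+     * {@link PartitionKeyDefinition}; here two values are supplied against a single-path definition.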
+     */
+    @Test(groups = "unit")
+    public void tooManyPartitionKeyComponents() {
+        PartitionKeyDefinition pkd = new PartitionKeyDefinition();
+        pkd.paths(ImmutableList.of("/pk1"));
+        PartitionKey pk = PartitionKey.fromJsonString("[true, false]");
+
+        try {
+            PartitionKeyInternalHelper.getEffectivePartitionKeyString(pk.getInternalPartitionKey(), pkd);
+            fail("should throw");
+        } catch (IllegalArgumentException e) {
+            assertThat(e.getMessage()).isEqualTo(RMResources.TooManyPartitionKeyComponents);
+        }
+    }
+}
\ No newline at end of file
diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/PartitionReplicasAddressesValidator.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/PartitionReplicasAddressesValidator.java
new file mode 100644
index 0000000000000..9a7e053035fb7
--- /dev/null
+++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/PartitionReplicasAddressesValidator.java
@@ -0,0 +1,162 @@
+/*
+ * The MIT License (MIT)
+ * Copyright (c) 2018 Microsoft Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+package com.azure.data.cosmos.internal.directconnectivity;
+
+import com.azure.data.cosmos.internal.directconnectivity.Address;
+import com.azure.data.cosmos.internal.directconnectivity.Protocol;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.stream.Collectors;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+/**
+ * This is a helper class for validating partition replicas' addresses for tests.
+ */
+public interface PartitionReplicasAddressesValidator {
+
+    int MAX_REPLICA_SIZE = 4;
+
+    void validate(Collection<Address>
addresses); + + class Builder { + private List validators = new ArrayList<>(); + + public PartitionReplicasAddressesValidator build() { + return new PartitionReplicasAddressesValidator() { + + public void validate(Collection
addresses) { + for (PartitionReplicasAddressesValidator validator : validators) { + validator.validate(addresses); + } + } + }; + } + + public Builder size(final int expectedCount) { + + validators.add(new PartitionReplicasAddressesValidator() { + @Override + public void validate(Collection
addresses) { + assertThat(addresses).hasSize(expectedCount); + } + }); + return this; + } + + public Builder forEach(AddressValidator validator) { + + validators.add(new PartitionReplicasAddressesValidator() { + @Override + public void validate(Collection
addresses) { + + for (Address address : addresses) { + validator.validate(address); + } + + } + }); + return this; + } + + public Builder httpsProtocol() { + this.forEach(new AddressValidator.Builder().httpsProtocol().build()); + return this; + } + + public Builder withProtocol(Protocol protocol) { + this.forEach(new AddressValidator.Builder().protocol(protocol).build()); + return this; + } + + public Builder replicasOfPartition(String partitionKeyRangeId) { + validators.add(new PartitionReplicasAddressesValidator() { + @Override + public void validate(Collection
addresses) { + + // if running against prod due to upgrade etc, we may have occasionally 3 or 4 replicas. + assertThat(addresses).size().isGreaterThanOrEqualTo(MAX_REPLICA_SIZE - 1).isLessThanOrEqualTo(MAX_REPLICA_SIZE); + assertThat(addresses.stream().filter(a -> a.IsPrimary()).count()).isEqualTo(1); + + Address a = addresses.iterator().next(); + + AddressValidator validator = new AddressValidator.Builder() + .withPartitionKeyRangeId(partitionKeyRangeId) + .withRid(a.resourceId()) + .build(); + + for (Address address : addresses) { + validator.validate(address); + } + } + }); + return this; + } + + public Builder replicasOfSamePartition() { + validators.add(new PartitionReplicasAddressesValidator() { + @Override + public void validate(Collection
addresses) { + + // if running against prod due to upgrade etc, we may have occasionally 3 or 4 replicas. + assertThat(addresses).size().isGreaterThanOrEqualTo(MAX_REPLICA_SIZE - 1).isLessThanOrEqualTo(MAX_REPLICA_SIZE); + assertThat(addresses.stream().filter(a -> a.IsPrimary()).count()).isEqualTo(1); + + Address a = addresses.iterator().next(); + + AddressValidator validator = new AddressValidator.Builder() + .withPartitionKeyRangeId(a.getParitionKeyRangeId()) + .withRid(a.resourceId()) + .build(); + + for (Address address : addresses) { + validator.validate(address); + } + } + }); + return this; + } + + public Builder replicasOfPartitions(Collection partitionKeyRangeIds) { + validators.add(new PartitionReplicasAddressesValidator() { + @Override + public void validate(Collection
addresses) { + + for (String pki : partitionKeyRangeIds) { + List
partitionReplicas = addresses.stream() + .filter(a -> pki.equals(a.getParitionKeyRangeId())) + .collect(Collectors.toList()); + + PartitionReplicasAddressesValidator v = new Builder().replicasOfPartition(pki).build(); + v.validate(partitionReplicas); + } + } + }); + return this; + } + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/QuorumReaderTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/QuorumReaderTest.java new file mode 100644 index 0000000000000..a502c26de2419 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/QuorumReaderTest.java @@ -0,0 +1,668 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.internal.ISessionContainer; +import com.azure.data.cosmos.internal.*; +import com.google.common.base.Stopwatch; +import com.google.common.collect.ImmutableList; +import io.reactivex.subscribers.TestSubscriber; +import org.mockito.Mockito; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; +import reactor.core.publisher.Mono; + +import java.math.BigDecimal; +import java.math.RoundingMode; +import java.net.URI; +import java.time.Duration; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static org.assertj.core.api.Assertions.assertThat; + +public class QuorumReaderTest { + private final Duration timeResolution = Duration.ofMillis(10); + private final Configs configs; + + public QuorumReaderTest() { + configs = new Configs(); + } + + @DataProvider(name = "simpleReadStrongArgProvider") + public Object[][] simpleReadStrongArgProvider() { + return new Object[][]{ + //int replicaCountToRead, ReadMode readMode, Long lsn, Long localLSN + { 1, ReadMode.Strong, 51l, 18l }, + { 2, ReadMode.Strong, 51l, 18l }, + { 3, ReadMode.Strong, 51l, 18l }, + + { 2, ReadMode.Any, 51l, 18l }, + { 1, ReadMode.Any, 51l, 18l }, + + { 2, ReadMode.Any, null, 18l }, + { 1, ReadMode.Any, null, 18l }, + }; + } + + private StoreResponse storeResponse(Long lsn, Long localLSN, Double rc) { + StoreResponseBuilder srb = StoreResponseBuilder.create(); + if (rc != null) { + srb.withRequestCharge(rc); + } + + if (lsn != null) { + srb.withLSN(lsn); + } + + if (localLSN != null) { + srb.withLocalLSN(localLSN); + } + + return srb.build(); + } + + @Test(groups = "unit", dataProvider = "simpleReadStrongArgProvider") + public void basicReadStrong_AllReplicasSameLSN(int replicaCountToRead, ReadMode readMode, Long lsn, Long localLSN) { + ISessionContainer sessionContainer = Mockito.mock(ISessionContainer.class); + URI primaryReplicaURI = URI.create("primary"); + ImmutableList secondaryReplicaURIs = ImmutableList.of(URI.create("secondary1"), URI.create("secondary2"), URI.create("secondary3")); + AddressSelectorWrapper addressSelectorWrapper = AddressSelectorWrapper.Builder.Simple.create() + .withPrimary(primaryReplicaURI) + .withSecondary(secondaryReplicaURIs) + .build(); + + RxDocumentServiceRequest request = RxDocumentServiceRequest.createFromName( + OperationType.Read, "/dbs/db/colls/col/docs/docId", ResourceType.Document); + + request.requestContext = new DocumentServiceRequestContext(); + request.requestContext.timeoutHelper = Mockito.mock(TimeoutHelper.class); + request.requestContext.resolvedPartitionKeyRange = Mockito.mock(PartitionKeyRange.class); + request.requestContext.requestChargeTracker = new RequestChargeTracker(); + + BigDecimal requestChargePerRead = new BigDecimal(1.1); + + StoreResponse primaryResponse = storeResponse(lsn, localLSN, requestChargePerRead.doubleValue()); + StoreResponse secondaryResponse1 = storeResponse(lsn, localLSN, requestChargePerRead.doubleValue()); + StoreResponse secondaryResponse2 = storeResponse(lsn, localLSN, requestChargePerRead.doubleValue()); + StoreResponse secondaryResponse3 = storeResponse(lsn, localLSN, requestChargePerRead.doubleValue()); + + TransportClientWrapper transportClientWrapper = TransportClientWrapper.Builder.uriToResultBuilder() + .storeResponseOn(primaryReplicaURI, OperationType.Read, ResourceType.Document, primaryResponse, false) + .storeResponseOn(secondaryReplicaURIs.get(0), OperationType.Read, ResourceType.Document, 
secondaryResponse1, false) + .storeResponseOn(secondaryReplicaURIs.get(1), OperationType.Read, ResourceType.Document, secondaryResponse2, false) + .storeResponseOn(secondaryReplicaURIs.get(2), OperationType.Read, ResourceType.Document, secondaryResponse3, false) + .build(); + + StoreReader storeReader = new StoreReader(transportClientWrapper.transportClient, addressSelectorWrapper.addressSelector, sessionContainer); + + GatewayServiceConfigurationReader serviceConfigurator = Mockito.mock(GatewayServiceConfigurationReader.class); + IAuthorizationTokenProvider authTokenProvider = Mockito.mock(IAuthorizationTokenProvider.class); + QuorumReader quorumReader = new QuorumReader(configs, transportClientWrapper.transportClient, addressSelectorWrapper.addressSelector, storeReader, serviceConfigurator, authTokenProvider); + + Mono storeResponseSingle = quorumReader.readStrongAsync(request, replicaCountToRead, readMode); + + StoreResponseValidator.Builder validatorBuilder = StoreResponseValidator.create() + .withBELocalLSN(localLSN) + .withRequestCharge(requestChargePerRead.multiply(BigDecimal.valueOf(replicaCountToRead)).setScale(2, RoundingMode.FLOOR).doubleValue()); + + if (lsn != null) { + validatorBuilder.withBELSN(lsn); + } + + validateSuccess(storeResponseSingle, validatorBuilder.build()); + + transportClientWrapper.validate() + .verifyNumberOfInvocations(replicaCountToRead); + addressSelectorWrapper.validate() + .verifyNumberOfForceCachRefresh(0) + .verifyVesolvePrimaryUriAsyncCount(0) + .verifyTotalInvocations(1); + } + + @DataProvider(name = "readStrong_RequestBarrierArgProvider") + public Object[][] readStrong_RequestBarrierArgProvider() { + return new Object[][]{ + { 1 }, + { 2 }, + { configs.getMaxNumberOfReadBarrierReadRetries() - 1 }, + { configs.getMaxNumberOfReadBarrierReadRetries() }, + }; + } + + @Test(groups = "unit", dataProvider = "readStrong_RequestBarrierArgProvider") + public void readStrong_OnlySecondary_RequestBarrier_Success(int numberOfBarrierRequestTillCatchUp) { + // scenario: we get lsn l1, l2 where l1 > l2 + // we do barrier request and send it to all replicas till we have two replicas with at least l1 lsn + + ReadMode readMode = ReadMode.Strong; + int replicaCountToRead = 2; + + ISessionContainer sessionContainer = Mockito.mock(ISessionContainer.class); + URI primaryReplicaURI = URI.create("primary"); + ImmutableList secondaryReplicaURIs = ImmutableList.of(URI.create("secondary1"), URI.create("secondary2")); + AddressSelectorWrapper addressSelectorWrapper = AddressSelectorWrapper.Builder.Simple.create() + .withPrimary(primaryReplicaURI) + .withSecondary(secondaryReplicaURIs) + .build(); + + RxDocumentServiceRequest request = RxDocumentServiceRequest.createFromName( + OperationType.Read, "/dbs/db/colls/col/docs/docId", ResourceType.Document); + + request.requestContext = new DocumentServiceRequestContext(); + request.requestContext.timeoutHelper = Mockito.mock(TimeoutHelper.class); + request.requestContext.resolvedPartitionKeyRange = Mockito.mock(PartitionKeyRange.class); + request.requestContext.requestChargeTracker = new RequestChargeTracker(); + + BigDecimal requestChargePerRead = new BigDecimal(1.1); + BigDecimal requestChargePerHead = BigDecimal.ZERO; + + long expectedQuorumLsn = 53; + long expectedQuorumLocalLSN = 20; + + StoreResponse primaryResponse = StoreResponseBuilder.create() + .withLSN(expectedQuorumLsn - 2) + .withLocalLSN(expectedQuorumLocalLSN - 2) + .withRequestCharge(requestChargePerRead) + .build(); + + 
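+        // Replica wiring for this scenario: the primary stays two LSNs behind the target quorum LSN,
+        // the slow secondary answers reads/barrier heads one LSN behind until the final barrier round,
+        // and the fast secondary is already at the quorum LSN, so read quorum is only met after
+        // numberOfBarrierRequestTillCatchUp barrier requests.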
TransportClientWrapper.Builder.UriToResultBuilder builder = TransportClientWrapper.Builder.uriToResultBuilder() + .storeResponseOn(primaryReplicaURI, OperationType.Read, ResourceType.Document, primaryResponse, false); + + // slow replica + StoreResponse readResponse = StoreResponseBuilder.create() + .withLSN(expectedQuorumLsn - 1) + .withLocalLSN(expectedQuorumLocalLSN -1) + .withRequestCharge(requestChargePerRead) + .build(); + builder.storeResponseOn(secondaryReplicaURIs.get(0), OperationType.Read, ResourceType.Document, readResponse, false); + + for(int i = 0; i < numberOfBarrierRequestTillCatchUp; i++) { + int lsnIncrement = (i == numberOfBarrierRequestTillCatchUp - 1) ? 1 : 0; + readResponse = StoreResponseBuilder.create() + .withLSN(expectedQuorumLsn - 1 + lsnIncrement) + .withLocalLSN(expectedQuorumLocalLSN - 1 + lsnIncrement) + .withRequestCharge(requestChargePerRead) + .build(); + builder.storeResponseOn(secondaryReplicaURIs.get(0), OperationType.Read, ResourceType.Document, readResponse, false); + + StoreResponse headResponse = StoreResponseBuilder.create() + .withLSN(expectedQuorumLsn - 1 + lsnIncrement) + .withLocalLSN(expectedQuorumLocalLSN - 1 + lsnIncrement) + .withRequestCharge(requestChargePerHead) + .build(); + builder.storeResponseOn(secondaryReplicaURIs.get(0), OperationType.Head, ResourceType.DocumentCollection, headResponse, false); + } + + // faster replica + readResponse = StoreResponseBuilder.create() + .withLSN(expectedQuorumLsn) + .withLocalLSN(expectedQuorumLocalLSN) + .withRequestCharge(requestChargePerRead) + .build(); + builder.storeResponseOn(secondaryReplicaURIs.get(1), OperationType.Read, ResourceType.Document, readResponse, false); + for(int i = 0; i < numberOfBarrierRequestTillCatchUp; i++) { + StoreResponse headResponse = StoreResponseBuilder.create() + .withLSN(expectedQuorumLsn + 10 * (i + 1)) + .withLocalLSN(expectedQuorumLocalLSN + 10 * (i + 1)) + .withRequestCharge(requestChargePerHead) + .build(); + builder.storeResponseOn(secondaryReplicaURIs.get(1), OperationType.Head, ResourceType.DocumentCollection, headResponse, false); + } + + TransportClientWrapper transportClientWrapper = builder.build(); + + StoreReader storeReader = new StoreReader(transportClientWrapper.transportClient, addressSelectorWrapper.addressSelector, sessionContainer); + + GatewayServiceConfigurationReader serviceConfigurator = Mockito.mock(GatewayServiceConfigurationReader.class); + IAuthorizationTokenProvider authTokenProvider = Mockito.mock(IAuthorizationTokenProvider.class); + QuorumReader quorumReader = new QuorumReader(configs, transportClientWrapper.transportClient, addressSelectorWrapper.addressSelector, storeReader, serviceConfigurator, authTokenProvider); + + int expectedNumberOfReads = 2; + int expectedNumberOfHeads = 2 * numberOfBarrierRequestTillCatchUp; + + double expectedRequestCharge = requestChargePerRead.multiply(BigDecimal.valueOf(expectedNumberOfReads)).add( + requestChargePerHead.multiply(BigDecimal.valueOf(expectedNumberOfHeads))).setScale(4, RoundingMode.FLOOR).doubleValue(); + + Stopwatch stopwatch = Stopwatch.createStarted(); + + Mono storeResponseSingle = quorumReader.readStrongAsync(request, replicaCountToRead, readMode); + + StoreResponseValidator validator = StoreResponseValidator.create() + .withBELSN(expectedQuorumLsn) + .withRequestCharge(expectedRequestCharge) + .build(); + + validateSuccess(storeResponseSingle, validator); + + assertThat(stopwatch.elapsed().plus(timeResolution)).isGreaterThanOrEqualTo(Duration.ofMillis( + 
numberOfBarrierRequestTillCatchUp * configs.getDelayBetweenReadBarrierCallsInMs())); + + transportClientWrapper.validate() + .verifyNumberOfInvocations(expectedNumberOfReads + expectedNumberOfHeads); + addressSelectorWrapper.validate() + .verifyNumberOfForceCachRefresh(0) + .verifyVesolvePrimaryUriAsyncCount(0) + .verifyTotalInvocations(1 + numberOfBarrierRequestTillCatchUp); + + AddressSelectorWrapper.InOrderVerification.Verifier addressSelectorVerifier = AddressSelectorWrapper.InOrderVerification.Verifier.builder() + .resolveAllUriAsync_IncludePrimary(false) + .resolveAllUriAsync_ForceRefresh(false) + .build(); + + addressSelectorWrapper.getInOrderVerification() + .verifyNumberOfInvocations(1 + numberOfBarrierRequestTillCatchUp) + .verifyOnAll(addressSelectorVerifier); + + DocumentServiceRequestValidator requestValidator = DocumentServiceRequestValidator.builder() + .add(DocumentServiceRequestContextValidator.builder() + .qurorumSelectedLSN(0l) + .globalCommittedSelectedLSN(0l) + .storeResponses(null) + .build()) + .build(); + requestValidator.validate(request); + } + + @DataProvider(name = "readStrong_SecondaryReadBarrierExhausted_ReadBarrierOnPrimary_SuccessArgProvider") + public Object[][] readStrong_SecondaryReadBarrierExhausted_ReadBarrierOnPrimary_SuccessArgProvider() { + return new Object[][]{ + { 1 }, + { 2 }, + { configs.getMaxNumberOfReadBarrierReadRetries() - 1 }, + { configs.getMaxNumberOfReadBarrierReadRetries() }, + }; + } + + @Test(groups = "unit", dataProvider = "readStrong_SecondaryReadBarrierExhausted_ReadBarrierOnPrimary_SuccessArgProvider") + public void readStrong_SecondaryReadBarrierExhausted_ReadBarrierOnPrimary_Success(int numberOfHeadBarriersWithPrimaryIncludedTillQuorumMet) { + // scenario: we exhaust all barrier request retries on secondaries + // after that we start barrier requests including the primary + + int numberOfBarrierRequestTillCatchUp = configs.getMaxNumberOfReadBarrierReadRetries() + numberOfHeadBarriersWithPrimaryIncludedTillQuorumMet; + + ReadMode readMode = ReadMode.Strong; + int replicaCountToRead = 2; + + ISessionContainer sessionContainer = Mockito.mock(ISessionContainer.class); + URI primaryReplicaURI = URI.create("primary"); + ImmutableList secondaryReplicaURIs = ImmutableList.of(URI.create("secondary1"), URI.create("secondary2")); + AddressSelectorWrapper addressSelectorWrapper = AddressSelectorWrapper.Builder.Simple.create() + .withPrimary(primaryReplicaURI) + .withSecondary(secondaryReplicaURIs) + .build(); + + RxDocumentServiceRequest request = RxDocumentServiceRequest.createFromName( + OperationType.Read, "/dbs/db/colls/col/docs/docId", ResourceType.Document); + + request.requestContext = new DocumentServiceRequestContext(); + request.requestContext.timeoutHelper = Mockito.mock(TimeoutHelper.class); + request.requestContext.resolvedPartitionKeyRange = Mockito.mock(PartitionKeyRange.class); + request.requestContext.requestChargeTracker = new RequestChargeTracker(); + + BigDecimal requestChargePerRead = new BigDecimal(1.1); + BigDecimal requestChargePerHead = BigDecimal.ZERO; + + TransportClientWrapper.Builder.UriToResultBuilder builder = TransportClientWrapper.Builder.uriToResultBuilder(); + + long expectedQuorumLsn = 53; + long expectedQuorumLocalLSN = 20; + + for(int i = 0; i < numberOfHeadBarriersWithPrimaryIncludedTillQuorumMet; i++) { + int lsnIncrement = (i == numberOfHeadBarriersWithPrimaryIncludedTillQuorumMet - 1) ? 
1 : 0; + StoreResponse headResponse = StoreResponseBuilder.create() + .withLSN(expectedQuorumLsn - 1 + lsnIncrement) + .withLocalLSN(expectedQuorumLocalLSN - 1 + lsnIncrement) + .withRequestCharge(requestChargePerHead) + .build(); + builder.storeResponseOn(primaryReplicaURI, OperationType.Head, ResourceType.DocumentCollection, headResponse, false); + } + + // slow replica + StoreResponse readResponse = StoreResponseBuilder.create() + .withLSN(expectedQuorumLsn - 1) + .withLocalLSN(expectedQuorumLocalLSN - 1) + .withRequestCharge(requestChargePerRead) + .build(); + builder.storeResponseOn(secondaryReplicaURIs.get(0), OperationType.Read, ResourceType.Document, readResponse, false); + + for(int i = 0; i < numberOfBarrierRequestTillCatchUp; i++) { + int lsnIncrement = (i == numberOfBarrierRequestTillCatchUp - 1) ? 1 : 0; + readResponse = StoreResponseBuilder.create() + .withLSN(expectedQuorumLsn - 1 + lsnIncrement) + .withLocalLSN(expectedQuorumLocalLSN - 1 + lsnIncrement) + .withRequestCharge(requestChargePerRead) + .build(); + builder.storeResponseOn(secondaryReplicaURIs.get(0), OperationType.Read, ResourceType.Document, readResponse, false); + + StoreResponse headResponse = StoreResponseBuilder.create() + .withLSN(expectedQuorumLsn - 1 + lsnIncrement) + .withLocalLSN(expectedQuorumLocalLSN - 1 + lsnIncrement) + .withRequestCharge(requestChargePerHead) + .build(); + builder.storeResponseOn(secondaryReplicaURIs.get(0), OperationType.Head, ResourceType.DocumentCollection, headResponse, false); + } + + // faster replica + readResponse = StoreResponseBuilder.create() + .withLSN(expectedQuorumLsn) + .withLocalLSN(expectedQuorumLocalLSN) + .withRequestCharge(requestChargePerRead) + .build(); + builder.storeResponseOn(secondaryReplicaURIs.get(1), OperationType.Read, ResourceType.Document, readResponse, false); + for(int i = 0; i < numberOfBarrierRequestTillCatchUp; i++) { + StoreResponse headResponse = StoreResponseBuilder.create() + .withLSN(expectedQuorumLsn + 10 * (i + 1)) + .withLocalLSN(expectedQuorumLocalLSN + 10 * (i + 1)) + .withRequestCharge(requestChargePerHead) + .build(); + builder.storeResponseOn(secondaryReplicaURIs.get(1), OperationType.Head, ResourceType.DocumentCollection, headResponse, false); + } + + TransportClientWrapper transportClientWrapper = builder.build(); + + StoreReader storeReader = new StoreReader(transportClientWrapper.transportClient, addressSelectorWrapper.addressSelector, sessionContainer); + + GatewayServiceConfigurationReader serviceConfigurator = Mockito.mock(GatewayServiceConfigurationReader.class); + IAuthorizationTokenProvider authTokenProvider = Mockito.mock(IAuthorizationTokenProvider.class); + QuorumReader quorumReader = new QuorumReader(configs, transportClientWrapper.transportClient, addressSelectorWrapper.addressSelector, storeReader, serviceConfigurator, authTokenProvider); + + int beforeSecondariesRetriesExhausted_expectedNumberOfReads = 2; + int beforeSecondariesRetriesExhausted_expectedNumberOfHeads = 2 * configs.getMaxNumberOfReadBarrierReadRetries(); + + int numberOfHeadRetriesRequestWhenPrimaryIncluded = 3 * numberOfHeadBarriersWithPrimaryIncludedTillQuorumMet; + + double expectedRequestCharge = requestChargePerRead.multiply(BigDecimal.valueOf(beforeSecondariesRetriesExhausted_expectedNumberOfReads)) + .add(requestChargePerHead.multiply(BigDecimal.valueOf(beforeSecondariesRetriesExhausted_expectedNumberOfHeads))) + .setScale(4, RoundingMode.FLOOR).doubleValue(); + + Stopwatch stopwatch = Stopwatch.createStarted(); + + Mono storeResponseSingle = 
quorumReader.readStrongAsync(request, replicaCountToRead, readMode); + + StoreResponseValidator validator = StoreResponseValidator.create() + .withBELSN(expectedQuorumLsn) + .withRequestCharge(expectedRequestCharge) + .build(); + + validateSuccess(storeResponseSingle, validator); + + assertThat(stopwatch.elapsed().plus(timeResolution)).isGreaterThanOrEqualTo(Duration.ofMillis( + numberOfBarrierRequestTillCatchUp * configs.getDelayBetweenReadBarrierCallsInMs())); + + transportClientWrapper.validate() + .verifyNumberOfInvocations(beforeSecondariesRetriesExhausted_expectedNumberOfReads + + beforeSecondariesRetriesExhausted_expectedNumberOfHeads + + numberOfHeadRetriesRequestWhenPrimaryIncluded); + addressSelectorWrapper.validate() + .verifyNumberOfForceCachRefresh(0) + .verifyVesolvePrimaryUriAsyncCount(0) + .verifyTotalInvocations(1 + numberOfBarrierRequestTillCatchUp); + + AddressSelectorWrapper.InOrderVerification.Verifier primaryNotIncludedVerifier = AddressSelectorWrapper + .InOrderVerification.Verifier.builder() + .resolveAllUriAsync_IncludePrimary(false) + .resolveAllUriAsync_ForceRefresh(false) + .build(); + + AddressSelectorWrapper.InOrderVerification.Verifier primaryIncludedVerifier = AddressSelectorWrapper + .InOrderVerification.Verifier.builder() + .resolveAllUriAsync_IncludePrimary(true) + .resolveAllUriAsync_ForceRefresh(false) + .build(); + + int numberOfAddressResolutionWithoutPrimary = configs.getMaxNumberOfReadBarrierReadRetries()+ 1; + int numberOfAddressResolutionWithPrimary = 1; + + AddressSelectorWrapper.InOrderVerification ov = addressSelectorWrapper.getInOrderVerification(); + + for(int i = 0; i < numberOfAddressResolutionWithoutPrimary; i++) { + ov.verifyNext(primaryNotIncludedVerifier); + } + + for(int i = 0; i < numberOfAddressResolutionWithPrimary; i++) { + ov.verifyNext(primaryIncludedVerifier); + } + + DocumentServiceRequestValidator requestValidator = DocumentServiceRequestValidator.builder() + .add(DocumentServiceRequestContextValidator.builder() + .qurorumSelectedLSN(0l) + .globalCommittedSelectedLSN(0l) + .storeResponses(null) + .build()) + .build(); + requestValidator.validate(request); + } + + @Test(groups = "unit") + public void readStrong_QuorumNotSelected_ReadPrimary() { + // scenario: attempts to read from secondaries, + // only one secondary is available so ends in QuorumNotSelected State + // reads from Primary and succeeds + + ReadMode readMode = ReadMode.Strong; + int replicaCountToRead = 2; + + ISessionContainer sessionContainer = Mockito.mock(ISessionContainer.class); + URI primaryReplicaURI = URI.create("primary"); + ImmutableList secondaryReplicaURIs = ImmutableList.of(URI.create("secondary1")); + AddressSelectorWrapper addressSelectorWrapper = AddressSelectorWrapper.Builder.Simple.create() + .withPrimary(primaryReplicaURI) + .withSecondary(secondaryReplicaURIs) + .build(); + + RxDocumentServiceRequest request = RxDocumentServiceRequest.createFromName( + OperationType.Read, "/dbs/db/colls/col/docs/docId", ResourceType.Document); + + request.requestContext = new DocumentServiceRequestContext(); + request.requestContext.timeoutHelper = Mockito.mock(TimeoutHelper.class); + request.requestContext.resolvedPartitionKeyRange = Mockito.mock(PartitionKeyRange.class); + request.requestContext.requestChargeTracker = new RequestChargeTracker(); + + BigDecimal requestChargePerRead = new BigDecimal(1.1); + BigDecimal requestChargePerHead = BigDecimal.ZERO; + + TransportClientWrapper.Builder.UriToResultBuilder builder = 
TransportClientWrapper.Builder.uriToResultBuilder(); + + long primaryLSN = 52; + long primaryLocalLSN = 19; + + StoreResponse headResponse = StoreResponseBuilder.create() + .withLSN(primaryLSN) + .withLocalLSN(primaryLocalLSN) + .withHeader(WFConstants.BackendHeaders.CURRENT_REPLICA_SET_SIZE, "2") + .withHeader(WFConstants.BackendHeaders.QUORUM_ACKED_LSN, Long.toString(primaryLSN)) + .withHeader(WFConstants.BackendHeaders.QUORUM_ACKED_LOCAL_LSN, Long.toString(primaryLocalLSN)) + .withRequestCharge(requestChargePerRead) + .build(); + builder.storeResponseOn(primaryReplicaURI, OperationType.Read, ResourceType.Document, headResponse, false); + + StoreResponse readResponse = StoreResponseBuilder.create() + .withLSN(primaryLSN) + .withLocalLSN(primaryLocalLSN) + .withRequestCharge(requestChargePerRead) + .build(); + builder.storeResponseOn(secondaryReplicaURIs.get(0), OperationType.Read, ResourceType.Document, readResponse, false); + + TransportClientWrapper transportClientWrapper = builder.build(); + + StoreReader storeReader = new StoreReader(transportClientWrapper.transportClient, addressSelectorWrapper.addressSelector, sessionContainer); + + GatewayServiceConfigurationReader serviceConfigurator = Mockito.mock(GatewayServiceConfigurationReader.class); + IAuthorizationTokenProvider authTokenProvider = Mockito.mock(IAuthorizationTokenProvider.class); + QuorumReader quorumReader = new QuorumReader(configs, transportClientWrapper.transportClient, addressSelectorWrapper.addressSelector, storeReader, serviceConfigurator, authTokenProvider); + + double expectedRequestCharge = requestChargePerRead.multiply(BigDecimal.valueOf(1)) + .add(requestChargePerHead.multiply(BigDecimal.valueOf(0))) + .setScale(4, RoundingMode.FLOOR).doubleValue(); + + Mono storeResponseSingle = quorumReader.readStrongAsync(request, replicaCountToRead, readMode); + + StoreResponseValidator validator = StoreResponseValidator.create() + .withBELSN(primaryLSN) + .withBELocalLSN(primaryLocalLSN) + .withRequestCharge(expectedRequestCharge) + .build(); + + validateSuccess(storeResponseSingle, validator); + + transportClientWrapper.validate() + .verifyNumberOfInvocations(1); + + addressSelectorWrapper.validate() + .verifyNumberOfForceCachRefresh(0) + .verifyVesolvePrimaryUriAsyncCount(1) + .verifyTotalInvocations(2); + + AddressSelectorWrapper.InOrderVerification.Verifier primaryNotIncludedVerifier = AddressSelectorWrapper + .InOrderVerification.Verifier.builder() + .resolveAllUriAsync_IncludePrimary(false) + .resolveAllUriAsync_ForceRefresh(false) + .build(); + + AddressSelectorWrapper.InOrderVerification.Verifier resolvePrimaryVerifier = AddressSelectorWrapper + .InOrderVerification.Verifier.builder() + .resolvePrimaryUriAsync() + .build(); + + AddressSelectorWrapper.InOrderVerification ov = addressSelectorWrapper.getInOrderVerification(); + ov.verifyNext(primaryNotIncludedVerifier); + ov.verifyNext(resolvePrimaryVerifier); + + DocumentServiceRequestValidator requestValidator = DocumentServiceRequestValidator.builder() + .add(DocumentServiceRequestContextValidator.builder() + .qurorumSelectedLSN(0l) + .globalCommittedSelectedLSN(0l) + .storeResponses(null) + .build()) + .build(); + requestValidator.validate(request); + } + + @DataProvider(name = "readPrimaryArgProvider") + public Object[][] readPrimaryArgProvider() { + return new Object[][]{ + // endpoint, verifier for endpoint expected result, verifying the StoreResponse returned + { + EndpointMock.noSecondaryReplicaBuilder() + .response(StoreResponseBuilder.create() + 
.withLSN(52) + .withLocalLSN(19) + .withHeader(WFConstants.BackendHeaders.CURRENT_REPLICA_SET_SIZE, "1") + .withHeader(WFConstants.BackendHeaders.QUORUM_ACKED_LSN, "19") + .withHeader(WFConstants.BackendHeaders.QUORUM_ACKED_LOCAL_LSN, "19") + .withRequestCharge(0) + .build()) + .build(), + + EndpointMock.EndpointMockVerificationBuilder.builder() + .withAddressSelectorValidation(AddressSelectorWrapper + .InOrderVerificationBuilder + .create() + .verifyNumberOfInvocations(2) + .verifyNext(AddressSelectorWrapper.InOrderVerification.Verifier.builder() + .resolveAllUriAsync_IncludePrimary(false) + .resolveAllUriAsync_ForceRefresh(false) + .build()) + .verifyNext(AddressSelectorWrapper.InOrderVerification.Verifier.builder() + .resolvePrimaryUriAsync() + .build())) + .withTransportClientValidation(TransportClientWrapper.TransportClientWrapperVerificationBuilder.create().verifyNumberOfInvocations(1)), + + StoreResponseValidator.create() + .withBELSN(52) + .build() + } + }; + } + + @Test(groups = "unit", dataProvider = "readPrimaryArgProvider") + public void readPrimary(EndpointMock endpointMock, + EndpointMock.EndpointMockVerificationBuilder verification, + StoreResponseValidator storeResponseValidator) { + ISessionContainer sessionContainer = Mockito.mock(ISessionContainer.class); + + GatewayServiceConfigurationReader serviceConfigurator = Mockito.mock(GatewayServiceConfigurationReader.class); + IAuthorizationTokenProvider authTokenProvider = Mockito.mock(IAuthorizationTokenProvider.class); + + QuorumReader quorumReader = new QuorumReader(configs, endpointMock.transportClientWrapper.transportClient, + endpointMock.addressSelectorWrapper.addressSelector, + new StoreReader(endpointMock.transportClientWrapper.transportClient, + endpointMock.addressSelectorWrapper.addressSelector, + sessionContainer), + serviceConfigurator, + authTokenProvider); + + RxDocumentServiceRequest request = RxDocumentServiceRequest.createFromName( + OperationType.Read, "/dbs/db/colls/col/docs/docId", ResourceType.Document); + + request.requestContext = new DocumentServiceRequestContext(); + request.requestContext.timeoutHelper = Mockito.mock(TimeoutHelper.class); + request.requestContext.resolvedPartitionKeyRange = Mockito.mock(PartitionKeyRange.class); + request.requestContext.requestChargeTracker = new RequestChargeTracker(); + + int replicaCountToRead = 1; + ReadMode readMode = ReadMode.Strong; + Mono storeResponseSingle = quorumReader.readStrongAsync(request, replicaCountToRead, readMode); + + validateSuccess(storeResponseSingle, storeResponseValidator); + endpointMock.validate(verification); + } + + public static void validateSuccess(Mono> single, + MultiStoreResultValidator validator) { + validateSuccess(single, validator, 10000); + } + + public static void validateSuccess(Mono> single, + MultiStoreResultValidator validator, + long timeout) { + TestSubscriber> testSubscriber = new TestSubscriber<>(); + + single.subscribe(testSubscriber); + testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS); + testSubscriber.assertNoErrors(); + testSubscriber.assertComplete(); + testSubscriber.assertValueCount(1); + validator.validate(testSubscriber.values().get(0)); + } + + public static void validateSuccess(Mono single, + StoreResponseValidator validator) { + validateSuccess(single, validator, 10000); + } + + public static void validateSuccess(Mono single, + StoreResponseValidator validator, + long timeout) { + TestSubscriber testSubscriber = new TestSubscriber<>(); + + single.subscribe(testSubscriber); + 
testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS); + testSubscriber.assertNoErrors(); + testSubscriber.assertComplete(); + testSubscriber.assertValueCount(1); + validator.validate(testSubscriber.values().get(0)); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/ReflectionUtils.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/ReflectionUtils.java new file mode 100644 index 0000000000000..a3fd8b6a3e4ef --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/ReflectionUtils.java @@ -0,0 +1,92 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.internal.AsyncDocumentClient; +import com.azure.data.cosmos.CosmosClient; +import com.azure.data.cosmos.internal.RxDocumentClientImpl; +import com.azure.data.cosmos.internal.http.HttpClient; +import org.apache.commons.lang3.reflect.FieldUtils; + +/** + * + * TransportClient transportClient = ReflectionUtils.getDirectHttpsHttpClient(documentClient); + * TransportClient spyTransportClient = Mockito.spy(transportClient); + * ReflectionUtils.setTransportClient(documentClient, spyTransportClient); + * + * // use the documentClient + * // do assertion on the request and response spyTransportClient recieves using Mockito + */ +public class ReflectionUtils { + + private static void set(Object object, T newValue, String fieldName) { + try { + FieldUtils.writeField(object, fieldName, newValue, true); + } catch (IllegalAccessException e) { + throw new RuntimeException(e); + } + } + + private static T get(Class klass, Object object, String fieldName) { + try { + return (T) FieldUtils.readField(object, fieldName, true); + } catch (IllegalAccessException e) { + throw new RuntimeException(e); + } + } + + public static ServerStoreModel getServerStoreModel(RxDocumentClientImpl client) { + return get(ServerStoreModel.class, client, "storeModel"); + } + + public static StoreClient getStoreClient(RxDocumentClientImpl client) { + ServerStoreModel serverStoreModel = getServerStoreModel(client); + return get(StoreClient.class, serverStoreModel, "storeClient"); + } + + public static TransportClient getTransportClient(RxDocumentClientImpl client) { + StoreClient storeClient = getStoreClient(client); + return get(TransportClient.class, storeClient, "transportClient"); + } + + public static HttpClient getDirectHttpsHttpClient(RxDocumentClientImpl client) { + TransportClient transportClient = getTransportClient(client); + assert transportClient instanceof HttpTransportClient; + return get(HttpClient.class, transportClient, "httpClient"); + } + + public static void setDirectHttpsHttpClient(RxDocumentClientImpl client, HttpClient newHttpClient) { + TransportClient transportClient = getTransportClient(client); + assert transportClient instanceof HttpTransportClient; + set(transportClient, newHttpClient, "httpClient"); + } + + public static AsyncDocumentClient getAsyncDocumentClient(CosmosClient client) { + return get(AsyncDocumentClient.class, client, "asyncDocumentClient"); + } + + public static void setAsyncDocumentClient(CosmosClient client, RxDocumentClientImpl rxClient) { + set(client, rxClient, "asyncDocumentClient"); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/ReplicaAddressFactory.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/ReplicaAddressFactory.java new file mode 100644 index 0000000000000..ecba77dc0d190 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/ReplicaAddressFactory.java @@ -0,0 +1,70 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * 
furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.internal.directconnectivity.AddressInformation; +import com.azure.data.cosmos.internal.directconnectivity.Protocol; +import com.azure.data.cosmos.internal.directconnectivity.AddressInformation; +import com.azure.data.cosmos.internal.directconnectivity.Protocol; +import org.apache.commons.lang3.RandomStringUtils; + +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; + +public class ReplicaAddressFactory { + private static String TEMPLATE = "https://by4prdddc03-docdb-1.documents.azure.com:9056" + + "/apps/%s/services/e7c8d429-c379-40c9-9486-65b89b70be2f" + + "/partitions/%s/replicas/%s/"; + + public static String createPartitionPhysicalURI(String partitionId, boolean isPrimary) { + return String.format(TEMPLATE, UUID.randomUUID(), partitionId, RandomStringUtils.randomNumeric(18) + (isPrimary ? "p" : "s")); + } + + public static String createPrimaryPhysicalURI(String partitionId) { + return createPartitionPhysicalURI(partitionId, true); + } + + public static String createSecondaryPhysicalURI(String partitionId) { + return createPartitionPhysicalURI(partitionId, false); + } + + public static AddressInformation createAddressInformation(String partitionId, boolean isPrimary, Protocol protocol) { + String loc = createPartitionPhysicalURI(partitionId, isPrimary); + return new AddressInformation(true, isPrimary, loc, protocol); + } + + public static List createPartitionAddressInformation(String partitionId, + boolean includePrimary, + int numberOfAllReplicas, + Protocol protocol) { + List addressInformationList = new ArrayList<>(); + for (boolean isPrimary = includePrimary; numberOfAllReplicas > 0; numberOfAllReplicas--) { + addressInformationList.add(createAddressInformation(partitionId, isPrimary, protocol)); + isPrimary = false; + } + + return addressInformationList; + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/ReplicatedResourceClientPartitionSplitTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/ReplicatedResourceClientPartitionSplitTest.java new file mode 100644 index 0000000000000..1db2a0d577ee1 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/ReplicatedResourceClientPartitionSplitTest.java @@ -0,0 +1,210 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons 
to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.GoneException; +import com.azure.data.cosmos.internal.*; +import com.azure.data.cosmos.PartitionKeyRangeIsSplittingException; +import io.reactivex.subscribers.TestSubscriber; +import org.assertj.core.api.Assertions; +import org.mockito.Mockito; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; +import reactor.core.publisher.Mono; + +import java.net.URI; +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.function.Function; + +public class ReplicatedResourceClientPartitionSplitTest { + protected static final int TIMEOUT = 120000; + + @DataProvider(name = "partitionIsSplittingArgProvider") + public Object[][] partitionIsSplittingArgProvider() { + return new Object[][]{ + // Consistency mode, number of partition splitting exception till split migration completes + { ConsistencyLevel.EVENTUAL, 1}, + { ConsistencyLevel.EVENTUAL, 2}, + { ConsistencyLevel.EVENTUAL, Integer.MAX_VALUE }, // server side partition split operation never completes + }; + } + + @Test(groups = { "unit" }, dataProvider = "partitionIsSplittingArgProvider", timeOut = TIMEOUT) + public void partitionSplit_RefreshCache_Read(ConsistencyLevel consistencyLevel, int partitionIsSplitting) { + URI secondary1AddressBeforeMove = URI.create("secondary"); + URI secondary1AddressAfterMove = URI.create("secondaryNew"); + + URI primaryAddressBeforeMove = URI.create("primary"); + URI primaryAddressAfterMove = URI.create("primaryNew"); + + String partitionKeyRangeIdBeforeSplit = "1"; + String partitionKeyRangeIdAfterSplit = "2"; + + AddressSelectorWrapper addressSelectorWrapper = AddressSelectorWrapper.Builder.ReplicaMoveBuilder.create(Protocol.HTTPS) + .withPrimaryMove(primaryAddressBeforeMove, primaryAddressAfterMove) + .withSecondaryMove(secondary1AddressBeforeMove, secondary1AddressAfterMove) + .newPartitionKeyRangeIdOnRefresh(r -> partitionKeyRangeWithId(partitionKeyRangeIdAfterSplit)) + .build(); + + long lsn = 54; + long localLsn = 18; + + StoreResponse primaryResponse = StoreResponseBuilder.create() + .withLSN(lsn) + .withLocalLSN(localLsn) + .withHeader(WFConstants.BackendHeaders.QUORUM_ACKED_LOCAL_LSN, Long.toString(localLsn)) + .withHeader(WFConstants.BackendHeaders.CURRENT_REPLICA_SET_SIZE, partitionKeyRangeIdAfterSplit) + .withRequestCharge(1.1) + .build(); + StoreResponse secondaryResponse1 = StoreResponseBuilder.create() + .withLSN(lsn) + .withLocalLSN(localLsn) + .withHeader(WFConstants.BackendHeaders.QUORUM_ACKED_LOCAL_LSN, Long.toString(localLsn)) + .withHeader(WFConstants.BackendHeaders.CURRENT_REPLICA_SET_SIZE, partitionKeyRangeIdAfterSplit) + 
.withRequestCharge(1.1) + .build(); + + TransportClientWrapper.Builder.UriToResultBuilder transportClientWrapperBuilder = TransportClientWrapper.Builder.uriToResultBuilder(); + + PartitionKeyRangeIsSplittingException splittingException = new PartitionKeyRangeIsSplittingException(); + if (partitionIsSplitting == Integer.MAX_VALUE) { + transportClientWrapperBuilder + .exceptionOn(primaryAddressBeforeMove, OperationType.Read, ResourceType.Document, splittingException, true) + .exceptionOn(secondary1AddressBeforeMove, OperationType.Read, ResourceType.Document, splittingException, true); + } else { + for (int i = 0; i < partitionIsSplitting; i++) { + transportClientWrapperBuilder + .exceptionOn(primaryAddressBeforeMove, OperationType.Read, ResourceType.Document, splittingException, false) + .exceptionOn(secondary1AddressBeforeMove, OperationType.Read, ResourceType.Document, splittingException, false); + } + } + + GoneException goneException = new GoneException(); + transportClientWrapperBuilder + .exceptionOn(primaryAddressBeforeMove, OperationType.Read, ResourceType.Document, goneException, true) + .exceptionOn(secondary1AddressBeforeMove, OperationType.Read, ResourceType.Document, goneException, true) + .storeResponseOn(primaryAddressAfterMove, OperationType.Read, ResourceType.Document, secondaryResponse1, true) + .storeResponseOn(secondary1AddressAfterMove, OperationType.Read, ResourceType.Document, primaryResponse, true); + + + TransportClientWrapper transportClientWrapper = transportClientWrapperBuilder.build(); + + GatewayServiceConfiguratorReaderMock gatewayServiceConfigurationReaderWrapper = GatewayServiceConfiguratorReaderMock.from(ConsistencyLevel.STRONG, + 4, + 3, + 4, + 3); + + SessionContainer sessionContainer = new SessionContainer("test"); + + IAuthorizationTokenProvider authorizationTokenProvider = Mockito.mock(IAuthorizationTokenProvider.class); + ReplicatedResourceClient resourceClient = new ReplicatedResourceClient(new Configs(), + addressSelectorWrapper.addressSelector, + sessionContainer, + transportClientWrapper.transportClient, + gatewayServiceConfigurationReaderWrapper.gatewayServiceConfigurationReader, + authorizationTokenProvider, + false, + false); + + RxDocumentServiceRequest request = RxDocumentServiceRequest.createFromName( + OperationType.Read, "/dbs/db/colls/col/docs/docId", ResourceType.Document); + request.requestContext = new DocumentServiceRequestContext(); + request.requestContext.resolvedPartitionKeyRange = partitionKeyRangeWithId(partitionKeyRangeIdBeforeSplit); + request.getHeaders().put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString()); + + Function> prepareRequestAsyncDelegate = null; + Mono storeResponseObs = resourceClient.invokeAsync(request, prepareRequestAsyncDelegate); + + if (partitionIsSplitting < Integer.MAX_VALUE) { + + StoreResponseValidator validator = StoreResponseValidator.create() + .withBELSN(lsn) + .withRequestCharge(1.1) + .build(); + validateSuccess(storeResponseObs, validator); + + addressSelectorWrapper.verifyNumberOfForceCacheRefreshGreaterThanOrEqualTo(1); + } else { + FailureValidator validator = FailureValidator.builder().instanceOf(CosmosClientException.class) + .statusCode(503).build(); + validateFailure(storeResponseObs, validator, TIMEOUT); + } + } + + public static void validateSuccess(Mono> single, + MultiStoreResultValidator validator) { + validateSuccess(single, validator, TIMEOUT); + } + + public static void validateSuccess(Mono> single, + MultiStoreResultValidator validator, long timeout) { + 
TestSubscriber> testSubscriber = new TestSubscriber<>(); + + single.subscribe(testSubscriber); + testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS); + testSubscriber.assertNoErrors(); + testSubscriber.assertComplete(); + testSubscriber.assertValueCount(1); + validator.validate(testSubscriber.values().get(0)); + } + + public static void validateSuccess(Mono single, + StoreResponseValidator validator) { + validateSuccess(single, validator, TIMEOUT); + } + + public static void validateSuccess(Mono single, + StoreResponseValidator validator, long timeout) { + TestSubscriber testSubscriber = new TestSubscriber<>(); + + single.subscribe(testSubscriber); + testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS); + testSubscriber.assertNoErrors(); + testSubscriber.assertComplete(); + testSubscriber.assertValueCount(1); + validator.validate(testSubscriber.values().get(0)); + } + + + public static void validateFailure(Mono single, FailureValidator validator, long timeout) { + + TestSubscriber testSubscriber = new TestSubscriber<>(); + single.subscribe(testSubscriber); + testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS); + testSubscriber.assertNotComplete(); + testSubscriber.assertTerminated(); + Assertions.assertThat(testSubscriber.errorCount()).isEqualTo(1); + validator.validate(testSubscriber.errors().get(0)); + } + + private PartitionKeyRange partitionKeyRangeWithId(String id) { + PartitionKeyRange partitionKeyRange = Mockito.mock(PartitionKeyRange.class); + Mockito.doReturn(id).when(partitionKeyRange).id(); + return partitionKeyRange; + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/ReplicatedResourceClientTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/ReplicatedResourceClientTest.java new file mode 100644 index 0000000000000..19bb302ee0762 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/ReplicatedResourceClientTest.java @@ -0,0 +1,95 @@ +/* + * + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */
+
+package com.azure.data.cosmos.internal.directconnectivity;
+
+import com.azure.data.cosmos.CosmosClientException;
+import com.azure.data.cosmos.GoneException;
+import com.azure.data.cosmos.internal.directconnectivity.*;
+import com.azure.data.cosmos.internal.Configs;
+import com.azure.data.cosmos.internal.IAuthorizationTokenProvider;
+import com.azure.data.cosmos.internal.OperationType;
+import com.azure.data.cosmos.internal.ResourceType;
+import com.azure.data.cosmos.internal.RxDocumentServiceRequest;
+import com.azure.data.cosmos.internal.FailureValidator;
+import io.reactivex.subscribers.TestSubscriber;
+import org.assertj.core.api.Assertions;
+import org.mockito.Matchers;
+import org.mockito.Mockito;
+import org.testng.annotations.BeforeClass;
+import org.testng.annotations.Test;
+import reactor.core.publisher.Mono;
+
+import java.util.concurrent.TimeUnit;
+
+public class ReplicatedResourceClientTest {
+    protected static final int TIMEOUT = 60000;
+    private IAddressResolver addressResolver;
+    private TransportClient transportClient;
+    private boolean enableReadRequestsFallback;
+    public boolean forceAddressRefresh;
+    private GatewayServiceConfigurationReader serviceConfigReader;
+    private IAuthorizationTokenProvider authorizationTokenProvider;
+
+    @BeforeClass(groups = "unit")
+    public void setup() throws Exception {
+        addressResolver = Mockito.mock(IAddressResolver.class);
+        transportClient = Mockito.mock(TransportClient.class);
+        serviceConfigReader = Mockito.mock(GatewayServiceConfigurationReader.class);
+        authorizationTokenProvider = Mockito.mock(IAuthorizationTokenProvider.class);
+    }
+
+    /**
+     * This test verifies that a Gone exception is retried
+     * a fixed number of times before an error is thrown.
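+     * The address resolver is expected to be invoked 7 times: the first try, 5 retries
+     * (with roughly 1, 2, 4, 8 and 15 second waits, about 30 seconds in total), and the last try;
+     * see the verification at the end of the test.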
+     */
+    @Test(groups = { "unit" }, timeOut = TIMEOUT)
+    public void invokeAsyncWithGoneException() {
+        Configs configs = new Configs();
+        ReplicatedResourceClient resourceClient = new ReplicatedResourceClient(configs, new AddressSelector(addressResolver, Protocol.HTTPS), null,
+                transportClient, serviceConfigReader, authorizationTokenProvider, enableReadRequestsFallback, false);
+        FailureValidator validator = FailureValidator.builder().instanceOf(CosmosClientException.class).build();
+        RxDocumentServiceRequest request = Mockito.spy(RxDocumentServiceRequest.create(OperationType.Create, ResourceType.Document));
+
+        Mockito.when(addressResolver.resolveAsync(Matchers.any(), Matchers.anyBoolean()))
+                .thenReturn(Mono.error(new GoneException()));
+        Mono response = resourceClient.invokeAsync(request, null);
+
+        validateFailure(response, validator, TIMEOUT);
+        // the method will fail 7 times: first try, last try, and 5 retries within 30 sec (1, 2, 4, 8, 15 second waits)
+        Mockito.verify(addressResolver, Mockito.times(7)).resolveAsync(Matchers.any(), Matchers.anyBoolean());
+    }
+
+    public static void validateFailure(Mono single, FailureValidator validator, long timeout) {
+
+        TestSubscriber testSubscriber = new TestSubscriber<>();
+        single.subscribe(testSubscriber);
+        testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS);
+        testSubscriber.assertNotComplete();
+        testSubscriber.assertTerminated();
+        Assertions.assertThat(testSubscriber.errorCount()).isEqualTo(1);
+        validator.validate(testSubscriber.errors().get(0));
+    }
+}
diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/RntbdTransportClientTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/RntbdTransportClientTest.java
new file mode 100644
index 0000000000000..094e599e2bd36
--- /dev/null
+++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/RntbdTransportClientTest.java
@@ -0,0 +1,934 @@
+/*
+ * The MIT License (MIT)
+ * Copyright (c) 2018 Microsoft Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.ConflictException; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.ForbiddenException; +import com.azure.data.cosmos.GoneException; +import com.azure.data.cosmos.LockedException; +import com.azure.data.cosmos.MethodNotAllowedException; +import com.azure.data.cosmos.PartitionKeyRangeGoneException; +import com.azure.data.cosmos.PreconditionFailedException; +import com.azure.data.cosmos.RequestEntityTooLargeException; +import com.azure.data.cosmos.RequestRateTooLargeException; +import com.azure.data.cosmos.RequestTimeoutException; +import com.azure.data.cosmos.RetryWithException; +import com.azure.data.cosmos.ServiceUnavailableException; +import com.azure.data.cosmos.UnauthorizedException; +import com.azure.data.cosmos.internal.directconnectivity.RntbdTransportClient; +import com.azure.data.cosmos.internal.directconnectivity.ServerProperties; +import com.azure.data.cosmos.internal.directconnectivity.RntbdTransportClient; +import com.azure.data.cosmos.internal.directconnectivity.ServerProperties; +import com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdContext; +import com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdContextNegotiator; +import com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdContextRequest; +import com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdEndpoint; +import com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdRequest; +import com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdRequestArgs; +import com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdRequestEncoder; +import com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdRequestManager; +import com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdRequestRecord; +import com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdRequestTimer; +import com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdResponse; +import com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdResponseDecoder; +import com.azure.data.cosmos.internal.directconnectivity.rntbd.RntbdUUID; +import com.azure.data.cosmos.BadRequestException; +import com.azure.data.cosmos.internal.*; +import com.azure.data.cosmos.InternalServerErrorException; +import com.azure.data.cosmos.InvalidPartitionException; +import com.azure.data.cosmos.NotFoundException; +import com.azure.data.cosmos.PartitionIsMigratingException; +import com.azure.data.cosmos.PartitionKeyRangeIsSplittingException; +import com.google.common.base.Strings; +import com.google.common.collect.ImmutableMap; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.channel.ChannelHandler; +import io.netty.channel.embedded.EmbeddedChannel; +import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.handler.logging.LogLevel; +import io.netty.handler.ssl.SslContext; +import io.netty.handler.ssl.SslContextBuilder; +import io.reactivex.subscribers.TestSubscriber; +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; +import reactor.core.publisher.Mono; + +import java.net.ConnectException; +import java.net.URI; +import java.time.Duration; +import java.util.Arrays; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.TimeUnit; 
+import java.util.stream.Stream; + +import static com.azure.data.cosmos.internal.HttpConstants.HttpHeaders; +import static com.azure.data.cosmos.internal.HttpConstants.HttpMethods; +import static com.azure.data.cosmos.internal.HttpConstants.SubStatusCodes; +import static org.assertj.core.api.Assertions.assertThat; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertTrue; +import static org.testng.Assert.fail; + +public final class RntbdTransportClientTest { + + private static final Logger logger = LoggerFactory.getLogger(RntbdTransportClientTest.class); + private static final int lsn = 5; + private static final ByteBuf noContent = Unpooled.wrappedBuffer(new byte[0]); + private static final String partitionKeyRangeId = "3"; + private static final URI physicalAddress = URI.create("rntbd://host:10251/replica-path/"); + private static final Duration requestTimeout = Duration.ofSeconds(1000); + + @DataProvider(name = "fromMockedNetworkFailureToExpectedDocumentClientException") + public Object[][] fromMockedNetworkFailureToExpectedDocumentClientException() { + + return new Object[][] { + }; + } + + @DataProvider(name = "fromMockedRntbdResponseToExpectedDocumentClientException") + public Object[][] fromMockedRntbdResponseToExpectedDocumentClientException() { + + return new Object[][] { + { + // 1 BadRequestException + + FailureValidator.builder() + .instanceOf(BadRequestException.class) + .lsn(lsn) + .partitionKeyRangeId(partitionKeyRangeId) + .resourceAddress(null), + RxDocumentServiceRequest.create( + OperationType.Read, + ResourceType.DocumentCollection, + "/dbs/db/colls/col", + ImmutableMap.of( + HttpHeaders.LSN, Integer.toString(lsn), + HttpHeaders.PARTITION_KEY_RANGE_ID, partitionKeyRangeId + )), + new RntbdResponse( + RntbdUUID.EMPTY, + 400, + ImmutableMap.of( + HttpHeaders.LSN, Integer.toString(lsn), + HttpHeaders.PARTITION_KEY_RANGE_ID, partitionKeyRangeId, + HttpHeaders.TRANSPORT_REQUEST_ID, Long.toString(1L) + ), + noContent) + }, + { + // 2 UnauthorizedException + + FailureValidator.builder() + .instanceOf(UnauthorizedException.class) + .lsn(lsn) + .partitionKeyRangeId(partitionKeyRangeId) + .resourceAddress(null), + RxDocumentServiceRequest.create( + OperationType.Read, + ResourceType.DocumentCollection, + "/dbs/db/colls/col", + ImmutableMap.of( + HttpHeaders.LSN, Integer.toString(lsn), + HttpHeaders.PARTITION_KEY_RANGE_ID, partitionKeyRangeId + )), + new RntbdResponse( + RntbdUUID.EMPTY, + 401, + ImmutableMap.of( + HttpHeaders.LSN, Integer.toString(lsn), + HttpHeaders.PARTITION_KEY_RANGE_ID, partitionKeyRangeId, + HttpHeaders.TRANSPORT_REQUEST_ID, Long.toString(2L) + ), + noContent) + }, + { + // 3 ForbiddenException + + FailureValidator.builder() + .instanceOf(ForbiddenException.class) + .lsn(lsn) + .partitionKeyRangeId(partitionKeyRangeId) + .resourceAddress(null), + RxDocumentServiceRequest.create( + OperationType.Read, + ResourceType.DocumentCollection, + "/dbs/db/colls/col", + ImmutableMap.of( + HttpHeaders.LSN, Integer.toString(lsn), + HttpHeaders.PARTITION_KEY_RANGE_ID, partitionKeyRangeId + )), + new RntbdResponse( + RntbdUUID.EMPTY, + 403, + ImmutableMap.of( + HttpHeaders.LSN, Integer.toString(lsn), + HttpHeaders.PARTITION_KEY_RANGE_ID, partitionKeyRangeId, + HttpHeaders.TRANSPORT_REQUEST_ID, Long.toString(3L) + ), + noContent) + }, + { + // 4 NotFoundException + + FailureValidator.builder() + .instanceOf(NotFoundException.class) + .lsn(lsn) + .partitionKeyRangeId(partitionKeyRangeId) + .resourceAddress(null), + 
RxDocumentServiceRequest.create( + OperationType.Read, + ResourceType.DocumentCollection, + "/dbs/db/colls/col", + ImmutableMap.of( + HttpHeaders.LSN, Integer.toString(lsn), + HttpHeaders.PARTITION_KEY_RANGE_ID, partitionKeyRangeId + )), + new RntbdResponse( + RntbdUUID.EMPTY, + 404, + ImmutableMap.of( + HttpHeaders.LSN, Integer.toString(lsn), + HttpHeaders.PARTITION_KEY_RANGE_ID, partitionKeyRangeId, + HttpHeaders.TRANSPORT_REQUEST_ID, Long.toString(4L) + ), + noContent) + }, + { + // 5 MethodNotAllowedException + + FailureValidator.builder() + .instanceOf(MethodNotAllowedException.class) + .lsn(lsn) + .partitionKeyRangeId(partitionKeyRangeId) + .resourceAddress(null), + RxDocumentServiceRequest.create( + OperationType.Read, + ResourceType.DocumentCollection, + "/dbs/db/colls/col", + ImmutableMap.of( + HttpHeaders.LSN, Integer.toString(lsn), + HttpHeaders.PARTITION_KEY_RANGE_ID, partitionKeyRangeId + )), + new RntbdResponse( + RntbdUUID.EMPTY, + 405, + ImmutableMap.of( + HttpHeaders.LSN, Integer.toString(lsn), + HttpHeaders.PARTITION_KEY_RANGE_ID, partitionKeyRangeId, + HttpHeaders.TRANSPORT_REQUEST_ID, Long.toString(5L) + ), + noContent) + }, + { + // 6 RequestTimeoutException + + FailureValidator.builder() + .instanceOf(RequestTimeoutException.class) + .lsn(lsn) + .partitionKeyRangeId(partitionKeyRangeId) + .resourceAddress(null), + RxDocumentServiceRequest.create( + OperationType.Read, + ResourceType.DocumentCollection, + "/dbs/db/colls/col", + ImmutableMap.of( + HttpHeaders.LSN, Integer.toString(lsn), + HttpHeaders.PARTITION_KEY_RANGE_ID, partitionKeyRangeId + )), + new RntbdResponse( + RntbdUUID.EMPTY, + 408, + ImmutableMap.of( + HttpHeaders.LSN, Integer.toString(lsn), + HttpHeaders.PARTITION_KEY_RANGE_ID, partitionKeyRangeId, + HttpHeaders.TRANSPORT_REQUEST_ID, Long.toString(6L) + ), + noContent) + }, + { + // 7 ConflictException + + FailureValidator.builder() + .instanceOf(ConflictException.class) + .lsn(lsn) + .partitionKeyRangeId(partitionKeyRangeId) + .resourceAddress(null), + RxDocumentServiceRequest.create( + OperationType.Read, + ResourceType.DocumentCollection, + "/dbs/db/colls/col", + ImmutableMap.of( + HttpHeaders.LSN, Integer.toString(lsn), + HttpHeaders.PARTITION_KEY_RANGE_ID, partitionKeyRangeId + )), + new RntbdResponse( + RntbdUUID.EMPTY, + 409, + ImmutableMap.of( + HttpHeaders.LSN, Integer.toString(lsn), + HttpHeaders.PARTITION_KEY_RANGE_ID, partitionKeyRangeId, + HttpHeaders.TRANSPORT_REQUEST_ID, Long.toString(7L) + ), + noContent) + }, + { + // 8 InvalidPartitionException + + FailureValidator.builder() + .instanceOf(InvalidPartitionException.class) + .lsn(lsn) + .partitionKeyRangeId(partitionKeyRangeId) + .resourceAddress(null), + RxDocumentServiceRequest.create( + OperationType.Read, + ResourceType.DocumentCollection, + "/dbs/db/colls/col", + ImmutableMap.of( + HttpHeaders.LSN, Integer.toString(lsn), + HttpHeaders.PARTITION_KEY_RANGE_ID, partitionKeyRangeId + )), + new RntbdResponse( + RntbdUUID.EMPTY, + 410, + ImmutableMap.of( + HttpHeaders.LSN, Integer.toString(lsn), + HttpHeaders.PARTITION_KEY_RANGE_ID, partitionKeyRangeId, + HttpHeaders.SUB_STATUS, Integer.toString(SubStatusCodes.NAME_CACHE_IS_STALE), + HttpHeaders.TRANSPORT_REQUEST_ID, Long.toString(8L) + ), + noContent) + }, + { + // 9 PartitionKeyRangeGoneException + + FailureValidator.builder() + .instanceOf(PartitionKeyRangeGoneException.class) + .lsn(lsn) + .partitionKeyRangeId(partitionKeyRangeId) + .resourceAddress(null), + RxDocumentServiceRequest.create( + OperationType.Read, + 
ResourceType.DocumentCollection, + "/dbs/db/colls/col", + ImmutableMap.of( + HttpHeaders.LSN, Integer.toString(lsn), + HttpHeaders.PARTITION_KEY_RANGE_ID, partitionKeyRangeId + )), + new RntbdResponse( + RntbdUUID.EMPTY, + 410, + ImmutableMap.of( + HttpHeaders.LSN, Integer.toString(lsn), + HttpHeaders.PARTITION_KEY_RANGE_ID, partitionKeyRangeId, + HttpHeaders.SUB_STATUS, Integer.toString(SubStatusCodes.PARTITION_KEY_RANGE_GONE), + HttpHeaders.TRANSPORT_REQUEST_ID, Long.toString(9L) + ), + noContent) + }, + { + // 10 PartitionKeyRangeIsSplittingException + + FailureValidator.builder() + .instanceOf(PartitionKeyRangeIsSplittingException.class) + .lsn(lsn) + .partitionKeyRangeId(partitionKeyRangeId) + .resourceAddress(null), + RxDocumentServiceRequest.create( + OperationType.Read, + ResourceType.DocumentCollection, + "/dbs/db/colls/col", + ImmutableMap.of( + HttpHeaders.LSN, Integer.toString(lsn), + HttpHeaders.PARTITION_KEY_RANGE_ID, partitionKeyRangeId + )), + new RntbdResponse( + RntbdUUID.EMPTY, + 410, + ImmutableMap.of( + HttpHeaders.LSN, Integer.toString(lsn), + HttpHeaders.PARTITION_KEY_RANGE_ID, partitionKeyRangeId, + HttpHeaders.SUB_STATUS, Integer.toString(SubStatusCodes.COMPLETING_SPLIT), + HttpHeaders.TRANSPORT_REQUEST_ID, Long.toString(10L) + ), + noContent) + }, + { + // 11 PartitionIsMigratingException + + FailureValidator.builder() + .instanceOf(PartitionIsMigratingException.class) + .lsn(lsn) + .partitionKeyRangeId(partitionKeyRangeId) + .resourceAddress(null), + RxDocumentServiceRequest.create( + OperationType.Read, + ResourceType.DocumentCollection, + "/dbs/db/colls/col", + ImmutableMap.of( + HttpHeaders.LSN, Integer.toString(lsn), + HttpHeaders.PARTITION_KEY_RANGE_ID, partitionKeyRangeId + )), + new RntbdResponse( + RntbdUUID.EMPTY, + 410, + ImmutableMap.of( + HttpHeaders.LSN, Integer.toString(lsn), + HttpHeaders.PARTITION_KEY_RANGE_ID, partitionKeyRangeId, + HttpHeaders.SUB_STATUS, Integer.toString(SubStatusCodes.COMPLETING_PARTITION_MIGRATION), + HttpHeaders.TRANSPORT_REQUEST_ID, Long.toString(11L) + ), + noContent) + }, + { + // 12 GoneException + + FailureValidator.builder() + .instanceOf(GoneException.class) + .lsn(lsn) + .partitionKeyRangeId(partitionKeyRangeId) + .resourceAddress(null), + RxDocumentServiceRequest.create( + OperationType.Read, + ResourceType.DocumentCollection, + "/dbs/db/colls/col", + ImmutableMap.of( + HttpHeaders.LSN, Integer.toString(lsn), + HttpHeaders.PARTITION_KEY_RANGE_ID, partitionKeyRangeId + )), + new RntbdResponse( + RntbdUUID.EMPTY, + 410, + ImmutableMap.of( + HttpHeaders.LSN, Integer.toString(lsn), + HttpHeaders.PARTITION_KEY_RANGE_ID, partitionKeyRangeId, + HttpHeaders.SUB_STATUS, String.valueOf(SubStatusCodes.UNKNOWN), + HttpHeaders.TRANSPORT_REQUEST_ID, Long.toString(12L) + ), + noContent) + }, + { + // 13 PreconditionFailedException + + FailureValidator.builder() + .instanceOf(PreconditionFailedException.class) + .lsn(lsn) + .partitionKeyRangeId(partitionKeyRangeId) + .resourceAddress(null), + RxDocumentServiceRequest.create( + OperationType.Read, + ResourceType.DocumentCollection, + "/dbs/db/colls/col", + ImmutableMap.of( + HttpHeaders.LSN, Integer.toString(lsn), + HttpHeaders.PARTITION_KEY_RANGE_ID, partitionKeyRangeId + )), + new RntbdResponse( + RntbdUUID.EMPTY, + 412, + ImmutableMap.of( + HttpHeaders.LSN, Integer.toString(lsn), + HttpHeaders.PARTITION_KEY_RANGE_ID, partitionKeyRangeId, + HttpHeaders.TRANSPORT_REQUEST_ID, Long.toString(13L) + ), + noContent) + }, + { + // 14 RequestEntityTooLargeException + + 
FailureValidator.builder() + .instanceOf(RequestEntityTooLargeException.class) + .lsn(lsn) + .partitionKeyRangeId(partitionKeyRangeId) + .resourceAddress(null), + RxDocumentServiceRequest.create( + OperationType.Read, + ResourceType.DocumentCollection, + "/dbs/db/colls/col", + ImmutableMap.of( + HttpHeaders.LSN, Integer.toString(lsn), + HttpHeaders.PARTITION_KEY_RANGE_ID, partitionKeyRangeId + )), + new RntbdResponse( + RntbdUUID.EMPTY, + 413, + ImmutableMap.of( + HttpHeaders.LSN, Integer.toString(lsn), + HttpHeaders.PARTITION_KEY_RANGE_ID, partitionKeyRangeId, + HttpHeaders.TRANSPORT_REQUEST_ID, Long.toString(14L) + ), + noContent) + }, + { + // 15 LockedException + + FailureValidator.builder() + .instanceOf(LockedException.class) + .lsn(lsn) + .partitionKeyRangeId(partitionKeyRangeId) + .resourceAddress(null), + RxDocumentServiceRequest.create( + OperationType.Read, + ResourceType.DocumentCollection, + "/dbs/db/colls/col", + ImmutableMap.of( + HttpHeaders.LSN, Integer.toString(lsn), + HttpHeaders.PARTITION_KEY_RANGE_ID, partitionKeyRangeId + )), + new RntbdResponse( + RntbdUUID.EMPTY, + 423, + ImmutableMap.of( + HttpHeaders.LSN, Integer.toString(lsn), + HttpHeaders.PARTITION_KEY_RANGE_ID, partitionKeyRangeId, + HttpHeaders.TRANSPORT_REQUEST_ID, Long.toString(15L) + ), + noContent) + }, + { + // 16 RequestRateTooLargeException + + FailureValidator.builder() + .instanceOf(RequestRateTooLargeException.class) + .lsn(lsn) + .partitionKeyRangeId(partitionKeyRangeId) + .resourceAddress(null), + RxDocumentServiceRequest.create( + OperationType.Read, + ResourceType.DocumentCollection, + "/dbs/db/colls/col", + ImmutableMap.of( + HttpHeaders.LSN, Integer.toString(lsn), + HttpHeaders.PARTITION_KEY_RANGE_ID, partitionKeyRangeId + )), + new RntbdResponse( + RntbdUUID.EMPTY, + 429, + ImmutableMap.of( + HttpHeaders.LSN, Integer.toString(lsn), + HttpHeaders.PARTITION_KEY_RANGE_ID, partitionKeyRangeId, + HttpHeaders.TRANSPORT_REQUEST_ID, Long.toString(16L) + ), + noContent) + }, + { + // 17 RetryWithException + + FailureValidator.builder() + .instanceOf(RetryWithException.class) + .lsn(lsn) + .partitionKeyRangeId(partitionKeyRangeId) + .resourceAddress(null), + RxDocumentServiceRequest.create( + OperationType.Read, + ResourceType.DocumentCollection, + "/dbs/db/colls/col", + ImmutableMap.of( + HttpHeaders.LSN, Integer.toString(lsn), + HttpHeaders.PARTITION_KEY_RANGE_ID, partitionKeyRangeId + )), + new RntbdResponse( + RntbdUUID.EMPTY, + 449, + ImmutableMap.of( + HttpHeaders.LSN, Integer.toString(lsn), + HttpHeaders.PARTITION_KEY_RANGE_ID, partitionKeyRangeId, + HttpHeaders.TRANSPORT_REQUEST_ID, Long.toString(17L) + ), + noContent) + }, + { + // 18 InternalServerErrorException + + FailureValidator.builder() + .instanceOf(InternalServerErrorException.class) + .lsn(lsn) + .partitionKeyRangeId(partitionKeyRangeId) + .resourceAddress(null), + RxDocumentServiceRequest.create( + OperationType.Read, + ResourceType.DocumentCollection, + "/dbs/db/colls/col", + ImmutableMap.of( + HttpHeaders.LSN, Integer.toString(lsn), + HttpHeaders.PARTITION_KEY_RANGE_ID, partitionKeyRangeId + )), + new RntbdResponse( + RntbdUUID.EMPTY, + 500, + ImmutableMap.of( + HttpHeaders.LSN, Integer.toString(lsn), + HttpHeaders.PARTITION_KEY_RANGE_ID, partitionKeyRangeId, + HttpHeaders.TRANSPORT_REQUEST_ID, Long.toString(18L) + ), + noContent) + }, + { + // 19 ServiceUnavailableException + + FailureValidator.builder() + .instanceOf(ServiceUnavailableException.class) + .lsn(lsn) + .partitionKeyRangeId(partitionKeyRangeId) + 
.resourceAddress(null), + RxDocumentServiceRequest.create( + OperationType.Read, + ResourceType.DocumentCollection, + "/dbs/db/colls/col", + ImmutableMap.of( + HttpHeaders.LSN, Integer.toString(lsn), + HttpHeaders.PARTITION_KEY_RANGE_ID, partitionKeyRangeId + )), + new RntbdResponse( + RntbdUUID.EMPTY, + 503, + ImmutableMap.of( + HttpHeaders.LSN, Integer.toString(lsn), + HttpHeaders.PARTITION_KEY_RANGE_ID, partitionKeyRangeId, + HttpHeaders.TRANSPORT_REQUEST_ID, Long.toString(19L) + ), + noContent) + }, + }; + } + + /** + * Verifies that a request for a non-existent resource produces a {@link }GoneException} + */ + @Test(enabled = false, groups = { "direct" }) + public void verifyGoneResponseMapsToGoneException() throws Exception { + + final RntbdTransportClient.Options options = new RntbdTransportClient.Options.Builder(requestTimeout).build(); + final SslContext sslContext = SslContextBuilder.forClient().build(); + + try (final RntbdTransportClient transportClient = new RntbdTransportClient(options, sslContext)) { + + final BaseAuthorizationTokenProvider authorizationTokenProvider = new BaseAuthorizationTokenProvider( + RntbdTestConfiguration.AccountKey + ); + + final URI physicalAddress = new URI("rntbd://" + + RntbdTestConfiguration.RntbdAuthority + + "/apps/DocDbApp/services/DocDbMaster0/partitions/780e44f4-38c8-11e6-8106-8cdcd42c33be/replicas/1p/" + ); + + final ImmutableMap.Builder builder = ImmutableMap.builder(); + + builder.put(HttpHeaders.X_DATE, Utils.nowAsRFC1123()); + + final String token = authorizationTokenProvider.generateKeyAuthorizationSignature(HttpMethods.GET, + Paths.DATABASE_ACCOUNT_PATH_SEGMENT, + ResourceType.DatabaseAccount, + builder.build() + ); + + builder.put(HttpHeaders.AUTHORIZATION, token); + + final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, + ResourceType.DatabaseAccount, + Paths.DATABASE_ACCOUNT_PATH_SEGMENT, + builder.build() + ); + + final Mono responseMono = transportClient.invokeStoreAsync(physicalAddress, request); + + responseMono.subscribe(response -> { }, error -> { + final String format = "Expected %s, not %s"; + assertTrue(error instanceof GoneException, String.format(format, GoneException.class, error.getClass())); + final Throwable cause = error.getCause(); + if (cause != null) { + // assumption: cosmos isn't listening on 10251 + assertTrue(cause instanceof ConnectException, String.format(format, ConnectException.class, error.getClass())); + } + }); + + } catch (final Exception error) { + final String message = String.format("%s: %s", error.getClass(), error.getMessage()); + fail(message, error); + } + } + + /** + * Validates the error handling behavior of {@link RntbdTransportClient} for network failures + *

+ * These are the exceptions that cannot be derived from server responses. They are mapped from Netty channel + * failures simulated by {@link FakeChannel}. + * + * @param builder A failure validator builder to confirm that response is correctly mapped to an exception + * @param request An RNTBD request instance + * @param exception An exception mapping + */ + @Test(enabled = false, groups = { "unit" }, dataProvider = "fromMockedNetworkFailureToExpectedDocumentClientException") + public void verifyNetworkFailure( + final FailureValidator.Builder builder, + final RxDocumentServiceRequest request, + final CosmosClientException exception + ) { + // TODO: DANOBLE: Implement RntbdTransportClientTest.verifyNetworkFailure + // Links: + // https://msdata.visualstudio.com/CosmosDB/_workitems/edit/378750 + throw new UnsupportedOperationException("TODO: DANOBLE: Implement this test"); + } + + /** + * Validates the error handling behavior of the {@link RntbdTransportClient} for HTTP status codes >= 400 + * + * @param builder A failure validator builder to confirm that response is correctly mapped to an exception + * @param request An RNTBD request instance + * @param response The RNTBD response instance to be returned as a result of the request + */ + @Test(enabled = true, groups = { "unit" }, dataProvider = "fromMockedRntbdResponseToExpectedDocumentClientException") + public void verifyRequestFailures( + final FailureValidator.Builder builder, + final RxDocumentServiceRequest request, + final RntbdResponse response + ) { + final UserAgentContainer userAgent = new UserAgentContainer(); + final Duration timeout = Duration.ofMillis(1000); + + try (final RntbdTransportClient client = getRntbdTransportClientUnderTest(userAgent, timeout, response)) { + + final Mono<StoreResponse> responseMono; + + try { + responseMono = client.invokeStoreAsync(physicalAddress, request); + } catch (final Exception error) { + throw new AssertionError(String.format("%s: %s", error.getClass(), error)); + } + + this.validateFailure(responseMono, builder.build()); + } + } + + private static RntbdTransportClient getRntbdTransportClientUnderTest( + final UserAgentContainer userAgent, + final Duration requestTimeout, + final RntbdResponse expected + ) { + + final RntbdTransportClient.Options options = new RntbdTransportClient.Options.Builder(requestTimeout) + .userAgent(userAgent) + .build(); + + final SslContext sslContext; + + try { + sslContext = SslContextBuilder.forClient().build(); + } catch (final Exception error) { + throw new AssertionError(String.format("%s: %s", error.getClass(), error.getMessage())); + } + + return new RntbdTransportClient(new FakeEndpoint.Provider(options, sslContext, expected)); + } + + private void validateFailure(final Mono<StoreResponse> responseMono, final FailureValidator validator) { + validateFailure(responseMono, validator, requestTimeout.toMillis()); + } + + private static void validateFailure( + final Mono<StoreResponse> mono, final FailureValidator validator, final long timeout + ) { + + final TestSubscriber<StoreResponse> subscriber = new TestSubscriber<>(); + mono.subscribe(subscriber); + + subscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS); + assertThat(subscriber.errorCount()).isEqualTo(1); + subscriber.assertSubscribed(); + subscriber.assertNoValues(); + validator.validate(subscriber.errors().get(0)); + } + + // region Types + + private static final class FakeChannel extends EmbeddedChannel { + + private static final ServerProperties serverProperties = new ServerProperties("agent", "3.0.0"); + private final BlockingQueue<RntbdResponse> responses; + 
+ FakeChannel(final BlockingQueue responses, final ChannelHandler... handlers) { + super(handlers); + this.responses = responses; + } + + @Override + protected void handleInboundMessage(final Object message) { + super.handleInboundMessage(message); + assertTrue(message instanceof ByteBuf); + } + + @Override + protected void handleOutboundMessage(final Object message) { + + assertTrue(message instanceof ByteBuf); + + final ByteBuf out = Unpooled.buffer(); + final ByteBuf in = (ByteBuf) message; + + // This is the end of the outbound pipeline and so we can do what we wish with the outbound message + + if (in.getUnsignedIntLE(4) == 0) { + + final RntbdContextRequest request = RntbdContextRequest.decode(in.copy()); + final RntbdContext rntbdContext = RntbdContext.from(request, serverProperties, HttpResponseStatus.OK); + + rntbdContext.encode(out); + + } else { + + final RntbdRequest rntbdRequest = RntbdRequest.decode(in.copy()); + final RntbdResponse rntbdResponse; + + try { + rntbdResponse = this.responses.take(); + } catch (final Exception error) { + throw new AssertionError(String.format("%s: %s", error.getClass(), error.getMessage())); + } + + assertEquals(rntbdRequest.getTransportRequestId(), rntbdResponse.getTransportRequestId()); + rntbdResponse.encode(out); + out.setBytes(8, in.slice(8, 16)); // Overwrite activityId + } + + this.writeInbound(out); + } + } + + private static final class FakeEndpoint implements RntbdEndpoint { + + final RntbdRequestTimer requestTimer; + final FakeChannel fakeChannel; + final URI physicalAddress; + + private FakeEndpoint( + final Config config, final RntbdRequestTimer timer, final URI physicalAddress, + final RntbdResponse... expected + ) { + + final ArrayBlockingQueue responses = new ArrayBlockingQueue<>( + expected.length, true, Arrays.asList(expected) + ); + + RntbdRequestManager requestManager = new RntbdRequestManager(30); + this.physicalAddress = physicalAddress; + this.requestTimer = timer; + + this.fakeChannel = new FakeChannel(responses, + new RntbdContextNegotiator(requestManager, config.getUserAgent()), + new RntbdRequestEncoder(), + new RntbdResponseDecoder(), + requestManager + ); + } + + @Override + public String getName() { + return "FakeEndpoint"; + } + + @Override + public void close() { + this.fakeChannel.close().syncUninterruptibly(); + } + + @Override + public RntbdRequestRecord request(final RntbdRequestArgs requestArgs) { + final RntbdRequestRecord requestRecord = new RntbdRequestRecord(requestArgs, this.requestTimer); + this.fakeChannel.writeOutbound(requestRecord); + return requestRecord; + } + + static class Provider implements RntbdEndpoint.Provider { + + final Config config; + final RntbdResponse expected; + final RntbdRequestTimer timer; + + Provider(RntbdTransportClient.Options options, SslContext sslContext, RntbdResponse expected) { + this.config = new Config(options, sslContext, LogLevel.WARN); + this.timer = new RntbdRequestTimer(config.getRequestTimeout()); + this.expected = expected; + } + + @Override + public void close() throws RuntimeException { + this.timer.close(); + } + + @Override + public Config config() { + return this.config; + } + + @Override + public int count() { + return 1; + } + + @Override + public RntbdEndpoint get(URI physicalAddress) { + return new FakeEndpoint(config, timer, physicalAddress, expected); + } + + @Override + public Stream list() { + return Stream.empty(); + } + } + } + + private static final class RntbdTestConfiguration { + + static String AccountHost = System.getProperty("ACCOUNT_HOST", 
+ StringUtils.defaultString( + Strings.emptyToNull(System.getenv().get("ACCOUNT_HOST")), + "https://localhost:8081/" + ) + ); + + static String AccountKey = System.getProperty("ACCOUNT_KEY", + StringUtils.defaultString( + Strings.emptyToNull(System.getenv().get("ACCOUNT_KEY")), + "C2y6yDjf5/R+ob0N8A7Cgv30VRDJIWEHLM+4QDU5DE2nQ9nDuVTqobD4b8mGGyPMbIZnqyMsEcaGQy67XIw/Jw==" + ) + ); + + static String RntbdAuthority = System.getProperty("rntbd.authority", + StringUtils.defaultString( + Strings.emptyToNull(System.getenv().get("RNTBD_AUTHORITY")), + String.format("%s:10251", URI.create(AccountHost).getHost()) + ) + ); + + private RntbdTestConfiguration() { + } + } + + // endregion +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/StoreReaderDotNetTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/StoreReaderDotNetTest.java new file mode 100644 index 0000000000000..5891bde18a51b --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/StoreReaderDotNetTest.java @@ -0,0 +1,891 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.GoneException; +import com.azure.data.cosmos.internal.ISessionContainer; +import com.azure.data.cosmos.ServiceUnavailableException; +import com.azure.data.cosmos.internal.*; +import com.azure.data.cosmos.InvalidPartitionException; +import io.reactivex.subscribers.TestSubscriber; +import org.apache.commons.lang3.StringUtils; +import org.mockito.Matchers; +import org.mockito.Mockito; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testng.annotations.Test; +import reactor.core.publisher.Mono; + +import java.net.URI; +import java.net.URISyntaxException; +import java.time.Duration; +import java.util.ArrayDeque; +import java.util.List; +import java.util.NoSuchElementException; +import java.util.Queue; +import java.util.concurrent.TimeUnit; + +import static org.assertj.core.api.Assertions.assertThat; + +public class StoreReaderDotNetTest { + private static final Logger logger = LoggerFactory.getLogger(StoreReaderDotNetTest.class); + @Test(groups = "unit") + public void addressCache() { + // create a real document service request + RxDocumentServiceRequest entity = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.Document); + + // setup mocks for address information + AddressInformation[] addressInformation = new AddressInformation[3]; + for (int i = 0; i < 3; i++) { + addressInformation[i] = new AddressInformation(true, true, "http://replica-" + i, Protocol.HTTPS); + } + + IAddressResolver mockAddressCache = Mockito.mock(IAddressResolver.class); + + Mockito.doReturn(Mono.just(addressInformation)) + .when(mockAddressCache) + .resolveAsync(Mockito.any(RxDocumentServiceRequest.class), Mockito.eq(false)); + + // validate that the mock works + AddressInformation[] addressInfo = mockAddressCache.resolveAsync(entity, false).block(); + assertThat(addressInfo[0]).isEqualTo(addressInformation[0]); + } + + /** + * Tests for TransportClient + */ + @Test(groups = "unit") + public void transportClient() { + // create a real document service request + RxDocumentServiceRequest entity = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.Document); + + // setup mocks for address information + AddressInformation[] addressInformation = new AddressInformation[3]; + + // construct URIs that look like the actual uri + // rntbd://yt1prdddc01-docdb-1.documents.azure.com:14003/apps/ce8ab332-f59e-4ce7-a68e-db7e7cfaa128/services/68cc0b50-04c6-4716-bc31-2dfefd29e3ee/partitions/5604283d-0907-4bf4-9357-4fa9e62de7b5/replicas/131170760736528207s/ + for (int i = 0; i < 3; i++) { + String physicalUri = + "rntbd://dummytenant.documents.azure.com:14003/apps/APPGUID/services/SERVICEGUID/partitions/PARTITIONGUID/replicas/" + + Integer.toString(i) + (i == 0 ? "p" : "s") + "/"; + addressInformation[i] = new AddressInformation(true, true, physicalUri, Protocol.TCP); + + } + + // create objects for all the dependencies of the StoreReader + TransportClient mockTransportClient = Mockito.mock(TransportClient.class); + + // create mock store response object + StoreResponseBuilder srb = new StoreResponseBuilder(); + + + // set lsn and activityid on the store response. 
+ srb.withHeader(WFConstants.BackendHeaders.ACTIVITY_ID, "ACTIVITYID1_1" ); + srb.withHeader(WFConstants.BackendHeaders.LSN, "50"); + + // setup mock transport client + Mockito.doReturn(Mono.just(srb.build())) + .when(mockTransportClient) + .invokeResourceOperationAsync( + Mockito.eq(URI.create(addressInformation[0].getPhysicalUri())), + Mockito.any(RxDocumentServiceRequest.class)); + + + + // get response from mock object + StoreResponse response = mockTransportClient.invokeResourceOperationAsync(URI.create(addressInformation[0].getPhysicalUri()), entity).block(); + + // validate that the LSN matches + // validate that the ActivityId Matches + + StoreResponseValidator validator = StoreResponseValidator.create().withBELSN(50).withBEActivityId("ACTIVITYID1_1").build(); + validator.validate(response); + } + + private TransportClient getMockTransportClientDuringUpgrade(AddressInformation[] addressInformation) { + // create objects for all the dependencies of the StoreReader + TransportClient mockTransportClient = Mockito.mock(TransportClient.class); + + // create mock store response object + // set lsn and activityid on the store response. + StoreResponse mockStoreResponseFast = StoreResponseBuilder.create() + .withHeader(WFConstants.BackendHeaders.LSN, "50") + .withHeader(WFConstants.BackendHeaders.ACTIVITY_ID, "ACTIVITYID1_1") + .build(); + + StoreResponse mockStoreResponseSlow = StoreResponseBuilder.create() + .withHeader(WFConstants.BackendHeaders.LSN, "30") + .withHeader(WFConstants.BackendHeaders.ACTIVITY_ID, "ACTIVITYID1_1") + .build(); + + // setup mock transport client for the first replica + Mockito.doReturn(Mono.just(mockStoreResponseFast)) + .when(mockTransportClient) + .invokeResourceOperationAsync(Mockito.eq(URI.create(addressInformation[0].getPhysicalUri())), Mockito.any(RxDocumentServiceRequest.class)); + + + // setup mock transport client with a sequence of outputs + Mockito.doReturn(Mono.just(mockStoreResponseFast)) // initial read response + .doReturn(Mono.just(mockStoreResponseFast)) // barrier retry, count 1 + .doReturn(Mono.just(mockStoreResponseFast)) // barrier retry, count 2 + .doReturn(Mono.error(new InvalidPartitionException())) // throw invalid partition exception to simulate collection recreate with same name + .doReturn(Mono.just(mockStoreResponseFast)) // new read + .doReturn(Mono.just(mockStoreResponseFast)) // subsequent barriers + .doReturn(Mono.just(mockStoreResponseFast)) + .doReturn(Mono.just(mockStoreResponseFast)) + .when(mockTransportClient).invokeResourceOperationAsync( + Mockito.eq(URI.create(addressInformation[1].getPhysicalUri())), + Mockito.any(RxDocumentServiceRequest.class)); + + // After this, the product code should reset target identity, and lsn response + Queue queueOfResponses = new ArrayDeque<>(); + + // let the first 10 responses be slow, and then fast + for (int i = 0; i < 20; i++) { + queueOfResponses.add(i <= 2 ? 
mockStoreResponseSlow : mockStoreResponseFast); + } + + // setup mock transport client with a sequence of outputs, for the second replica + // This replica behaves in the following manner: + // calling InvokeResourceOperationAsync + // 1st time: returns valid LSN + // 2nd time: returns InvalidPartitionException + // initial read response + + Mockito.doAnswer((params) -> Mono.just(queueOfResponses.poll())) + .when(mockTransportClient).invokeResourceOperationAsync( + Mockito.eq(URI.create(addressInformation[2].getPhysicalUri())), + Mockito.any(RxDocumentServiceRequest.class)); + + return mockTransportClient; + } + + private enum ReadQuorumResultKind { + QuorumMet, + QuorumSelected, + QuorumNotSelected + } + + private TransportClient getMockTransportClientForGlobalStrongReads(AddressInformation[] addressInformation, ReadQuorumResultKind result) { + // create objects for all the dependencies of the StoreReader + TransportClient mockTransportClient = Mockito.mock(TransportClient.class); + + // create mock store response object + + StoreResponse mockStoreResponse1 = StoreResponseBuilder.create() + .withHeader(WFConstants.BackendHeaders.LSN, "100") + .withHeader(WFConstants.BackendHeaders.GLOBAL_COMMITTED_LSN, "90") + .withHeader(WFConstants.BackendHeaders.ACTIVITY_ID, "ACTIVITYID1_1") + .withHeader(WFConstants.BackendHeaders.NUMBER_OF_READ_REGIONS, "1") + .build(); + + StoreResponse mockStoreResponse2 = StoreResponseBuilder.create() + .withHeader(WFConstants.BackendHeaders.LSN, "90") + .withHeader(WFConstants.BackendHeaders.GLOBAL_COMMITTED_LSN, "90") + .withHeader(WFConstants.BackendHeaders.ACTIVITY_ID, "ACTIVITYID1_2") + .withHeader(WFConstants.BackendHeaders.NUMBER_OF_READ_REGIONS, "1") + .build(); + + + StoreResponse mockStoreResponse3 = StoreResponseBuilder.create() + .withHeader(WFConstants.BackendHeaders.LSN, "92") + .withHeader(WFConstants.BackendHeaders.GLOBAL_COMMITTED_LSN, "90") + .withHeader(WFConstants.BackendHeaders.ACTIVITY_ID, "ACTIVITYID1_3") + .withHeader(WFConstants.BackendHeaders.NUMBER_OF_READ_REGIONS, "1") + .build(); + + StoreResponse mockStoreResponse4 = StoreResponseBuilder.create() + .withHeader(WFConstants.BackendHeaders.LSN, "100") + .withHeader(WFConstants.BackendHeaders.GLOBAL_COMMITTED_LSN, "92") + .withHeader(WFConstants.BackendHeaders.ACTIVITY_ID, "ACTIVITYID1_3") + .withHeader(WFConstants.BackendHeaders.NUMBER_OF_READ_REGIONS, "1") + .build(); + + StoreResponse mockStoreResponse5 = StoreResponseBuilder.create() + .withHeader(WFConstants.BackendHeaders.LSN, "100") + .withHeader(WFConstants.BackendHeaders.GLOBAL_COMMITTED_LSN, "100") + .withHeader(WFConstants.BackendHeaders.ACTIVITY_ID, "ACTIVITYID1_3") + .withHeader(WFConstants.BackendHeaders.NUMBER_OF_READ_REGIONS, "1") + .withHeader(WFConstants.BackendHeaders.CURRENT_REPLICA_SET_SIZE, "1") + .withHeader(WFConstants.BackendHeaders.QUORUM_ACKED_LSN, "100") + .build(); + // set lsn and activityid on the store response. 
+ + StoreResponse mockStoreResponseFast = StoreResponseBuilder.create() + .withHeader(WFConstants.BackendHeaders.LSN, "50") + .withHeader(WFConstants.BackendHeaders.ACTIVITY_ID, "ACTIVITYID1_1") + .build(); + + if(result == ReadQuorumResultKind.QuorumMet) { + // setup mock transport client for the first replica + Mockito.doReturn(Mono.just(mockStoreResponse5)) + .when(mockTransportClient).invokeResourceOperationAsync( + Mockito.eq(URI.create(addressInformation[0].getPhysicalUri())), Mockito.any(RxDocumentServiceRequest.class)); + + Mockito.doReturn(Mono.just(mockStoreResponse1)) + .doReturn(Mono.just(mockStoreResponse1)) + .doReturn(Mono.just(mockStoreResponse1)) + .doReturn(Mono.just(mockStoreResponse1)) + .doReturn(Mono.just(mockStoreResponse1)) + .doReturn(Mono.just(mockStoreResponse5)) + .when(mockTransportClient).invokeResourceOperationAsync( + Mockito.eq(URI.create(addressInformation[1].getPhysicalUri())), + Mockito.any(RxDocumentServiceRequest.class)); + + Mockito.doReturn(Mono.just(mockStoreResponse2)) + .doReturn(Mono.just(mockStoreResponse2)) + .doReturn(Mono.just(mockStoreResponse2)) + .doReturn(Mono.just(mockStoreResponse1)) + .doReturn(Mono.just(mockStoreResponse4)) + .doReturn(Mono.just(mockStoreResponse5)) + .when(mockTransportClient).invokeResourceOperationAsync( + Mockito.eq(URI.create(addressInformation[2].getPhysicalUri())), + Mockito.any(RxDocumentServiceRequest.class)); + } + + if (result == ReadQuorumResultKind.QuorumSelected) { + // setup mock transport client for the first replica + Mockito.doReturn(Mono.just(mockStoreResponse2)) + .when(mockTransportClient).invokeResourceOperationAsync( + Mockito.eq(URI.create(addressInformation[0].getPhysicalUri())), Mockito.any(RxDocumentServiceRequest.class)); + + // setup mock transport client with a sequence of outputs + Mockito.doReturn(Mono.just(mockStoreResponse1)) + .when(mockTransportClient).invokeResourceOperationAsync( + Mockito.eq(URI.create(addressInformation[1].getPhysicalUri())), Mockito.any(RxDocumentServiceRequest.class)); + + + // setup mock transport client with a sequence of outputs + Mockito.doReturn(Mono.just(mockStoreResponse2)) + .when(mockTransportClient).invokeResourceOperationAsync( + Mockito.eq(URI.create(addressInformation[2].getPhysicalUri())), Mockito.any(RxDocumentServiceRequest.class)); + } else if (result == ReadQuorumResultKind.QuorumNotSelected) { + // setup mock transport client for the first replica + + Mockito.doReturn(Mono.just(mockStoreResponse5)) + .when(mockTransportClient).invokeResourceOperationAsync( + Mockito.eq(URI.create(addressInformation[0].getPhysicalUri())), Mockito.any(RxDocumentServiceRequest.class)); + + Mockito.doReturn(Mono.just(mockStoreResponse5)) + .when(mockTransportClient).invokeResourceOperationAsync( + Mockito.eq(URI.create(addressInformation[1].getPhysicalUri())), Mockito.any(RxDocumentServiceRequest.class)); + + Mockito.doReturn(Mono.error(new GoneException("test"))) + .when(mockTransportClient).invokeResourceOperationAsync( + Mockito.eq(URI.create(addressInformation[2].getPhysicalUri())), Mockito.any(RxDocumentServiceRequest.class)); + } + + return mockTransportClient; + } + + private TransportClient getMockTransportClientForGlobalStrongWrites( + AddressInformation[] addressInformation, + int indexOfCaughtUpReplica, + boolean undershootGlobalCommittedLsnDuringBarrier, + boolean overshootLsnDuringBarrier, + boolean overshootGlobalCommittedLsnDuringBarrier) + { + TransportClient mockTransportClient = Mockito.mock(TransportClient.class); + + // create mock store 
response object + + // set lsn and activityid on the store response. + StoreResponse mockStoreResponse1 = StoreResponseBuilder.create() + .withHeader(WFConstants.BackendHeaders.LSN, "100") + .withHeader(WFConstants.BackendHeaders.ACTIVITY_ID, "ACTIVITYID1_1") + .withHeader(WFConstants.BackendHeaders.GLOBAL_COMMITTED_LSN, "90") + .withHeader(WFConstants.BackendHeaders.NUMBER_OF_READ_REGIONS, "1") + .build(); + + StoreResponse mockStoreResponse2 = StoreResponseBuilder.create() + .withHeader(WFConstants.BackendHeaders.LSN, "100") + .withHeader(WFConstants.BackendHeaders.ACTIVITY_ID, "ACTIVITYID1_2") + .withHeader(WFConstants.BackendHeaders.GLOBAL_COMMITTED_LSN, "100") + .withHeader(WFConstants.BackendHeaders.NUMBER_OF_READ_REGIONS, "1") + .build(); + + StoreResponse mockStoreResponse3 = StoreResponseBuilder.create() + .withHeader(WFConstants.BackendHeaders.LSN, "103") + .withHeader(WFConstants.BackendHeaders.ACTIVITY_ID, "ACTIVITYID1_3") + .withHeader(WFConstants.BackendHeaders.GLOBAL_COMMITTED_LSN, "100") + .withHeader(WFConstants.BackendHeaders.NUMBER_OF_READ_REGIONS, "1") + .build(); + + StoreResponse mockStoreResponse4 = StoreResponseBuilder.create() + .withHeader(WFConstants.BackendHeaders.LSN, "103") + .withHeader(WFConstants.BackendHeaders.ACTIVITY_ID, "ACTIVITYID1_3") + .withHeader(WFConstants.BackendHeaders.GLOBAL_COMMITTED_LSN, "103") + .withHeader(WFConstants.BackendHeaders.NUMBER_OF_READ_REGIONS, "1") + .build(); + + StoreResponse mockStoreResponse5 = StoreResponseBuilder.create() + .withHeader(WFConstants.BackendHeaders.LSN, "106") + .withHeader(WFConstants.BackendHeaders.ACTIVITY_ID, "ACTIVITYID1_3") + .withHeader(WFConstants.BackendHeaders.GLOBAL_COMMITTED_LSN, "103") + .withHeader(WFConstants.BackendHeaders.NUMBER_OF_READ_REGIONS, "1") + .build(); + + StoreResponse finalResponse = null; + if (undershootGlobalCommittedLsnDuringBarrier) { + finalResponse = mockStoreResponse1; + } else { + if (overshootLsnDuringBarrier) { + if (overshootGlobalCommittedLsnDuringBarrier) { + finalResponse = mockStoreResponse5; + } else { + finalResponse = mockStoreResponse3; + } + } else { + if (overshootGlobalCommittedLsnDuringBarrier) { + finalResponse = mockStoreResponse4; + } else { + finalResponse = mockStoreResponse2; + } + } + } + + for (int i = 0; i < addressInformation.length; i++) { + if (i == indexOfCaughtUpReplica) { + Mockito.doReturn(Mono.just(mockStoreResponse1)) + .doReturn(Mono.just(mockStoreResponse1)) + .doReturn(Mono.just(mockStoreResponse1)) + .doReturn(Mono.just(mockStoreResponse1)) + .doReturn(Mono.just(finalResponse)) + .when(mockTransportClient).invokeResourceOperationAsync( + Mockito.eq(URI.create(addressInformation[i].getPhysicalUri())), Mockito.any(RxDocumentServiceRequest.class)); + + } else { + Mockito.doReturn(Mono.just(mockStoreResponse1)) + .doReturn(Mono.just(mockStoreResponse1)) + .doReturn(Mono.just(mockStoreResponse1)) + .doReturn(Mono.just(mockStoreResponse1)) + .doReturn(Mono.just(mockStoreResponse1)) + .doReturn(Mono.just(mockStoreResponse1)) + .when(mockTransportClient).invokeResourceOperationAsync( + Mockito.eq(URI.create(addressInformation[i].getPhysicalUri())), Mockito.any(RxDocumentServiceRequest.class)); + } + } + + return mockTransportClient; + } + + /** + * We are simulating upgrade scenario where one of the secondary replicas is down. + * And one of the other secondary replicas is an XP Primary (lagging behind). 
+ * Dynamic Quorum is in effect, so Write Quorum = 2 + * @return an array of AddressInformation + */ + private AddressInformation[] getMockAddressInformationDuringUpgrade() { + // setup mocks for address information + AddressInformation[] addressInformation = new AddressInformation[3]; + + // construct URIs that look like the actual uri + // rntbd://yt1prdddc01-docdb-1.documents.azure.com:14003/apps/ce8ab332-f59e-4ce7-a68e-db7e7cfaa128/services/68cc0b50-04c6-4716-bc31-2dfefd29e3ee/partitions/5604283d-0907-4bf4-9357-4fa9e62de7b5/replicas/131170760736528207s/ + for (int i = 0; i <= 2; i++) { + String physicalUri = + "rntbd://dummytenant.documents.azure.com:14003/apps/APPGUID/services/SERVICEGUID/partitions/PARTITIONGUID/replicas/" + + Integer.toString(i) + (i == 0 ? "p" : "s") + "/"; + addressInformation[i] = new AddressInformation(true, i == 0 ? true : false, physicalUri, Protocol.TCP); + } + + return addressInformation; + } + + /** + * Given an array of address information, gives a mock address cache. + * @param addressInformation + * @return + */ + private IAddressResolver getMockAddressCache(AddressInformation[] addressInformation) + { + // Address Selector is an internal sealed class that can't be mocked, but its dependency + // AddressCache can be mocked. + IAddressResolver mockAddressCache = Mockito.mock(IAddressResolver.class); + + Mockito.doReturn(Mono.just(addressInformation)).when(mockAddressCache) + .resolveAsync(Mockito.any(RxDocumentServiceRequest.class), Mockito.eq(false) /*forceRefresh*/); + + Mockito.doReturn(Mono.just(new AddressInformation[0])).when(mockAddressCache) + .resolveAsync(Mockito.any(RxDocumentServiceRequest.class), Mockito.eq(true) /*forceRefresh*/); + + return mockAddressCache; + } + + /** + * Tests for {@link StoreReader} + */ + @Test(groups = "unit") + public void storeReaderBarrier() { + // create a real document service request + RxDocumentServiceRequest entity = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.Document); + + // set request charge tracker - this is referenced in store reader (ReadMultipleReplicaAsync) + DocumentServiceRequestContext requestContext = new DocumentServiceRequestContext(); + // requestContext.ClientRequestStatistics = new ClientSideRequestStatistics(); + requestContext.requestChargeTracker = new RequestChargeTracker(); + entity.requestContext = requestContext; + + // also setup timeout helper, used in store reader + // entity.requestContext.timeoutHelper = new TimeoutHelper(new TimeSpan(2, 2, 2)); + entity.requestContext.timeoutHelper = Mockito.mock(TimeoutHelper.class); + + // when the store reader throws INVALID Partition exception, the higher layer should + // clear this target identity. 
+ // entity.requestContext.TargetIdentity = new ServiceIdentity("dummyTargetIdentity1", new Uri("http://dummyTargetIdentity1"), false); + entity.requestContext.resolvedPartitionKeyRange = new PartitionKeyRange(); + + AddressInformation[] addressInformation = getMockAddressInformationDuringUpgrade(); + IAddressResolver mockAddressCache = getMockAddressCache(addressInformation); + + // validate that the mock works + AddressInformation[] addressInfo = mockAddressCache.resolveAsync(entity, false).block(); + + assertThat(addressInfo[0]).isEqualTo(addressInformation[0]); + + AddressSelector addressSelector = new AddressSelector(mockAddressCache, Protocol.TCP); + URI primaryAddress = addressSelector.resolvePrimaryUriAsync(entity, false /*forceAddressRefresh*/).block(); + + // check if the address return from Address Selector matches the original address info + assertThat(primaryAddress.toString()).isEqualTo(addressInformation[0].getPhysicalUri()); + + // get mock transport client that returns a sequence of responses to simulate upgrade + TransportClient mockTransportClient = getMockTransportClientDuringUpgrade(addressInformation); + + // get response from mock object + StoreResponse response = mockTransportClient.invokeResourceOperationAsync(URI.create(addressInformation[0].getPhysicalUri()), entity).block(); + + // validate that the LSN matches + assertThat(response.getLSN()).isEqualTo(50); + + String activityId = response.getHeaderValue(WFConstants.BackendHeaders.ACTIVITY_ID); + + // validate that the ActivityId Matches + assertThat(activityId).isEqualTo("ACTIVITYID1_1"); + + // create a real session container - we don't need session for this test anyway + ISessionContainer sessionContainer = new SessionContainer(Strings.Emtpy); + + // create store reader with mock transport client, real address selector (that has mock address cache), and real session container + StoreReader storeReader = + new StoreReader(mockTransportClient, + addressSelector, + sessionContainer); + + // reads always go to read quorum (2) replicas + int replicaCountToRead = 2; + + List result = storeReader.readMultipleReplicaAsync( + entity, + false /*includePrimary*/, + replicaCountToRead, + true /*requiresValidLSN*/, + false /*useSessionToken*/, + ReadMode.Strong).block(); + + // make sure we got 2 responses from the store reader + assertThat(result).hasSize(2); + } + + public static void validateSuccess(Mono> single, + MultiStoreResultValidator validator) { + validateSuccess(single, validator, 10000); + } + + public static void validateSuccess(Mono> single, + MultiStoreResultValidator validator, long timeout) { + TestSubscriber> testSubscriber = new TestSubscriber<>(); + + single.subscribe(testSubscriber); + testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS); + testSubscriber.assertNoErrors(); + testSubscriber.assertComplete(); + testSubscriber.assertValueCount(1); + validator.validate(testSubscriber.values().get(0)); + } + + public static void validateSuccess(Mono single, + StoreResultValidator validator) { + validateSuccess(single, validator, 10000); + } + + public static void validateSuccess(Mono single, + StoreResultValidator validator, long timeout) { + TestSubscriber testSubscriber = new TestSubscriber<>(); + + single.subscribe(testSubscriber); + testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS); + testSubscriber.assertNoErrors(); + testSubscriber.assertComplete(); + testSubscriber.assertValueCount(1); + validator.validate(testSubscriber.values().get(0)); + } + + public static void 
validateException(Mono single, + FailureValidator validator) { + validateException(single, validator, 10000); + } + + public static void validateException(Mono single, + FailureValidator validator, long timeout) { + TestSubscriber testSubscriber = new TestSubscriber<>(); + + single.subscribe(testSubscriber); + testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS); + testSubscriber.assertNotComplete(); + testSubscriber.assertTerminated(); + assertThat(testSubscriber.errorCount()).isEqualTo(1); + validator.validate(testSubscriber.errors().get(0)); + } + + /** + * StoreClient uses ReplicatedResourceClient uses ConsistencyReader uses QuorumReader uses StoreReader uses TransportClient uses RntbdConnection + */ + @Test(groups = "unit", enabled = false) + public void storeClient() throws URISyntaxException { + // create a real document service request (with auth token level = god) + RxDocumentServiceRequest entity = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.Document); + entity.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey; + + // set request charge tracker - this is referenced in store reader (ReadMultipleReplicaAsync) + DocumentServiceRequestContext requestContext = new DocumentServiceRequestContext(); + requestContext.requestChargeTracker = new RequestChargeTracker(); + entity.requestContext = requestContext; + + // set a dummy resource id on the request. + entity.setResourceId("1-MxAPlgMgA="); + + // set consistency level on the request to Bounded Staleness + entity.getHeaders().put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, ConsistencyLevel.BOUNDED_STALENESS.toString()); + + // also setup timeout helper, used in store reader + entity.requestContext.timeoutHelper = new TimeoutHelper(Duration.ofSeconds(2 * 60 * 60 + 2 * 60 + 2)); + + // when the store reader throws INVALID Partition exception, the higher layer should + entity.requestContext.resolvedPartitionKeyRange = new PartitionKeyRange(); + + AddressInformation[] addressInformations = getMockAddressInformationDuringUpgrade(); + IAddressResolver mockAddressCache = getMockAddressCache(addressInformations); + + // validate that the mock works + AddressInformation[] addressInfo = mockAddressCache.resolveAsync(entity, false).block(); + assertThat(addressInfo[0]).isEqualTo(addressInformations[0]); + + AddressSelector addressSelector = new AddressSelector(mockAddressCache, Protocol.TCP); + URI primaryAddress = addressSelector.resolvePrimaryUriAsync(entity, false).block(); + + // check if the address return from Address Selector matches the original address info + assertThat(primaryAddress.toString()).isEqualTo(addressInformations[0].getPhysicalUri()); + + // get mock transport client that returns a sequence of responses to simulate upgrade + TransportClient mockTransportClient = getMockTransportClientDuringUpgrade(addressInformations); + + // get response from mock object + StoreResponse response = mockTransportClient.invokeResourceOperationAsync(new URI(addressInformations[0].getPhysicalUri()), entity).block(); + + // validate that the LSN matches + assertThat(response.getLSN()).isEqualTo(50); + + String activityId = response.getHeaderValue(WFConstants.BackendHeaders.ACTIVITY_ID); + // validate that the ActivityId Matches + assertThat(activityId).isEqualTo("ACTIVITYID1_1"); + + // create a real session container - we don't need session for this test anyway + SessionContainer sessionContainer = new SessionContainer(StringUtils.EMPTY); + + // create store reader with mock transport client, 
real address selector (that has mock address cache), and real session container + StoreReader storeReader = new StoreReader(mockTransportClient, addressSelector, sessionContainer); + + IAuthorizationTokenProvider mockAuthorizationTokenProvider = Mockito.mock(IAuthorizationTokenProvider.class); + Mockito.when(mockAuthorizationTokenProvider.getUserAuthorizationToken(Matchers.anyString(), Matchers.any(), Matchers.anyString(), Matchers.anyMap(), + Matchers.any(), Matchers.anyMap())).thenReturn("dummyauthtoken"); + + // setup max replica set size on the config reader + ReplicationPolicy replicationPolicy = new ReplicationPolicy(); + GatewayServiceConfigurationReader mockServiceConfigReader = Mockito.mock(GatewayServiceConfigurationReader.class); + Mockito.when(mockServiceConfigReader.getUserReplicationPolicy()).thenReturn(replicationPolicy); + + try { + StoreClient storeClient = new StoreClient(new Configs(),mockAddressCache, sessionContainer, mockServiceConfigReader, mockAuthorizationTokenProvider, mockTransportClient, false); + + ServerStoreModel storeModel = new ServerStoreModel(storeClient); + Mono result = storeModel.processMessage(entity).single(); + result.block(); + + // if we have reached this point, there was a successful request. + // validate if the target identity has been cleared out. + // If the target identity is null and the request still succeeded, it means + // that the very first read succeeded without a barrier request. + assertThat(entity.requestContext.resolvedPartitionKeyRange).isNotNull(); + } catch (Exception e) { + assertThat(e instanceof ServiceUnavailableException + || e instanceof IllegalArgumentException + || e instanceof NullPointerException + || e instanceof NoSuchElementException).isTrue(); + } + } + + /** + * test consistency writer for global strong + */ + @Test(groups = "unit") + public void globalStrongConsistentWrite() { + // create a real document service request (with auth token level = god) + RxDocumentServiceRequest entity = RxDocumentServiceRequest.create(OperationType.Create, ResourceType.Document); + entity.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey; + + // set request charge tracker - this is referenced in store reader (ReadMultipleReplicaAsync) + DocumentServiceRequestContext requestContext = new DocumentServiceRequestContext(); + requestContext.requestChargeTracker = new RequestChargeTracker(); + entity.requestContext = requestContext; + + // set a dummy resource id on the request. + entity.setResourceId("1-MxAPlgMgA="); + + // set consistency level on the request to Bounded Staleness + entity.getHeaders().put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, ConsistencyLevel.STRONG.toString()); + + // also setup timeout helper, used in store reader + entity.requestContext.timeoutHelper = new TimeoutHelper(Duration.ofSeconds(2 * 60 * 60 + 2 * 60 + 2)); + + // when the store reader throws INVALID Partition exception, the higher layer should + // clear this target identity. 
+ entity.requestContext.resolvedPartitionKeyRange = new PartitionKeyRange(); + + AddressInformation[] addressInformations = getMockAddressInformationDuringUpgrade(); + IAddressResolver mockAddressCache = getMockAddressCache(addressInformations); + + // validate that the mock works + AddressInformation[] addressInfo = mockAddressCache.resolveAsync(entity, false).block(); + assertThat(addressInformations[0]).isEqualTo(addressInfo[0]); + + AddressSelector addressSelector = new AddressSelector(mockAddressCache, Protocol.TCP); + URI primaryAddress = addressSelector.resolvePrimaryUriAsync(entity, false).block(); + + // check if the address return from Address Selector matches the original address info + assertThat(primaryAddress.toString()).isEqualTo(addressInformations[0].getPhysicalUri()); + + // create a real session container - we don't need session for this test anyway + SessionContainer sessionContainer = new SessionContainer(StringUtils.EMPTY); + GatewayServiceConfigurationReader serviceConfigurationReader = Mockito.mock(GatewayServiceConfigurationReader.class); + + IAuthorizationTokenProvider mockAuthorizationTokenProvider = Mockito.mock(IAuthorizationTokenProvider.class); + Mockito.when(mockAuthorizationTokenProvider.getUserAuthorizationToken(Matchers.anyString(),Matchers.any(), Matchers.anyString(), Matchers.anyMap(), + Matchers.any(), Matchers.anyMap())).thenReturn("dummyauthtoken"); + + for (int i = 0; i < addressInformations.length; i++) { + TransportClient mockTransportClient = getMockTransportClientForGlobalStrongWrites(addressInformations, i, false, false, false); + StoreReader storeReader = new StoreReader(mockTransportClient, addressSelector, sessionContainer); + ConsistencyWriter consistencyWriter = new ConsistencyWriter(addressSelector, sessionContainer, mockTransportClient, mockAuthorizationTokenProvider, serviceConfigurationReader, false); + StoreResponse response = consistencyWriter.writeAsync(entity, new TimeoutHelper(Duration.ofSeconds(30)), false).block(); + assertThat(response.getLSN()).isEqualTo(100); + + //globalCommittedLsn never catches up in this case + mockTransportClient = getMockTransportClientForGlobalStrongWrites(addressInformations, i, true, false, false); + consistencyWriter = new ConsistencyWriter(addressSelector, sessionContainer, mockTransportClient, mockAuthorizationTokenProvider, serviceConfigurationReader, false); + try { + response = consistencyWriter.writeAsync(entity, new TimeoutHelper(Duration.ofSeconds(30)), false).block(); + // fail("it should throw exception"); + } catch (Exception e) { + } + + mockTransportClient = getMockTransportClientForGlobalStrongWrites(addressInformations, i, false, true, false); + storeReader = new StoreReader(mockTransportClient, addressSelector, sessionContainer); + consistencyWriter = new ConsistencyWriter(addressSelector, sessionContainer, mockTransportClient, mockAuthorizationTokenProvider, serviceConfigurationReader, false); + response = consistencyWriter.writeAsync(entity, new TimeoutHelper(Duration.ofSeconds(30)), false).block(); + assertThat(response.getLSN()).isEqualTo(100); + + mockTransportClient = getMockTransportClientForGlobalStrongWrites(addressInformations, i, false, true, true); + storeReader = new StoreReader(mockTransportClient, addressSelector, sessionContainer); + consistencyWriter = new ConsistencyWriter(addressSelector, sessionContainer, mockTransportClient, mockAuthorizationTokenProvider, serviceConfigurationReader, false); + response = consistencyWriter.writeAsync(entity, new 
TimeoutHelper(Duration.ofSeconds(30)), false).block(); + assertThat(response.getLSN()).isEqualTo(100); + + + mockTransportClient = getMockTransportClientForGlobalStrongWrites(addressInformations, i, false, false, true); + storeReader = new StoreReader(mockTransportClient, addressSelector, sessionContainer); + consistencyWriter = new ConsistencyWriter(addressSelector, sessionContainer, mockTransportClient, mockAuthorizationTokenProvider, serviceConfigurationReader, false); + response = consistencyWriter.writeAsync(entity, new TimeoutHelper(Duration.ofSeconds(30)), false).block(); + assertThat(response.getLSN()).isEqualTo(100); + + } + } + + /** + * Mocking Consistency + */ + @Test(groups = "unit", priority = 1) + public void globalStrongConsistency() { + // create a real document service request (with auth token level = god) + RxDocumentServiceRequest entity = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.Document); + entity.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey; + + // set request charge tracker - this is referenced in store reader (ReadMultipleReplicaAsync) + DocumentServiceRequestContext requestContext = new DocumentServiceRequestContext(); + requestContext.requestChargeTracker = new RequestChargeTracker(); + entity.requestContext = requestContext; + + // set a dummy resource id on the request. + entity.setResourceId("1-MxAPlgMgA="); + + // set consistency level on the request to Bounded Staleness + entity.getHeaders().put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, ConsistencyLevel.BOUNDED_STALENESS.toString()); + + // also setup timeout helper, used in store reader + entity.requestContext.timeoutHelper = new TimeoutHelper(Duration.ofSeconds(2 * 60 * 60 + 2 * 60 + 2)); + + // when the store reader throws INVALID Partition exception, the higher layer should + // clear this target identity. 
+ entity.requestContext.resolvedPartitionKeyRange = new PartitionKeyRange(); + + AddressInformation[] addressInformations = getMockAddressInformationDuringUpgrade(); + IAddressResolver mockAddressCache = getMockAddressCache(addressInformations); + + // validate that the mock works + AddressInformation[] addressInfo = mockAddressCache.resolveAsync(entity, false).block(); + assertThat(addressInfo[0]).isEqualTo(addressInformations[0]); + + AddressSelector addressSelector = new AddressSelector(mockAddressCache, Protocol.TCP); + URI primaryAddress = addressSelector.resolvePrimaryUriAsync(entity, false).block(); + + // check if the address return from Address Selector matches the original address info + assertThat(primaryAddress.toString()).isEqualTo(addressInformations[0].getPhysicalUri()); + + // Quorum Met scenario Start + { + // get mock transport client that returns a sequence of responses to simulate upgrade + TransportClient mockTransportClient = getMockTransportClientForGlobalStrongReads(addressInformations, ReadQuorumResultKind.QuorumMet); + + // create a real session container - we don't need session for this test anyway + SessionContainer sessionContainer = new SessionContainer(StringUtils.EMPTY); + + // create store reader with mock transport client, real address selector (that has mock address cache), and real session container + StoreReader storeReader = new StoreReader(mockTransportClient, addressSelector, sessionContainer); + + IAuthorizationTokenProvider mockAuthorizationTokenProvider = Mockito.mock(IAuthorizationTokenProvider.class); + Mockito.when(mockAuthorizationTokenProvider.getUserAuthorizationToken(Matchers.anyString(), Matchers.any(), Matchers.anyString(), Matchers.anyMap(), + Matchers.any(), Matchers.anyMap())).thenReturn("dummyauthtoken"); + + // setup max replica set size on the config reader + ReplicationPolicy replicationPolicy = new ReplicationPolicy(); + GatewayServiceConfigurationReader mockServiceConfigReader = Mockito.mock(GatewayServiceConfigurationReader.class); + Mockito.when(mockServiceConfigReader.getUserReplicationPolicy()).thenReturn(replicationPolicy); + + QuorumReader reader = new QuorumReader(new Configs(),mockTransportClient, addressSelector, storeReader, mockServiceConfigReader, mockAuthorizationTokenProvider); + + entity.requestContext.originalRequestConsistencyLevel = ConsistencyLevel.STRONG; + + StoreResponse result = reader.readStrongAsync(entity, 2, ReadMode.Strong).block(); + assertThat(result.getLSN()).isEqualTo(100); + + String globalCommitedLSN = result.getHeaderValue(WFConstants.BackendHeaders.GLOBAL_COMMITTED_LSN); + + long nGlobalCommitedLSN = Long.parseLong(globalCommitedLSN); + assertThat(nGlobalCommitedLSN).isEqualTo(90); + } + + // Quorum Selected scenario + { + // get mock transport client that returns a sequence of responses to simulate upgrade + TransportClient mockTransportClient = getMockTransportClientForGlobalStrongReads(addressInformations, ReadQuorumResultKind.QuorumSelected); + + // create a real session container - we don't need session for this test anyway + SessionContainer sessionContainer = new SessionContainer(StringUtils.EMPTY); + + // create store reader with mock transport client, real address selector (that has mock address cache), and real session container + StoreReader storeReader = new StoreReader(mockTransportClient, addressSelector, sessionContainer); + + IAuthorizationTokenProvider mockAuthorizationTokenProvider = Mockito.mock(IAuthorizationTokenProvider.class); + 
Mockito.when(mockAuthorizationTokenProvider.getUserAuthorizationToken(Matchers.anyString(), Matchers.any(), Matchers.anyString(), Matchers.anyMap(), + Matchers.any(), Matchers.anyMap())).thenReturn("dummyauthtoken"); + + // setup max replica set size on the config reader + ReplicationPolicy replicationPolicy = new ReplicationPolicy(); + BridgeInternal.setMaxReplicaSetSize(replicationPolicy,4); + + GatewayServiceConfigurationReader mockServiceConfigReader = Mockito.mock(GatewayServiceConfigurationReader.class); + Mockito.when(mockServiceConfigReader.getUserReplicationPolicy()).thenReturn(replicationPolicy); + Mockito.when(mockServiceConfigReader.getDefaultConsistencyLevel()).thenReturn(ConsistencyLevel.STRONG); + + QuorumReader reader = new QuorumReader(new Configs(), mockTransportClient, addressSelector, storeReader, mockServiceConfigReader, mockAuthorizationTokenProvider); + entity.requestContext.originalRequestConsistencyLevel = ConsistencyLevel.STRONG; + entity.requestContext.quorumSelectedLSN = -1; + entity.requestContext.globalCommittedSelectedLSN = -1; + try { + StoreResponse result = reader.readStrongAsync(entity, 2, ReadMode.Strong).block(); + assertThat(false).isTrue(); + } catch (Exception ex) { + if (ex.getCause() instanceof GoneException) { + logger.info("Gone exception expected!"); + } + } + + assertThat(entity.requestContext.quorumSelectedLSN).isEqualTo(100); + assertThat(entity.requestContext.globalCommittedSelectedLSN).isEqualTo(100); + } + + // Quorum not met scenario + { + // get mock transport client that returns a sequence of responses to simulate upgrade + TransportClient mockTransportClient = getMockTransportClientForGlobalStrongReads(addressInformations, ReadQuorumResultKind.QuorumNotSelected); + + // create a real session container - we don't need session for this test anyway + SessionContainer sessionContainer = new SessionContainer(StringUtils.EMPTY); + + // create store reader with mock transport client, real address selector (that has mock address cache), and real session container + StoreReader storeReader = + new StoreReader(mockTransportClient, + addressSelector, + sessionContainer); + + IAuthorizationTokenProvider mockAuthorizationTokenProvider = Mockito.mock(IAuthorizationTokenProvider.class); + Mockito.when(mockAuthorizationTokenProvider.getUserAuthorizationToken(Matchers.anyString(), Matchers.any(), Matchers.anyString(), Matchers.anyMap(), + Matchers.any(), Matchers.anyMap())).thenReturn("dummyauthtoken"); + // setup max replica set size on the config reader + ReplicationPolicy replicationPolicy = new ReplicationPolicy(); + BridgeInternal.setMaxReplicaSetSize(replicationPolicy,4); + + GatewayServiceConfigurationReader mockServiceConfigReader = Mockito.mock(GatewayServiceConfigurationReader.class); + Mockito.when(mockServiceConfigReader.getUserReplicationPolicy()).thenReturn(replicationPolicy); + Mockito.when(mockServiceConfigReader.getDefaultConsistencyLevel()).thenReturn(ConsistencyLevel.STRONG); + + QuorumReader reader = new QuorumReader(new Configs(), mockTransportClient, addressSelector, storeReader, mockServiceConfigReader, mockAuthorizationTokenProvider); + entity.requestContext.originalRequestConsistencyLevel = ConsistencyLevel.STRONG; + entity.requestContext.performLocalRefreshOnGoneException = true; + + StoreResponse result = reader.readStrongAsync(entity, 2, ReadMode.Strong).block(); + assertThat(result.getLSN()).isEqualTo(100); + + String globalCommitedLSN; + globalCommitedLSN = 
result.getHeaderValue(WFConstants.BackendHeaders.GLOBAL_COMMITTED_LSN); + long nGlobalCommitedLSN = Long.parseLong(globalCommitedLSN); + assertThat(nGlobalCommitedLSN).isEqualTo(90); + } + + } + + // TODO: more mocking unit tests for different scenarios in StoreReader + // TODO: more mocking tests on how session work for StoreReader +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/StoreReaderTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/StoreReaderTest.java new file mode 100644 index 0000000000000..af3c084974043 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/StoreReaderTest.java @@ -0,0 +1,814 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.GoneException; +import com.azure.data.cosmos.internal.ISessionContainer; +import com.azure.data.cosmos.PartitionKeyRangeGoneException; +import com.azure.data.cosmos.RequestRateTooLargeException; +import com.azure.data.cosmos.internal.*; +import com.azure.data.cosmos.NotFoundException; +import com.azure.data.cosmos.PartitionIsMigratingException; +import com.azure.data.cosmos.PartitionKeyRangeIsSplittingException; +import com.google.common.collect.ImmutableList; +import io.reactivex.subscribers.TestSubscriber; +import org.assertj.core.api.AssertionsForClassTypes; +import org.mockito.Mockito; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; +import reactor.core.publisher.DirectProcessor; +import reactor.core.publisher.Mono; + +import java.net.URI; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.TimeUnit; + +import static com.azure.data.cosmos.internal.HttpConstants.StatusCodes.GONE; +import static com.azure.data.cosmos.internal.HttpConstants.SubStatusCodes.COMPLETING_PARTITION_MIGRATION; +import static com.azure.data.cosmos.internal.HttpConstants.SubStatusCodes.COMPLETING_SPLIT; +import static com.azure.data.cosmos.internal.HttpConstants.SubStatusCodes.PARTITION_KEY_RANGE_GONE; +import static org.assertj.core.api.Assertions.assertThat; + +public class StoreReaderTest { + private static final int TIMEOUT = 30000; + + + /** + * Tests for {@link StoreReader} + */ + @Test(groups = "unit") + public void startBackgroundAddressRefresh() throws Exception { + TransportClient transportClient = Mockito.mock(TransportClient.class); + AddressSelector addressSelector = Mockito.mock(AddressSelector.class); + ISessionContainer sessionContainer = Mockito.mock(ISessionContainer.class); + + StoreReader storeReader = new StoreReader(transportClient, addressSelector, sessionContainer); + + CyclicBarrier b = new CyclicBarrier(2); + DirectProcessor> subject = DirectProcessor.create(); + CountDownLatch c = new CountDownLatch(1); + + List uris = ImmutableList.of(URI.create("https://localhost:5050"), URI.create("https://localhost:5051"), + URI.create("https://localhost:50502"), URI.create("https://localhost:5053")); + + Mockito.doAnswer(invocationOnMock -> subject.single().doOnSuccess(x -> c.countDown()).doAfterTerminate(() -> new Thread() { + @Override + public void run() { + try { + b.await(); + } catch (Exception e) { + e.printStackTrace(); + } + } + }.start())).when(addressSelector).resolveAllUriAsync(Mockito.any(RxDocumentServiceRequest.class), Mockito.eq(true), Mockito.eq(true)); + RxDocumentServiceRequest request = Mockito.mock(RxDocumentServiceRequest.class); + storeReader.startBackgroundAddressRefresh(request); + + subject.onNext(uris); + subject.onComplete(); + + TimeUnit.MILLISECONDS.sleep(100); + AssertionsForClassTypes.assertThat(c.getCount()).isEqualTo(0); + AssertionsForClassTypes.assertThat(b.getNumberWaiting()).isEqualTo(1); + b.await(1000, TimeUnit.MILLISECONDS); + } + + @DataProvider(name = "verifyCanContinueOnExceptionArgProvider") + public Object[][] verifyCanContinueOnExceptionArgProvider() { + return new Object[][]{ + {new PartitionKeyRangeGoneException(), false,}, + {new PartitionKeyRangeIsSplittingException(), false,}, + {new PartitionKeyRangeGoneException(), false,}, + 
{new PartitionIsMigratingException(), false,}, + {new GoneException(), true,}, + {ExceptionBuilder.create().withHeader(HttpConstants.HttpHeaders.REQUEST_VALIDATION_FAILURE, "").asGoneException(), true,}, + {ExceptionBuilder.create().withHeader(HttpConstants.HttpHeaders.REQUEST_VALIDATION_FAILURE, "0").asGoneException(), true,}, + {ExceptionBuilder.create().withHeader(HttpConstants.HttpHeaders.REQUEST_VALIDATION_FAILURE, "1").asGoneException(), false,}, + }; + } + + @Test(groups = "unit", dataProvider = "verifyCanContinueOnExceptionArgProvider") + public void verifyCanContinueOnException(CosmosClientException dce, Boolean shouldVerify) { + CosmosClientException capturedFailure = null; + try { + StoreReader.verifyCanContinueOnException(dce); + } catch (CosmosClientException e) { + capturedFailure = e; + } + + if (shouldVerify) { + assertThat(capturedFailure).isNull(); + } else { + assertThat(capturedFailure).isEqualTo(dce); + } + } + + @DataProvider(name = "exceptionArgProvider") + public Object[][] exceptionArgProvider() { + return new Object[][]{ + // exception to be thrown from transportClient, expected (exception type, status, subStatus) + { new PartitionKeyRangeGoneException(), PartitionKeyRangeGoneException.class, GONE, PARTITION_KEY_RANGE_GONE, }, + { new PartitionKeyRangeIsSplittingException() , PartitionKeyRangeIsSplittingException.class, GONE, COMPLETING_SPLIT, }, + { new PartitionIsMigratingException(), PartitionIsMigratingException.class, GONE, COMPLETING_PARTITION_MIGRATION, }, + }; + } + + @Test(groups = "unit", dataProvider = "exceptionArgProvider") + public void exception(Exception ex, Class klass, int expectedStatusCode, Integer expectedSubStatusCode) { + TransportClientWrapper transportClientWrapper = new TransportClientWrapper.Builder.ReplicaResponseBuilder + .SequentialBuilder() + .then(ex) + .build(); + + URI primaryUri = URI.create("primary"); + URI secondaryUri1 = URI.create("secondary1"); + URI secondaryUri2 = URI.create("secondary2"); + URI secondaryUri3 = URI.create("secondary3"); + + AddressSelectorWrapper addressSelectorWrapper = AddressSelectorWrapper.Builder.Simple.create() + .withPrimary(primaryUri) + .withSecondary(ImmutableList.of(secondaryUri1, secondaryUri2, secondaryUri3)) + .build(); + ISessionContainer sessionContainer = Mockito.mock(ISessionContainer.class); + StoreReader storeReader = new StoreReader(transportClientWrapper.transportClient, addressSelectorWrapper.addressSelector, sessionContainer); + + TimeoutHelper timeoutHelper = Mockito.mock(TimeoutHelper.class); + RxDocumentServiceRequest dsr = RxDocumentServiceRequest.createFromName( + OperationType.Read, "/dbs/db/colls/col/docs/docId", ResourceType.Document); + dsr.requestContext = Mockito.mock(DocumentServiceRequestContext.class); + dsr.requestContext.timeoutHelper = timeoutHelper; + dsr.requestContext.resolvedPartitionKeyRange = partitionKeyRangeWithId("1"); + Mono> res = storeReader.readMultipleReplicaAsync(dsr, true, 3, true, true, ReadMode.Strong); + + FailureValidator failureValidator = FailureValidator.builder() + .instanceOf(klass) + .statusCode(expectedStatusCode) + .subStatusCode(expectedSubStatusCode) + .build(); + + TestSubscriber> subscriber = new TestSubscriber<>(); + res.subscribe(subscriber); + subscriber.awaitTerminalEvent(); + subscriber.assertNotComplete(); + assertThat(subscriber.errorCount()).isEqualTo(1); + failureValidator.validate(subscriber.errors().get(0)); + } + + /** + * reading in session consistency, if the requested session token cannot be supported by some replicas 
+ * tries others till we find a replica which can support the given session token + */ + @Test(groups = "unit") + public void sessionNotAvailableFromSomeReplicas_FindReplicaSatisfyingRequestedSession() { + long slowReplicaLSN = 651175; + long globalCommittedLsn = 651174; + String partitionKeyRangeId = "73"; + NotFoundException foundException = new NotFoundException(); + foundException.responseHeaders().put(HttpConstants.HttpHeaders.SESSION_TOKEN, partitionKeyRangeId + ":-1#" + slowReplicaLSN); + foundException.responseHeaders().put(WFConstants.BackendHeaders.LSN, Long.toString(slowReplicaLSN)); + foundException.responseHeaders().put(WFConstants.BackendHeaders.LOCAL_LSN, Long.toString(slowReplicaLSN)); + foundException.responseHeaders().put(WFConstants.BackendHeaders.GLOBAL_COMMITTED_LSN, Long.toString(globalCommittedLsn)); + + long fasterReplicaLSN = 651176; + + StoreResponse storeResponse = StoreResponseBuilder.create() + .withSessionToken(partitionKeyRangeId + ":-1#" + fasterReplicaLSN) + .withLSN(fasterReplicaLSN) + .withLocalLSN(fasterReplicaLSN) + .withQuorumAckecdLsn(fasterReplicaLSN) + .withQuorumAckecdLocalLsn(fasterReplicaLSN) + .withGlobalCommittedLsn(-1) + .withItemLocalLSN(fasterReplicaLSN) + .withRequestCharge(1.1) + .build(); + + TransportClientWrapper transportClientWrapper = new TransportClientWrapper.Builder.ReplicaResponseBuilder + .SequentialBuilder() + .then(foundException) // 1st replica read returns not found with lower lsn + .then(foundException) // 2nd replica read returns not found with lower lsn + .then(foundException) // 3rd replica read returns not found with lower lsn + .then(storeResponse) // 4th replica read returns storeResponse satisfying requested session token + .build(); + + URI primaryUri = URI.create("primary"); + URI secondaryUri1 = URI.create("secondary1"); + URI secondaryUri2 = URI.create("secondary2"); + URI secondaryUri3 = URI.create("secondary3"); + + AddressSelectorWrapper addressSelectorWrapper = AddressSelectorWrapper.Builder.Simple.create() + .withPrimary(primaryUri) + .withSecondary(ImmutableList.of(secondaryUri1, secondaryUri2, secondaryUri3)) + .build(); + ISessionContainer sessionContainer = Mockito.mock(ISessionContainer.class); + StoreReader storeReader = new StoreReader(transportClientWrapper.transportClient, addressSelectorWrapper.addressSelector, sessionContainer); + + TimeoutHelper timeoutHelper = Mockito.mock(TimeoutHelper.class); + RxDocumentServiceRequest dsr = RxDocumentServiceRequest.createFromName( + OperationType.Read, "/dbs/db/colls/col/docs/docId", ResourceType.Document); + dsr.getHeaders().put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, ConsistencyLevel.SESSION.toString()); + dsr.requestContext = new DocumentServiceRequestContext(); + Utils.ValueHolder sessionToken = Utils.ValueHolder.initialize(null); + dsr.requestContext.sessionToken = sessionToken.v; + dsr.requestContext.timeoutHelper = timeoutHelper; + dsr.requestContext.resolvedPartitionKeyRange = partitionKeyRangeWithId(partitionKeyRangeId); + dsr.requestContext.requestChargeTracker = new RequestChargeTracker(); + assertThat(VectorSessionToken.tryCreate("-1#" + fasterReplicaLSN , sessionToken)).isTrue(); + + Mockito.doReturn(sessionToken.v).when(sessionContainer).resolvePartitionLocalSessionToken(Mockito.eq(dsr), Mockito.anyString()); + + Mono> readResult = storeReader.readMultipleReplicaAsync( + dsr, + /* includePrimary */ true, + /* replicaCountToRead */ 1, + /* requiresValidLSN */ true, + /* useSessionToken */ true, + /* readMode */ ReadMode.Any, + /* checkMinLsn 
*/ true, + /* forceReadAll */ false); + + MultiStoreResultValidator validator = MultiStoreResultValidator.create() + .withSize(1) + .validateEachWith(StoreResultValidator.create() + .isValid() + .noException() + .withStoreResponse(StoreResponseValidator.create() + .isSameAs(storeResponse) + .build()) + .build()) + .build(); + validateSuccess(readResult, validator); + + addressSelectorWrapper.validate() + .verifyNumberOfForceCachRefresh(0) + .verifyVesolvePrimaryUriAsyncCount(0) + .verifyTotalInvocations(1); + } + + /** + * Reading with session consistency, replicas have session token with higher than requested and return not found + */ + @Test(groups = "unit") + public void sessionRead_LegitimateNotFound() { + long lsn = 651175; + long globalCommittedLsn = 651174; + String partitionKeyRangeId = "73"; + + NotFoundException foundException = new NotFoundException(); + foundException.responseHeaders().put(HttpConstants.HttpHeaders.SESSION_TOKEN, partitionKeyRangeId + ":-1#" + lsn); + foundException.responseHeaders().put(WFConstants.BackendHeaders.LSN, Long.toString(lsn)); + foundException.responseHeaders().put(WFConstants.BackendHeaders.LOCAL_LSN, Long.toString(lsn)); + foundException.responseHeaders().put(WFConstants.BackendHeaders.GLOBAL_COMMITTED_LSN, Long.toString(globalCommittedLsn)); + + TransportClientWrapper transportClientWrapper = new TransportClientWrapper.Builder.ReplicaResponseBuilder + .SequentialBuilder() + .then(foundException) // 1st replica read returns not found + .then(foundException) // 2nd replica read returns not found + .then(foundException) // 3rd replica read returns not found + .then(foundException) // 4th replica read returns not found + .build(); + + URI primaryUri = URI.create("primary"); + URI secondaryUri1 = URI.create("secondary1"); + URI secondaryUri2 = URI.create("secondary2"); + URI secondaryUri3 = URI.create("secondary3"); + + AddressSelectorWrapper addressSelectorWrapper = AddressSelectorWrapper.Builder.Simple.create() + .withPrimary(primaryUri) + .withSecondary(ImmutableList.of(secondaryUri1, secondaryUri2, secondaryUri3)) + .build(); + ISessionContainer sessionContainer = Mockito.mock(ISessionContainer.class); + StoreReader storeReader = new StoreReader(transportClientWrapper.transportClient, addressSelectorWrapper.addressSelector, sessionContainer); + + TimeoutHelper timeoutHelper = Mockito.mock(TimeoutHelper.class); + RxDocumentServiceRequest dsr = RxDocumentServiceRequest.createFromName( + OperationType.Read, "/dbs/db/colls/col/docs/docId", ResourceType.Document); + dsr.getHeaders().put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, ConsistencyLevel.SESSION.toString()); + dsr.requestContext = new DocumentServiceRequestContext(); + Utils.ValueHolder sessionToken = Utils.ValueHolder.initialize(null); + dsr.requestContext.sessionToken = sessionToken.v; + dsr.requestContext.timeoutHelper = timeoutHelper; + dsr.requestContext.resolvedPartitionKeyRange = partitionKeyRangeWithId(partitionKeyRangeId); + dsr.requestContext.requestChargeTracker = new RequestChargeTracker(); + assertThat(VectorSessionToken.tryCreate("-1#" + (lsn - 1) , sessionToken)).isTrue(); + + Mockito.doReturn(sessionToken.v).when(sessionContainer).resolvePartitionLocalSessionToken(Mockito.eq(dsr), Mockito.anyString()); + + Mono> readResult = storeReader.readMultipleReplicaAsync( + dsr, + /* includePrimary */ true, + /* replicaCountToRead */ 1, + /* requiresValidLSN */ true, + /* useSessionToken */ true, + /* readMode */ ReadMode.Any, + /* checkMinLsn */ true, + /* forceReadAll */ false); + + 
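+ // The replicas respond NOT_FOUND with LSN 651175, which is at or above the LSN of the requested session token (651174), so the NOT_FOUND is a legitimate miss: the store result is expected to be valid and to carry the NotFoundException instead of triggering a search for a fresher replica.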
MultiStoreResultValidator validator = MultiStoreResultValidator.create() + .withSize(1) + .validateEachWith(StoreResultValidator.create() + .isValid() + .withException(FailureValidator.builder().instanceOf(NotFoundException.class).build()) + .build()) + .build(); + validateSuccess(readResult, validator); + } + + /** + * reading in session consistency, none of the replicas can support the requested session token. + */ + @Test(groups = "unit") + public void sessionRead_ReplicasDoNotHaveTheRequestedLSN_NoResult() { + long lsn = 651175; + long globalCommittedLsn = 651174; + String partitionKeyRangeId = "73"; + + NotFoundException foundException = new NotFoundException(); + foundException.responseHeaders().put(HttpConstants.HttpHeaders.SESSION_TOKEN, partitionKeyRangeId + ":-1#" + lsn); + foundException.responseHeaders().put(WFConstants.BackendHeaders.LSN, Long.toString(lsn)); + foundException.responseHeaders().put(WFConstants.BackendHeaders.LOCAL_LSN, Long.toString(lsn)); + foundException.responseHeaders().put(WFConstants.BackendHeaders.GLOBAL_COMMITTED_LSN, Long.toString(globalCommittedLsn)); + + TransportClientWrapper transportClientWrapper = new TransportClientWrapper.Builder.ReplicaResponseBuilder + .SequentialBuilder() + .then(foundException) // 1st replica read returns not found + .then(foundException) // 2nd replica read returns not found + .then(foundException) // 3rd replica read returns not found + .then(foundException) // 4th replica read returns not found + .build(); + + URI primaryUri = URI.create("primary"); + URI secondaryUri1 = URI.create("secondary1"); + URI secondaryUri2 = URI.create("secondary2"); + URI secondaryUri3 = URI.create("secondary3"); + + AddressSelectorWrapper addressSelectorWrapper = AddressSelectorWrapper.Builder.Simple.create() + .withPrimary(primaryUri) + .withSecondary(ImmutableList.of(secondaryUri1, secondaryUri2, secondaryUri3)) + .build(); + ISessionContainer sessionContainer = Mockito.mock(ISessionContainer.class); + StoreReader storeReader = new StoreReader(transportClientWrapper.transportClient, addressSelectorWrapper.addressSelector, sessionContainer); + + TimeoutHelper timeoutHelper = Mockito.mock(TimeoutHelper.class); + RxDocumentServiceRequest dsr = RxDocumentServiceRequest.createFromName( + OperationType.Read, "/dbs/db/colls/col/docs/docId", ResourceType.Document); + dsr.getHeaders().put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, ConsistencyLevel.SESSION.toString()); + dsr.requestContext = new DocumentServiceRequestContext(); + Utils.ValueHolder sessionToken = Utils.ValueHolder.initialize(null); + dsr.requestContext.sessionToken = sessionToken.v; + dsr.requestContext.timeoutHelper = timeoutHelper; + dsr.requestContext.resolvedPartitionKeyRange = partitionKeyRangeWithId(partitionKeyRangeId); + dsr.requestContext.requestChargeTracker = new RequestChargeTracker(); + assertThat(VectorSessionToken.tryCreate("-1#" + (lsn + 1) , sessionToken)).isTrue(); + + Mockito.doReturn(sessionToken.v).when(sessionContainer).resolvePartitionLocalSessionToken(Mockito.eq(dsr), Mockito.anyString()); + + Mono> readResult = storeReader.readMultipleReplicaAsync( + dsr, + /* includePrimary */ true, + /* replicaCountToRead */ 1, + /* requiresValidLSN */ true, + /* useSessionToken */ true, + /* readMode */ ReadMode.Any, + /* checkMinLsn */ true, + /* forceReadAll */ false); + + MultiStoreResultValidator validator = MultiStoreResultValidator.create() + .withSize(0) + .build(); + validateSuccess(readResult, validator); + } + + @Test(groups = "unit") + public void 
requestRateTooLarge_BubbleUp() { + long lsn = 1045395; + long globalCommittedLsn = 1045395; + String partitionKeyRangeId = "257"; + + RequestRateTooLargeException requestRateTooLargeException = new RequestRateTooLargeException(); + requestRateTooLargeException.responseHeaders().put(HttpConstants.HttpHeaders.LSN, Long.toString(lsn)); + requestRateTooLargeException.responseHeaders().put(WFConstants.BackendHeaders.GLOBAL_COMMITTED_LSN, Long.toString(globalCommittedLsn)); + requestRateTooLargeException.responseHeaders().put(WFConstants.BackendHeaders.LOCAL_LSN, Long.toString(lsn)); + requestRateTooLargeException.responseHeaders().put(HttpConstants.HttpHeaders.SESSION_TOKEN, partitionKeyRangeId + ":-1#" + lsn); + + TransportClientWrapper transportClientWrapper = new TransportClientWrapper.Builder.ReplicaResponseBuilder + .SequentialBuilder() + .then(requestRateTooLargeException) // 1st replica read returns 429 + .then(requestRateTooLargeException) // 2nd replica read returns 429 + .then(requestRateTooLargeException) // 3rd replica read returns 429 + .then(requestRateTooLargeException) // 4th replica read returns 429 + .build(); + + URI primaryUri = URI.create("primary"); + URI secondaryUri1 = URI.create("secondary1"); + URI secondaryUri2 = URI.create("secondary2"); + URI secondaryUri3 = URI.create("secondary3"); + + AddressSelectorWrapper addressSelectorWrapper = AddressSelectorWrapper.Builder.Simple.create() + .withPrimary(primaryUri) + .withSecondary(ImmutableList.of(secondaryUri1, secondaryUri2, secondaryUri3)) + .build(); + ISessionContainer sessionContainer = Mockito.mock(ISessionContainer.class); + StoreReader storeReader = new StoreReader(transportClientWrapper.transportClient, addressSelectorWrapper.addressSelector, sessionContainer); + + TimeoutHelper timeoutHelper = Mockito.mock(TimeoutHelper.class); + RxDocumentServiceRequest dsr = RxDocumentServiceRequest.createFromName( + OperationType.Read, "/dbs/db/colls/col/docs/docId", ResourceType.Document); + dsr.getHeaders().put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, ConsistencyLevel.SESSION.toString()); + dsr.requestContext = new DocumentServiceRequestContext(); + Utils.ValueHolder sessionToken = Utils.ValueHolder.initialize(null); + dsr.requestContext.sessionToken = sessionToken.v; + dsr.requestContext.timeoutHelper = timeoutHelper; + dsr.requestContext.resolvedPartitionKeyRange = partitionKeyRangeWithId("1"); + dsr.requestContext.requestChargeTracker = new RequestChargeTracker(); + assertThat(VectorSessionToken.tryCreate("-1#" + (lsn - 1) , sessionToken)).isTrue(); + + Mockito.doReturn(sessionToken.v).when(sessionContainer).resolvePartitionLocalSessionToken(Mockito.eq(dsr), Mockito.anyString()); + + Mono> readResult = storeReader.readMultipleReplicaAsync( + dsr, + /* includePrimary */ true, + /* replicaCountToRead */ 1, + /* requiresValidLSN */ true, + /* useSessionToken */ true, + /* readMode */ ReadMode.Any, + /* checkMinLsn */ true, + /* forceReadAll */ false); + + MultiStoreResultValidator validator = MultiStoreResultValidator.create() + .withSize(1) + .validateEachWith(FailureValidator.builder().instanceOf(RequestRateTooLargeException.class).build()) + .build(); + validateSuccess(readResult, validator); + } + + @Test(groups = "unit") + public void readPrimaryAsync() { + TransportClient transportClient = Mockito.mock(TransportClient.class); + AddressSelector addressSelector = Mockito.mock(AddressSelector.class); + ISessionContainer sessionContainer = Mockito.mock(ISessionContainer.class); + + URI primaryURI = 
URI.create("primaryLoc"); + + RxDocumentServiceRequest request = RxDocumentServiceRequest.createFromName( + OperationType.Read, "/dbs/db/colls/col/docs/docId", ResourceType.Document); + + request.requestContext = Mockito.mock(DocumentServiceRequestContext.class); + request.requestContext.timeoutHelper = Mockito.mock(TimeoutHelper.class); + request.requestContext.resolvedPartitionKeyRange = partitionKeyRangeWithId("12"); + request.requestContext.requestChargeTracker = new RequestChargeTracker(); + + Mockito.doReturn(Mono.just(primaryURI)).when(addressSelector).resolvePrimaryUriAsync( + Mockito.eq(request) , Mockito.eq(false)); + + StoreResponse storeResponse = Mockito.mock(StoreResponse.class); + Mockito.doReturn(Mono.just(storeResponse)).when(transportClient).invokeResourceOperationAsync(Mockito.eq(primaryURI), Mockito.eq(request)); + + StoreReader storeReader = new StoreReader(transportClient, addressSelector, sessionContainer); + + Mono readResult = storeReader.readPrimaryAsync(request, true, true); + StoreResultValidator validator = StoreResultValidator.create() + .withStoreResponse(StoreResponseValidator.create().isSameAs(storeResponse).build()) + .build(); + validateSuccess(readResult, validator); + } + + @Test(groups = "unit") + public void readPrimaryAsync_GoneFromReplica() { + TransportClient transportClient = Mockito.mock(TransportClient.class); + AddressSelector addressSelector = Mockito.mock(AddressSelector.class); + ISessionContainer sessionContainer = Mockito.mock(ISessionContainer.class); + + URI primaryURI = URI.create("primaryLoc"); + + RxDocumentServiceRequest request = RxDocumentServiceRequest.createFromName( + OperationType.Read, "/dbs/db/colls/col/docs/docId", ResourceType.Document); + + request.requestContext = Mockito.mock(DocumentServiceRequestContext.class); + request.requestContext.timeoutHelper = Mockito.mock(TimeoutHelper.class); + request.requestContext.resolvedPartitionKeyRange = partitionKeyRangeWithId("12"); + request.requestContext.requestChargeTracker = new RequestChargeTracker(); + + Mockito.doReturn(Mono.just(primaryURI)).when(addressSelector).resolvePrimaryUriAsync( + Mockito.eq(request) , Mockito.eq(false)); + + Mockito.doReturn(Mono.error(ExceptionBuilder.create().asGoneException())).when(transportClient).invokeResourceOperationAsync(Mockito.eq(primaryURI), Mockito.eq(request)); + StoreReader storeReader = new StoreReader(transportClient, addressSelector, sessionContainer); + Mono readResult = storeReader.readPrimaryAsync(request, true, true); + + FailureValidator validator = FailureValidator.builder().instanceOf(GoneException.class).build(); + validateException(readResult, validator); + } + + @Test(groups = "unit") + public void readPrimaryAsync_GoneExceptionOnTimeout() { + TransportClient transportClient = Mockito.mock(TransportClient.class); + AddressSelector addressSelector = Mockito.mock(AddressSelector.class); + ISessionContainer sessionContainer = Mockito.mock(ISessionContainer.class); + + URI primaryURI = URI.create("primaryLoc"); + + RxDocumentServiceRequest request = RxDocumentServiceRequest.createFromName( + OperationType.Read, "/dbs/db/colls/col/docs/docId", ResourceType.Document); + + request.requestContext = Mockito.mock(DocumentServiceRequestContext.class); + request.requestContext.timeoutHelper = Mockito.mock(TimeoutHelper.class); + Mockito.doReturn(true).when(request.requestContext.timeoutHelper).isElapsed(); + request.requestContext.resolvedPartitionKeyRange = partitionKeyRangeWithId("12"); + 
request.requestContext.requestChargeTracker = new RequestChargeTracker(); + + Mockito.doReturn(Mono.just(primaryURI)).when(addressSelector).resolvePrimaryUriAsync( + Mockito.eq(request) , Mockito.eq(false)); + + StoreResponse storeResponse = Mockito.mock(StoreResponse.class); + Mockito.doReturn(Mono.just(storeResponse)).when(transportClient).invokeResourceOperationAsync(Mockito.eq(primaryURI), Mockito.eq(request)); + + StoreReader storeReader = new StoreReader(transportClient, addressSelector, sessionContainer); + + Mono readResult = storeReader.readPrimaryAsync(request, true, true); + FailureValidator validator = FailureValidator.builder().instanceOf(GoneException.class).build(); + validateException(readResult, validator); + } + + @DataProvider(name = "readPrimaryAsync_RetryOnGoneArgProvider") + public Object[][] readPrimaryAsync_RetryOnGoneArgProvider() { + return new Object[][]{ + // first exception from TransportClient, + // performLocalRefreshOnGoneException, + // retry with force refresh expected, + // validator for expected Exception from Single + // StoreResult has a successful StoreResponse + { + // partition moved, refresh replica address cache and retry + ExceptionBuilder.create().asGoneException(), true, true, null, true + }, + + { + // partition moved, refresh replica address cache is not requested, fail + ExceptionBuilder.create().asGoneException(), false, false, FailureValidator.builder().instanceOf(GoneException.class).build(), false + }, + + { + // invalid partition exception represents collection stale, cannot succeed, propagate failure + ExceptionBuilder.create().asInvalidPartitionException(), true, false, null, false + }, + + { + // cannot continue on partition key range gone, require address cache refresh + ExceptionBuilder.create().asPartitionKeyRangeGoneException(), true, false, + FailureValidator.builder().instanceOf(PartitionKeyRangeGoneException.class).build(), true + }, + + { + // cannot continue on partition split, require address cache refresh + ExceptionBuilder.create().asPartitionKeyRangeIsSplittingException(), true, false, + FailureValidator.builder().instanceOf(PartitionKeyRangeIsSplittingException.class).build(), true + }, + + { + // cannot continue on partition migration, require address cache refresh + ExceptionBuilder.create().asPartitionIsMigratingException(), true, false, + FailureValidator.builder().instanceOf(PartitionIsMigratingException.class).build(), true + }, + }; + } + + @Test(groups = "unit", dataProvider = "readPrimaryAsync_RetryOnGoneArgProvider") + public void readPrimaryAsync_RetryOnPrimaryReplicaMove(Exception firstExceptionFromTransport, + boolean performLocalRefreshOnGoneException, + boolean retryWithForceRefreshExpected, + FailureValidator failureFromSingle, + boolean expectedStoreResponseInStoredReadResult) { + ISessionContainer sessionContainer = Mockito.mock(ISessionContainer.class); + StoreResponse response = StoreResponseBuilder.create().build(); + + TransportClientWrapper transportClientWrapper = TransportClientWrapper.Builder.sequentialBuilder() + .then(firstExceptionFromTransport) + .then(response) + .build(); + + URI primaryURIPriorToRefresh = URI.create("stale"); + URI primaryURIAfterRefresh = URI.create("new"); + + RxDocumentServiceRequest request = RxDocumentServiceRequest.createFromName( + OperationType.Read, "/dbs/db/colls/col/docs/docId", ResourceType.Document); + + request.requestContext.performLocalRefreshOnGoneException = performLocalRefreshOnGoneException; + request.requestContext.timeoutHelper = 
Mockito.mock(TimeoutHelper.class); + request.requestContext.resolvedPartitionKeyRange = partitionKeyRangeWithId("12"); + request.requestContext.requestChargeTracker = new RequestChargeTracker(); + + AddressSelectorWrapper addressSelectorWrapper = AddressSelectorWrapper.Builder.PrimaryReplicaMoveBuilder.create(Protocol.HTTPS) + .withPrimaryReplicaMove(primaryURIPriorToRefresh, primaryURIAfterRefresh).build(); + StoreReader storeReader = new StoreReader(transportClientWrapper.transportClient, addressSelectorWrapper.addressSelector, sessionContainer); + + Mono readResult = storeReader.readPrimaryAsync(request, true, true); + + if (failureFromSingle == null) { + StoreResultValidator validator; + if (expectedStoreResponseInStoredReadResult) { + validator = StoreResultValidator.create().withStoreResponse(StoreResponseValidator.create().isSameAs(response).build()).build(); + } else { + validator = StoreResultValidator.create().withException(FailureValidator.builder().sameAs(firstExceptionFromTransport).build()).build(); + } + + validateSuccess(readResult, validator); + } else { + validateException(readResult, failureFromSingle); + } + + int numberOfAttempts = 1 + (retryWithForceRefreshExpected ? 1: 0); + + transportClientWrapper.validate() + .verifyNumberOfInvocations(numberOfAttempts); + + addressSelectorWrapper.validate() + .verifyResolveAddressesAsync(0) + .verifyResolveAllUriAsync(0) + .verifyVesolvePrimaryUriAsyncCount(numberOfAttempts) + .verifyNumberOfForceCachRefresh(retryWithForceRefreshExpected ? 1: 0); + } + + @DataProvider(name = "readMultipleReplicasAsyncArgProvider") + public Object[][] readMultipleReplicasAsyncArgProvider() { + return new Object[][]{ + // boolean includePrimary, int replicaCountToRead, ReadMode.STRONG + { false, 3, ReadMode.Strong }, + { true, 3, ReadMode.Strong }, + { false, 3, ReadMode.Any }, + { true, 3, ReadMode.Any }, + { true, 2, ReadMode.Any }, + { false, 2, ReadMode.Any }, + { true, 1, ReadMode.Any }, + { false, 1, ReadMode.Any }, + }; + } + + @Test(groups = "unit", dataProvider = "readMultipleReplicasAsyncArgProvider") + public void readMultipleReplicasAsync(boolean includePrimary, int replicaCountToRead, ReadMode readMode) { + // This adds basic tests for StoreReader.readMultipleReplicasAsync(.) 
without failure + // TODO: add some tests for readMultipleReplicasAsync which mock behaviour of failure of reading from a replica + ISessionContainer sessionContainer = Mockito.mock(ISessionContainer.class); + URI primaryReplicaURI = URI.create("primary"); + ImmutableList secondaryReplicaURIs = ImmutableList.of(URI.create("secondary1"), URI.create("secondary2"), URI.create("secondary3")); + AddressSelectorWrapper addressSelectorWrapper = AddressSelectorWrapper.Builder.Simple.create() + .withPrimary(primaryReplicaURI) + .withSecondary(secondaryReplicaURIs) + .build(); + + RxDocumentServiceRequest request = RxDocumentServiceRequest.createFromName( + OperationType.Read, "/dbs/db/colls/col/docs/docId", ResourceType.Document); + + request.requestContext = Mockito.mock(DocumentServiceRequestContext.class); + request.requestContext.timeoutHelper = Mockito.mock(TimeoutHelper.class); + request.requestContext.resolvedPartitionKeyRange = partitionKeyRangeWithId("12"); + + request.requestContext.requestChargeTracker = new RequestChargeTracker(); + + double requestChargePerRead = 1.1; + + StoreResponse primaryResponse = StoreResponseBuilder.create() + .withLSN(51) + .withLocalLSN(18) + .withRequestCharge(requestChargePerRead) + .build(); + StoreResponse secondaryResponse1 = StoreResponseBuilder.create() + .withLSN(50) + .withLocalLSN(17) + .withRequestCharge(requestChargePerRead) + .build(); + StoreResponse secondaryResponse2 = StoreResponseBuilder.create() + .withLSN(49) + .withLocalLSN(16) + .withRequestCharge(requestChargePerRead) + .build(); + StoreResponse secondaryResponse3 = StoreResponseBuilder.create() + .withLSN(48) + .withLocalLSN(15) + .withRequestCharge(requestChargePerRead) + .build(); + + List responseList = ImmutableList.of(primaryResponse, secondaryResponse1, secondaryResponse2, secondaryResponse3); + + TransportClientWrapper transportClientWrapper = TransportClientWrapper.Builder.uriToResultBuilder() + .storeResponseOn(primaryReplicaURI, OperationType.Read, ResourceType.Document, primaryResponse, false) + .storeResponseOn(secondaryReplicaURIs.get(0), OperationType.Read, ResourceType.Document, secondaryResponse1, false) + .storeResponseOn(secondaryReplicaURIs.get(1), OperationType.Read, ResourceType.Document, secondaryResponse2, false) + .storeResponseOn(secondaryReplicaURIs.get(2), OperationType.Read, ResourceType.Document, secondaryResponse3, false) + .build(); + + StoreReader storeReader = new StoreReader(transportClientWrapper.transportClient, addressSelectorWrapper.addressSelector, sessionContainer); + + Mono> readResult = storeReader.readMultipleReplicaAsync(request, includePrimary, replicaCountToRead, true, true, readMode); + + long expectedMinLsn = + responseList + .stream() + .filter(sr -> (sr != primaryResponse || includePrimary)) + .mapToLong(sr -> + { + String value = (ReadMode.Strong == readMode)? 
+ sr.getHeaderValue(WFConstants.BackendHeaders.LSN) : + sr.getHeaderValue(WFConstants.BackendHeaders.LOCAL_LSN); + return Long.parseLong(value); + }) + .min().orElse(-1); + + + MultiStoreResultValidator validator = MultiStoreResultValidator.create() + .withSize(replicaCountToRead) + .withMinimumLSN(expectedMinLsn) + .noFailure() + .withTotalRequestCharge(requestChargePerRead * replicaCountToRead) + .build(); + validateSuccess(readResult, validator); + + transportClientWrapper.validate() + .verifyNumberOfInvocations(replicaCountToRead); + addressSelectorWrapper.validate() + .verifyNumberOfForceCachRefresh(0) + .verifyVesolvePrimaryUriAsyncCount(0) + .verifyTotalInvocations(1); + } + + public static void validateSuccess(Mono> single, + MultiStoreResultValidator validator) { + validateSuccess(single, validator, 10000); + } + + public static void validateSuccess(Mono> single, + MultiStoreResultValidator validator, long timeout) { + TestSubscriber> testSubscriber = new TestSubscriber<>(); + + single.flux().subscribe(testSubscriber); + testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS); + testSubscriber.assertNoErrors(); + testSubscriber.assertComplete(); + testSubscriber.assertValueCount(1); + validator.validate(testSubscriber.values().get(0)); + } + + public static void validateSuccess(Mono single, + StoreResultValidator validator) { + validateSuccess(single, validator, 10000); + } + + public static void validateSuccess(Mono single, + StoreResultValidator validator, long timeout) { + TestSubscriber testSubscriber = new TestSubscriber<>(); + + single.subscribe(testSubscriber); + testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS); + testSubscriber.assertNoErrors(); + testSubscriber.assertComplete(); + testSubscriber.assertValueCount(1); + validator.validate(testSubscriber.values().get(0)); + } + + public static void validateException(Mono single, + FailureValidator validator, long timeout) { + TestSubscriber testSubscriber = new TestSubscriber<>(); + + single.flux().subscribe(testSubscriber); + testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS); + testSubscriber.assertNotComplete(); + testSubscriber.assertTerminated(); + assertThat(testSubscriber.errorCount()).isEqualTo(1); + validator.validate((Throwable) testSubscriber.getEvents().get(1).get(0)); + } + + public static void validateException(Mono single, + FailureValidator validator) { + validateException(single, validator, TIMEOUT); + } + + private PartitionKeyRange partitionKeyRangeWithId(String id) { + PartitionKeyRange partitionKeyRange = Mockito.mock(PartitionKeyRange.class); + Mockito.doReturn(id).when(partitionKeyRange).id(); + return partitionKeyRange; + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/StoreReaderUnderTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/StoreReaderUnderTest.java new file mode 100644 index 0000000000000..8577010c40243 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/StoreReaderUnderTest.java @@ -0,0 +1,70 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the 
Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.internal.ISessionContainer; +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import com.google.common.collect.ImmutableList; +import org.apache.commons.lang3.tuple.Pair; +import reactor.core.publisher.Mono; + +import java.lang.reflect.Method; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +public class StoreReaderUnderTest extends StoreReader { + + public List>> invocations = Collections.synchronizedList(new ArrayList<>()); + + public StoreReaderUnderTest(TransportClient transportClient, AddressSelector addressSelector, ISessionContainer sessionContainer) { + super(transportClient, addressSelector, sessionContainer); + } + + @Override + public Mono> readMultipleReplicaAsync(RxDocumentServiceRequest entity, boolean includePrimary, int replicaCountToRead, boolean requiresValidLsn, boolean useSessionToken, ReadMode readMode) { + Method method = new Object(){}.getClass().getEnclosingMethod(); + ImmutableList list = ImmutableList.of(entity, includePrimary, replicaCountToRead, requiresValidLsn, useSessionToken, readMode); + invocations.add(Pair.of(method, list)); + + return super.readMultipleReplicaAsync(entity, includePrimary, replicaCountToRead, requiresValidLsn, useSessionToken, readMode); + } + + @Override + public Mono> readMultipleReplicaAsync(RxDocumentServiceRequest entity, boolean includePrimary, int replicaCountToRead, boolean requiresValidLsn, boolean useSessionToken, ReadMode readMode, boolean checkMinLSN, boolean forceReadAll) { + Method method = new Object(){}.getClass().getEnclosingMethod(); + ImmutableList list = ImmutableList.of(entity, includePrimary, replicaCountToRead, requiresValidLsn, useSessionToken, readMode, checkMinLSN, forceReadAll); + invocations.add(Pair.of(method, list)); + return super.readMultipleReplicaAsync(entity, includePrimary, replicaCountToRead, requiresValidLsn, useSessionToken, readMode, checkMinLSN, forceReadAll); + } + + @Override + public Mono readPrimaryAsync(RxDocumentServiceRequest entity, boolean requiresValidLsn, boolean useSessionToken) { + Method method = new Object(){}.getClass().getEnclosingMethod(); + ImmutableList list = ImmutableList.of(entity, requiresValidLsn, useSessionToken); + invocations.add(Pair.of(method, list)); + return super.readPrimaryAsync(entity, requiresValidLsn, useSessionToken); + } + +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/StoreResponseTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/StoreResponseTest.java new file mode 100644 index 0000000000000..0659edbb8f1bf --- /dev/null +++ 
b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/StoreResponseTest.java @@ -0,0 +1,69 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.internal.directconnectivity.StoreResponse; +import org.apache.commons.io.IOUtils; +import org.testng.annotations.Test; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.util.ArrayList; +import java.util.HashMap; + +import static org.assertj.core.api.Assertions.assertThat; + +public class StoreResponseTest { + @Test(groups = { "unit" }) + public void stringContent() { + String content = "I am body"; + HashMap headerMap = new HashMap<>(); + headerMap.put("key1", "value1"); + headerMap.put("key2", "value2"); + + StoreResponse sp = new StoreResponse(200, new ArrayList<>(headerMap.entrySet()), content); + + assertThat(sp.getStatus()).isEqualTo(200); + assertThat(sp.getResponseStream()).isNull(); + assertThat(sp.getResponseBody()).isEqualTo(content); + assertThat(sp.getHeaderValue("key1")).isEqualTo("value1"); + } + + @Test(groups = { "unit" }) + public void streamContent() throws Exception { + + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + baos.write(new byte[] { 3, 0, 1, 9, -1, 125 }); + HashMap headerMap = new HashMap<>(); + headerMap.put("key1", "value1"); + headerMap.put("key2", "value2"); + + StoreResponse sp = new StoreResponse(200, new ArrayList<>(headerMap.entrySet()), new ByteArrayInputStream(baos.toByteArray())); + + assertThat(sp.getStatus()).isEqualTo(200); + assertThat(sp.getResponseBody()).isNull(); + assertThat(sp.getResponseStream()).isNotNull(); + assertThat(IOUtils.contentEquals(new ByteArrayInputStream(baos.toByteArray()), sp.getResponseStream())).isTrue(); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/StoreResponseValidator.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/StoreResponseValidator.java new file mode 100644 index 0000000000000..d30e68de2fa64 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/StoreResponseValidator.java @@ -0,0 +1,210 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and
associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.Resource; +import com.azure.data.cosmos.internal.HttpConstants; +import org.assertj.core.api.Condition; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import static org.assertj.core.api.Assertions.assertThat; + +public interface StoreResponseValidator { + + void validate(StoreResponse storeResponse); + + public static Builder create() { + return new Builder(); + } + + public class Builder { + private List validators = new ArrayList<>(); + + public StoreResponseValidator build() { + return new StoreResponseValidator() { + + @SuppressWarnings({"rawtypes", "unchecked"}) + @Override + public void validate(StoreResponse resp) { + for (StoreResponseValidator validator : validators) { + validator.validate(resp); + } + } + }; + } + public Builder hasHeader(String headerKey) { + + validators.add(new StoreResponseValidator() { + @Override + public void validate(StoreResponse resp) { + assertThat(Arrays.asList(resp.getResponseHeaderNames())).asList().contains(headerKey); + } + }); + return this; + } + public Builder withHeader(String headerKey, String headerValue) { + + validators.add(new StoreResponseValidator() { + @Override + public void validate(StoreResponse resp) { + assertThat(Arrays.asList(resp.getResponseHeaderNames())).asList().contains(headerKey); + int index = Arrays.asList(resp.getResponseHeaderNames()).indexOf(headerKey); + assertThat(resp.getResponseHeaderValues()[index]).isEqualTo(headerValue); + } + }); + return this; + } + + public Builder withHeaderValueCondition(String headerKey, Condition condition) { + + validators.add(new StoreResponseValidator() { + @Override + public void validate(StoreResponse resp) { + assertThat(Arrays.asList(resp.getResponseHeaderNames())).asList().contains(headerKey); + int index = Arrays.asList(resp.getResponseHeaderNames()).indexOf(headerKey); + String value = resp.getResponseHeaderValues()[index]; + assertThat(value).is(condition); + } + }); + return this; + } + + public Builder isSameAs(StoreResponse storeResponse) { + + validators.add(new StoreResponseValidator() { + @Override + public void validate(StoreResponse resp) { + assertThat(resp).isSameAs(storeResponse); + } + }); + return this; + } + + public Builder withContent(String content) { + + validators.add(new StoreResponseValidator() { + @Override + public void validate(StoreResponse resp) { + assertThat(content).isEqualTo(resp.getResponseBody()); + } + }); + return this; + } + + public Builder 
withStatus(int status) { + + validators.add(new StoreResponseValidator() { + @Override + public void validate(StoreResponse resp) { + assertThat(status == resp.getStatus()).isTrue(); + } + }); + return this; + } + + public Builder in(StoreResponse... storeResponse) { + + validators.add(new StoreResponseValidator() { + @Override + public void validate(StoreResponse resp) { + assertThat(resp).isIn((Object[]) storeResponse); + } + }); + return this; + } + + public Builder withBEActivityId(String activityId) { + withHeader(WFConstants.BackendHeaders.ACTIVITY_ID, activityId); + return this; + } + + public Builder withRequestCharge(double value) { + withHeader(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double.toString(value)); + return this; + } + + public Builder withRequestChargeGreaterThanOrEqualTo(double value) { + withHeaderValueCondition(HttpConstants.HttpHeaders.REQUEST_CHARGE, new Condition<>(s -> { + try { + double parsed = Double.parseDouble(s); + return parsed >= value; + } catch (Exception e) { + return false; + } + }, "request charge should be greater than or equal to " + value)); + return this; + } + + public Builder withRequestChargeLessThanOrEqualTo(double value) { + withHeaderValueCondition(HttpConstants.HttpHeaders.REQUEST_CHARGE, new Condition<>(s -> { + try { + double parsed = Double.parseDouble(s); + return parsed <= value; + } catch (Exception e) { + return false; + } + }, "request charge should be less than or equal to " + value)); + return this; + } + + + public Builder withBELSN(long lsn) { + withHeader(WFConstants.BackendHeaders.LSN, Long.toString(lsn)); + return this; + } + + public Builder withBELocalLSN(long lsn) { + withHeader(WFConstants.BackendHeaders.LOCAL_LSN, Long.toString(lsn)); + return this; + } + + public Builder withBELSNGreaterThanOrEqualTo(long minLSN) { + Condition condition = new Condition<>(value -> { + try { + Long valueAsLong = Long.parseLong(value); + return valueAsLong >= minLSN; + } catch (Exception e) { + return false; + } + }, "min lsn"); + withHeaderValueCondition(WFConstants.BackendHeaders.LSN, condition); + return this; + } + + public Builder withBEGlobalLSNGreaterThanOrEqualTo(long minLSN) { + Condition condition = new Condition<>(value -> { + try { + Long valueAsLong = Long.parseLong(value); + return valueAsLong >= minLSN; + } catch (Exception e) { + return false; + } + }, "min global lsn"); + withHeaderValueCondition(WFConstants.BackendHeaders.GLOBAL_COMMITTED_LSN, condition); + return this; + } + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/StoreResultValidator.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/StoreResultValidator.java new file mode 100644 index 0000000000000..1a8afa6d324a7 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/StoreResultValidator.java @@ -0,0 +1,180 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall
be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.internal.FailureValidator; + +import java.net.URI; +import java.util.ArrayList; +import java.util.List; + +import static org.assertj.core.api.AssertionsForClassTypes.assertThat; +import static org.assertj.core.api.AssertionsForClassTypes.fail; + + +public interface StoreResultValidator { + + static Builder create() { + return new Builder(); + } + + void validate(StoreResult storeResult); + + class Builder { + private List validators = new ArrayList<>(); + + public StoreResultValidator build() { + return new StoreResultValidator() { + + @SuppressWarnings({"rawtypes", "unchecked"}) + @Override + public void validate(StoreResult storeResult) { + for (StoreResultValidator validator : validators) { + validator.validate(storeResult); + } + } + }; + } + + public Builder withStoreResponse(StoreResponseValidator storeResponseValidator) { + validators.add(new StoreResultValidator() { + + @Override + public void validate(StoreResult storeResult) { + try { + storeResponseValidator.validate(storeResult.toResponse()); + }catch (CosmosClientException e) { + fail(e.getMessage()); + } + } + }); + return this; + } + + public Builder withException(FailureValidator failureValidator) { + validators.add(new StoreResultValidator() { + + @Override + public void validate(StoreResult storeResult) { + try { + failureValidator.validate(storeResult.getException()); + }catch (CosmosClientException e) { + fail(e.getMessage()); + } + } + }); + return this; + } + + public Builder withLSN(long lsn) { + validators.add(new StoreResultValidator() { + + @Override + public void validate(StoreResult storeResult) { + assertThat(storeResult.lsn).isEqualTo(lsn); + } + }); + return this; + } + + public Builder withMinLSN(long minLSN) { + validators.add(new StoreResultValidator() { + + @Override + public void validate(StoreResult storeResult) { + assertThat(storeResult.lsn).isGreaterThanOrEqualTo(minLSN); + } + }); + return this; + } + + public Builder withGlobalCommitedLSN(long globalLsn) { + validators.add(new StoreResultValidator() { + + @Override + public void validate(StoreResult storeResult) { + assertThat(storeResult.globalCommittedLSN).isEqualTo(globalLsn); + } + }); + return this; + } + + public Builder withQuorumAckedLsn(long quorumAckedLsn) { + validators.add(new StoreResultValidator() { + + @Override + public void validate(StoreResult storeResult) { + assertThat(storeResult.quorumAckedLSN).isEqualTo(quorumAckedLsn); + } + }); + return this; + } + + public Builder noException() { + validators.add(new StoreResultValidator() { + + @Override + public void validate(StoreResult storeResult) { + assertThat(storeResult).hasFieldOrPropertyWithValue("exception", null); + assertThat(storeResult.isGoneException).isFalse(); + } + }); + return this; + } + + public Builder isValid() { + validators.add(new StoreResultValidator() { + + 
@Override + public void validate(StoreResult storeResult) { + assertThat(storeResult.isValid).isTrue(); + } + }); + return this; + } + + public Builder withReplicaSize(int count) { + validators.add(new StoreResultValidator() { + + @Override + public void validate(StoreResult storeResult) { + assertThat(storeResult.currentReplicaSetSize).isEqualTo(count); + } + }); + return this; + } + + public Builder withStorePhysicalURI(URI expectedURi) { + validators.add(new StoreResultValidator() { + + @Override + public void validate(StoreResult storeResult) { + assertThat(storeResult.storePhysicalAddress).isEqualTo(expectedURi); + } + }); + return this; + } + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/TimeoutHelperTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/TimeoutHelperTest.java new file mode 100644 index 0000000000000..ed66930532334 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/TimeoutHelperTest.java @@ -0,0 +1,58 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.internal.directconnectivity.TimeoutHelper; +import org.testng.annotations.Test; + +import java.time.Duration; + +import static org.assertj.core.api.Assertions.assertThat; + +public class TimeoutHelperTest { + + @Test(groups = "unit") + public void isElapsed() throws InterruptedException { + Duration duration1 = Duration.ofMillis(100); + TimeoutHelper timeoutHelper1 = new TimeoutHelper(duration1); + assertThat(timeoutHelper1.isElapsed()).isFalse(); + + Duration duration2 = Duration.ofMillis(100); + TimeoutHelper timeoutHelper2 = new TimeoutHelper(duration2); + Thread.sleep(100); + assertThat(timeoutHelper2.isElapsed()).isTrue(); + } + + @Test(groups = "unit") + public void getRemainingTime() throws InterruptedException { + for (int i = 1; i <= 5; i++) { + Duration duration = Duration.ofMillis(100); + TimeoutHelper timeoutHelper = new TimeoutHelper(duration); + Thread.sleep((10*i)); + Duration remainingTime1 = timeoutHelper.getRemainingTime(); + //Giving 5 ms extra buffer in case thread sleep complete early + assertThat(remainingTime1.toMillis()).isLessThanOrEqualTo(100-10*i+5); + } + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/TransportClientWrapper.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/TransportClientWrapper.java new file mode 100644 index 0000000000000..2319209fbc23f --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/TransportClientWrapper.java @@ -0,0 +1,327 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.internal.*; +import org.apache.commons.lang3.tuple.Pair; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Mono; + +import java.net.URI; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; + +import static org.assertj.core.api.Assertions.assertThat; + +public class TransportClientWrapper { + private static Logger logger = LoggerFactory.getLogger(TransportClientWrapper.class); + public final TransportClient transportClient; + private final AtomicBoolean valid; + private final AtomicInteger cnt; + private final List> requests; + + TransportClientWrapper(TransportClient transportClient, AtomicInteger cnt, AtomicBoolean valid, List> requests) { + this.transportClient = transportClient; + this.valid = valid; + this.cnt = cnt; + this.requests = requests; + } + + public static class TransportClientWrapperVerificationBuilder { + private List> actions = new ArrayList<>(); + + public static TransportClientWrapperVerificationBuilder create() { + return new TransportClientWrapperVerificationBuilder(); + } + + public TransportClientWrapperVerificationBuilder verifyNumberOfInvocations(int count) { + actions.add(transportClientWrapper -> { + assertThat(transportClientWrapper.getNumberOfInvocations()).isEqualTo(count); + return null; + }); + return this; + } + + public void execute(TransportClientWrapper transportClientWrapper) { + for(Function action: actions) { + action.apply(transportClientWrapper); + } + } + } + + public TransportClientWrapper verifyNumberOfInvocations(int count) { + assertThat(cnt.get()).isEqualTo(count); + return this; + } + + public List> getCapturedArgs() { + return requests; + } + + public int getNumberOfInvocations() { + return cnt.get(); + } + + public TransportClientWrapper validate() { + assertThat(valid).isTrue(); + return this; + } + + public interface Builder { + + static void capture(List> capturedRequests, InvocationOnMock invocation) { + URI physicalUri = invocation.getArgumentAt(0, URI.class); + RxDocumentServiceRequest request = invocation.getArgumentAt(1, RxDocumentServiceRequest.class); + logger.debug("URI: {}, request {}", physicalUri, request); + capturedRequests.add(Pair.of(physicalUri, request)); + } + + TransportClientWrapper build(); + + public static ReplicaResponseBuilder replicaResponseBuilder() { + return new ReplicaResponseBuilder(); + } + + class ReplicaResponseBuilder implements Builder { + Map responseFunctionDictionary = new HashMap<>(); + + public ReplicaResponseBuilder addReplica(URI replicaURI, + Function2WithCheckedException invocationNumberToStoreResponse) { + + responseFunctionDictionary.put(replicaURI, invocationNumberToStoreResponse); + return this; + } + + public TransportClientWrapper build() { + + Map replicaResponseCounterDict = new HashMap<>(); + + AtomicInteger i = new AtomicInteger(0); + AtomicBoolean valid = new AtomicBoolean(true); + List> capturedArgs = Collections.synchronizedList(new ArrayList<>()); + + TransportClient transportClient = Mockito.mock(TransportClient.class); + Mockito.doAnswer(invocation -> { + i.incrementAndGet(); + URI physicalUri = 
invocation.getArgumentAt(0, URI.class); + RxDocumentServiceRequest request = invocation.getArgumentAt(1, RxDocumentServiceRequest.class); + Function2WithCheckedException function = responseFunctionDictionary.get(physicalUri); + if (function == null) { + valid.set(false); + return Mono.error(new IllegalStateException("no registered function for replica " + physicalUri)); + } + int current; + synchronized (transportClient) { + capture(capturedArgs, invocation); + + AtomicInteger cnt = replicaResponseCounterDict.get(physicalUri); + if (cnt == null) { + cnt = new AtomicInteger(0); + replicaResponseCounterDict.put(physicalUri, cnt); + } + + current = cnt.getAndIncrement(); + } + + try { + return Mono.just(function.apply(current, request)); + } catch (Exception e) { + return Mono.error(e); + } + + }).when(transportClient).invokeResourceOperationAsync(Mockito.any(URI.class), Mockito.any(RxDocumentServiceRequest.class)); + + return new TransportClientWrapper(transportClient, i, valid, capturedArgs); + } + } + + + static SequentialBuilder sequentialBuilder() { + return new SequentialBuilder(); + } + + class SequentialBuilder implements Builder { + private List list = new ArrayList<>(); + + public SequentialBuilder then(StoreResponse response) { + list.add(response); + return this; + } + + public SequentialBuilder then(Exception exception) { + list.add(exception); + return this; + } + + public TransportClientWrapper build() { + AtomicInteger i = new AtomicInteger(0); + AtomicBoolean valid = new AtomicBoolean(true); + List> capturedArgs = Collections.synchronizedList(new ArrayList<>()); + + TransportClient transportClient = Mockito.mock(TransportClient.class); + Mockito.doAnswer(invocation -> { + capture(capturedArgs, invocation); + + int current = i.getAndIncrement(); + if (current >= list.size()) { + valid.set(false); + return Mono.error(new IllegalStateException()); + } + Object obj = list.get(current); + StoreResponse response = Utils.as(obj, StoreResponse.class); + if (response != null) { + return Mono.just(response); + } else { + return Mono.error((Exception) obj); + } + + }).when(transportClient).invokeResourceOperationAsync(Mockito.any(URI.class), Mockito.any(RxDocumentServiceRequest.class)); + + return new TransportClientWrapper(transportClient, i, valid, capturedArgs); + } + } + + static UriToResultBuilder uriToResultBuilder() { + return new UriToResultBuilder(); + } + + class UriToResultBuilder implements Builder { + private static class Result { + StoreResponse storeResponse; + Exception exception; + boolean stickyResult; + + public Result(StoreResponse storeResponse, Exception exception, boolean stickyResult) { + this.storeResponse = storeResponse; + this.exception = exception; + this.stickyResult = stickyResult; + } + } + + private static class Tuple { + URI replicaURI; + OperationType operationType; + ResourceType resourceType; + + public Tuple(URI replicaURI, OperationType operationType, ResourceType resourceType) { + this.replicaURI = replicaURI; + this.operationType = operationType; + this.resourceType = resourceType; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Tuple tuple = (Tuple) o; + return Objects.equals(replicaURI, tuple.replicaURI) && + operationType == tuple.operationType && + resourceType == tuple.resourceType; + } + + @Override + public int hashCode() { + return Objects.hash(replicaURI, operationType, resourceType); + } + + @Override + public String toString() { + return 
"Tuple{" + + "replicaURI=" + replicaURI + + ", operationType=" + operationType + + ", resourceType=" + resourceType + + '}'; + } + } + private Map> uriToResult = new HashMap<>(); + + + private UriToResultBuilder resultOn(URI replicaURI, OperationType operationType, ResourceType resourceType, StoreResponse rsp, Exception ex, boolean stickyResult) { + Tuple key = new Tuple(replicaURI, operationType, resourceType); + List list = uriToResult.get(key); + if (list == null) { + list = new ArrayList<>(); + uriToResult.put(key, list); + } + list.add(new Result(rsp, ex, stickyResult)); + return this; + } + + public UriToResultBuilder storeResponseOn(URI replicaURI, OperationType operationType, ResourceType resourceType, StoreResponse response, boolean stickyResult) { + resultOn(replicaURI, operationType, resourceType, response, null, stickyResult); + return this; + } + + public UriToResultBuilder exceptionOn(URI replicaURI, OperationType operationType, ResourceType resourceType, Exception exception, boolean stickyResult) { + resultOn(replicaURI, operationType, resourceType, null, exception, stickyResult); + return this; + } + + public TransportClientWrapper build() { + AtomicBoolean valid = new AtomicBoolean(true); + AtomicInteger cnt = new AtomicInteger(0); + List> capturedArgs = Collections.synchronizedList(new ArrayList<>()); + TransportClient transportClient = Mockito.mock(TransportClient.class); + Mockito.doAnswer(invocation -> { + cnt.getAndIncrement(); + URI physicalUri = invocation.getArgumentAt(0, URI.class); + RxDocumentServiceRequest request = invocation.getArgumentAt(1, RxDocumentServiceRequest.class); + capture(capturedArgs, invocation); + + Tuple tuple = new Tuple(physicalUri, request.getOperationType(), request.getResourceType()); + List list = uriToResult.get(tuple); + if (list == null || list.isEmpty()) { + // unknown + valid.set(false); + return Mono.error(new IllegalStateException(tuple.toString())); + } + + Result result = list.get(0); + + if (!result.stickyResult) { + list.remove(0); + } + if (result.storeResponse != null) { + return Mono.just(result.storeResponse); + } else { + return Mono.error(result.exception); + } + + }).when(transportClient).invokeResourceOperationAsync(Mockito.any(URI.class), Mockito.any(RxDocumentServiceRequest.class)); + + return new TransportClientWrapper(transportClient, cnt, valid, capturedArgs); + } + } + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/WebExceptionUtilityTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/WebExceptionUtilityTest.java new file mode 100644 index 0000000000000..11fffa33b1b5f --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/directconnectivity/WebExceptionUtilityTest.java @@ -0,0 +1,137 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.directconnectivity; + +import com.azure.data.cosmos.internal.directconnectivity.WebExceptionUtility; +import io.netty.channel.ChannelException; +import io.netty.channel.ConnectTimeoutException; +import io.netty.handler.timeout.ReadTimeoutException; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; + +import javax.net.ssl.SSLHandshakeException; +import javax.net.ssl.SSLPeerUnverifiedException; +import java.net.ConnectException; +import java.net.NoRouteToHostException; +import java.net.SocketTimeoutException; +import java.net.UnknownHostException; + +import static org.assertj.core.api.AssertionsForClassTypes.assertThat; + +/** + * validation tests for {@link WebExceptionUtility} + */ +public class WebExceptionUtilityTest { + + @DataProvider(name = "exceptionToIsRetriable") + public Object[][] exceptionToIsRetriable() { + return new Object[][]{ + // exception, is retriable + { + new RuntimeException(), false + }, + { + new ConnectException(), true + }, + { + new ConnectTimeoutException(), true + }, + { + new UnknownHostException(), true + }, + { + ReadTimeoutException.INSTANCE, false + }, + { + new SSLHandshakeException("dummy"), true + }, + { + new NoRouteToHostException(), true, + }, + { + new SSLPeerUnverifiedException("dummy"), true + }, + { + new SocketTimeoutException(), false + } + }; + } + + @Test(groups = "unit", dataProvider = "exceptionToIsRetriable") + public void isWebExceptionRetriable(Exception e, boolean isRetriable) { + boolean actualRes = WebExceptionUtility.isWebExceptionRetriable(e); + if (isRetriable) { + assertThat(actualRes).describedAs(e.toString()).isTrue(); + } else { + assertThat(actualRes).describedAs(e.toString()).isFalse(); + } + } + + @DataProvider(name = "networkFailure") + public Object[][] networkFailure() { + return new Object[][]{ + // exception, is retriable + { + new RuntimeException(), false + }, + { + new ConnectException(), true + }, + { + new ConnectTimeoutException(), true + }, + { + new UnknownHostException(), true + }, + { + ReadTimeoutException.INSTANCE, true + }, + { + new SSLHandshakeException("dummy"), true + }, + { + new NoRouteToHostException(), true, + }, + { + new SSLPeerUnverifiedException("dummy"), true + }, + { + new SocketTimeoutException(), true + }, + { + new ChannelException(), true + } + }; + } + + @Test(groups = "unit", dataProvider = "networkFailure") + public void isNetworkFailure(Exception e, boolean isNetworkFailure) { + boolean actualRes = WebExceptionUtility.isNetworkFailure(e); + if (isNetworkFailure) { + assertThat(actualRes).describedAs(e.toString()).isTrue(); + } else { + assertThat(actualRes).describedAs(e.toString()).isFalse(); + } + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/query/DocumentProducerTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/query/DocumentProducerTest.java new file mode 100644 index 0000000000000..ae5f57aa55a93 --- /dev/null +++ 
b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/query/DocumentProducerTest.java @@ -0,0 +1,942 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.internal.query; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.ConnectionPolicy; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.internal.Document; +import com.azure.data.cosmos.CosmosError; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.internal.PartitionKeyRange; +import com.azure.data.cosmos.internal.GlobalEndpointManager; +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.IRetryPolicyFactory; +import com.azure.data.cosmos.internal.RetryPolicy; +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import com.azure.data.cosmos.internal.caches.RxPartitionKeyRangeCache; +import com.azure.data.cosmos.internal.query.orderbyquery.OrderByRowResult; +import com.azure.data.cosmos.internal.query.orderbyquery.OrderbyRowComparer; +import com.azure.data.cosmos.internal.routing.PartitionKeyRangeIdentity; +import com.azure.data.cosmos.internal.routing.Range; +import com.fasterxml.jackson.databind.node.ObjectNode; +import com.google.common.base.Strings; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Iterables; +import com.google.common.collect.LinkedListMultimap; +import io.reactivex.subscribers.TestSubscriber; +import org.apache.commons.lang3.RandomUtils; +import org.mockito.Matchers; +import org.mockito.Mockito; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.net.URL; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.BiFunction; +import java.util.function.Function; +import java.util.function.Supplier; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import java.util.stream.Stream; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Matchers.any; +import static 
org.mockito.Matchers.anyBoolean; +import static org.mockito.Matchers.anyString; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; + +public class DocumentProducerTest { + private final static Logger logger = LoggerFactory.getLogger(DocumentProducerTest.class); + private static final long TIMEOUT = 10000; + private final static String OrderByPayloadFieldName = "payload"; + private final static String OrderByItemsFieldName = "orderByItems"; + + private final static String OrderByIntFieldName = "propInt"; + private final static String DocumentPartitionKeyRangeIdFieldName = "_pkrId"; + private final static String DocumentPartitionKeyRangeMinInclusiveFieldName = "_pkrMinInclusive"; + private final static String DocumentPartitionKeyRangeMaxExclusiveFieldName = "_pkrMaxExclusive"; + + private final String collectionRid = "myrid"; + private final String collectionLink = "/dbs/mydb/colls/mycol"; + + @DataProvider(name = "splitParamProvider") + public Object[][] splitParamProvider() { + return new Object[][]{ + // initial continuation token, + // # pages from parent before split, + // # pages from left child after split, + // # pages from right child after split + {"init-cp", 10, 5, 6}, {null, 10, 5, 6}, {null, 1000, 500, 600}, {"init-cp", 1000, 500, 600}, {"init" + + "-cp", 0, 10, 12}, {null, 0, 10, 12}, {null, 0, 1, 1}, {null, 10, 1, 1},}; + } + + private IRetryPolicyFactory mockDocumentClientIRetryPolicyFactory() { + URL url; + try { + url = new URL("http://localhost"); + } catch (Exception e) { + throw new IllegalStateException(e); + } + + GlobalEndpointManager globalEndpointManager = Mockito.mock(GlobalEndpointManager.class); + Mockito.doReturn(url).when(globalEndpointManager).resolveServiceEndpoint(Mockito.any(RxDocumentServiceRequest.class)); + doReturn(false).when(globalEndpointManager).isClosed(); + return new RetryPolicy(globalEndpointManager, ConnectionPolicy.defaultPolicy()); + } + + @Test(groups = {"unit"}, dataProvider = "splitParamProvider", timeOut = TIMEOUT) + public void partitionSplit(String initialContinuationToken, int numberOfResultPagesFromParentBeforeSplit, + int numberOfResultPagesFromLeftChildAfterSplit, int numberOfResultPagesFromRightChildAfterSplit) { + int initialPageSize = 7; + int top = -1; + + String parentPartitionId = "1"; + String leftChildPartitionId = "2"; + String rightChildPartitionId = "3"; + + List> resultFromParentPartition = mockFeedResponses(parentPartitionId, + numberOfResultPagesFromParentBeforeSplit, 3, false); + List> resultFromLeftChildPartition = mockFeedResponses(leftChildPartitionId, + numberOfResultPagesFromLeftChildAfterSplit, 3, true); + List> resultFromRightChildPartition = mockFeedResponses(rightChildPartitionId, + numberOfResultPagesFromRightChildAfterSplit, 3, true); + + // sanity check + sanityCheckSplitValidation(parentPartitionId, leftChildPartitionId, rightChildPartitionId, + numberOfResultPagesFromParentBeforeSplit, + numberOfResultPagesFromLeftChildAfterSplit, + numberOfResultPagesFromRightChildAfterSplit, resultFromParentPartition, + resultFromLeftChildPartition, resultFromRightChildPartition); + + // setting up behaviour + RequestExecutor.PartitionAnswer answerFromParentPartition = + RequestExecutor.PartitionAnswer.just(parentPartitionId, resultFromParentPartition); + RequestExecutor.PartitionAnswer splitAnswerFromParentPartition = + RequestExecutor.PartitionAnswer.alwaysPartitionSplit(parentPartitionId); + + RequestExecutor.PartitionAnswer 
answerFromLeftChildPartition = + RequestExecutor.PartitionAnswer.just(leftChildPartitionId, resultFromLeftChildPartition); + RequestExecutor.PartitionAnswer answerFromRightChildPartition = + RequestExecutor.PartitionAnswer.just(rightChildPartitionId, resultFromRightChildPartition); + + RequestCreator requestCreator = RequestCreator.simpleMock(); + RequestExecutor requestExecutor = RequestExecutor. + fromPartitionAnswer(ImmutableList.of(answerFromParentPartition, splitAnswerFromParentPartition, + answerFromLeftChildPartition, answerFromRightChildPartition)); + + PartitionKeyRange parentPartitionKeyRange = mockPartitionKeyRange(parentPartitionId); + PartitionKeyRange leftChildPartitionKeyRange = mockPartitionKeyRange(leftChildPartitionId); + PartitionKeyRange rightChildPartitionKeyRange = mockPartitionKeyRange(rightChildPartitionId); + + // this returns replacement ranges upon split detection + IDocumentQueryClient queryClient = mockQueryClient(ImmutableList.of(leftChildPartitionKeyRange, + rightChildPartitionKeyRange)); + + DocumentProducer documentProducer = new DocumentProducer<>( + queryClient, + collectionRid, + null, + requestCreator, + requestExecutor, + parentPartitionKeyRange, + collectionLink, + () -> mockDocumentClientIRetryPolicyFactory().getRequestPolicy(), + Document.class, + null, + initialPageSize, + initialContinuationToken, + top); + + TestSubscriber.DocumentProducerFeedResponse> subscriber = new TestSubscriber<>(); + + documentProducer.produceAsync().subscribe(subscriber); + subscriber.awaitTerminalEvent(); + + subscriber.assertNoErrors(); + subscriber.assertComplete(); + + validateSplitCaptureRequests( + requestCreator.invocations, + initialContinuationToken, + parentPartitionId, + leftChildPartitionId, + rightChildPartitionId, + resultFromParentPartition, + resultFromLeftChildPartition, + resultFromRightChildPartition); + + // page size match + assertThat(requestCreator.invocations.stream().map(i -> i.maxItemCount) + .distinct().collect(Collectors.toList())).containsExactlyElementsOf(Collections.singleton(initialPageSize)); + + // expected results + validateSplitResults(subscriber.values(), + parentPartitionId, leftChildPartitionId, + rightChildPartitionId, resultFromParentPartition, resultFromLeftChildPartition, + resultFromRightChildPartition, false); + + Mockito.verify(queryClient, times(1)).getPartitionKeyRangeCache(); + } + + @Test(groups = {"unit"}, dataProvider = "splitParamProvider", timeOut = TIMEOUT) + public void orderByPartitionSplit(String initialContinuationToken, int numberOfResultPagesFromParentBeforeSplit, + int numberOfResultPagesFromLeftChildAfterSplit, int numberOfResultPagesFromRightChildAfterSplit) { + int initialPageSize = 7; + int top = -1; + + String parentPartitionId = "1"; + String leftChildPartitionId = "2"; + String rightChildPartitionId = "3"; + + Integer initialPropVal = 1; + List> resultFromParentPartition = mockFeedResponses(parentPartitionId, + numberOfResultPagesFromParentBeforeSplit, 3, initialPropVal, false); + Integer highestValInParentPage = getLastValueInAsc(initialPropVal, resultFromParentPartition); + + List> resultFromLeftChildPartition = mockFeedResponses(leftChildPartitionId, + numberOfResultPagesFromLeftChildAfterSplit, 3, highestValInParentPage, true); + + List> resultFromRightChildPartition = mockFeedResponses(rightChildPartitionId, + numberOfResultPagesFromRightChildAfterSplit, 3, highestValInParentPage, true); + + // sanity check + sanityCheckSplitValidation(parentPartitionId, leftChildPartitionId, rightChildPartitionId, 
+ numberOfResultPagesFromParentBeforeSplit, + numberOfResultPagesFromLeftChildAfterSplit, + numberOfResultPagesFromRightChildAfterSplit, resultFromParentPartition, + resultFromLeftChildPartition, resultFromRightChildPartition); + + // setting up behaviour + RequestExecutor.PartitionAnswer answerFromParentPartition = + RequestExecutor.PartitionAnswer.just(parentPartitionId, resultFromParentPartition); + RequestExecutor.PartitionAnswer splitAnswerFromParentPartition = + RequestExecutor.PartitionAnswer.alwaysPartitionSplit(parentPartitionId); + + RequestExecutor.PartitionAnswer answerFromLeftChildPartition = + RequestExecutor.PartitionAnswer.just(leftChildPartitionId, resultFromLeftChildPartition); + RequestExecutor.PartitionAnswer answerFromRightChildPartition = + RequestExecutor.PartitionAnswer.just(rightChildPartitionId, resultFromRightChildPartition); + + RequestCreator requestCreator = RequestCreator.simpleMock(); + RequestExecutor requestExecutor = RequestExecutor. + fromPartitionAnswer(ImmutableList.of(answerFromParentPartition, splitAnswerFromParentPartition, + answerFromLeftChildPartition, answerFromRightChildPartition)); + + PartitionKeyRange parentPartitionKeyRange = mockPartitionKeyRange(parentPartitionId); + PartitionKeyRange leftChildPartitionKeyRange = mockPartitionKeyRange(leftChildPartitionId); + PartitionKeyRange rightChildPartitionKeyRange = mockPartitionKeyRange(rightChildPartitionId); + + // this returns replacement ranges upon split detection + IDocumentQueryClient queryCl = mockQueryClient(ImmutableList.of(leftChildPartitionKeyRange, + rightChildPartitionKeyRange)); + + OrderByDocumentProducer documentProducer = + new OrderByDocumentProducer<>(new OrderbyRowComparer<>(ImmutableList.of(SortOrder.Ascending)), + queryCl, collectionRid, null, requestCreator, requestExecutor, + parentPartitionKeyRange, collectionLink, null, Document.class, null, + initialPageSize, initialContinuationToken, top, + /*targetRangeToOrderByContinuationTokenMap*/new HashMap<>()); + + TestSubscriber.DocumentProducerFeedResponse> subscriber = new TestSubscriber<>(); + + documentProducer.produceAsync().subscribe(subscriber); + subscriber.awaitTerminalEvent(); + + subscriber.assertNoErrors(); + subscriber.assertComplete(); + + validateSplitCaptureRequests(requestCreator.invocations, initialContinuationToken, parentPartitionId, + leftChildPartitionId, rightChildPartitionId, resultFromParentPartition, + resultFromLeftChildPartition, resultFromRightChildPartition); + + // page size match + assertThat(requestCreator.invocations.stream().map(i -> i.maxItemCount).distinct().collect(Collectors.toList())).containsExactlyElementsOf(Collections.singleton(initialPageSize)); + + // expected results + validateSplitResults(subscriber.values(), + parentPartitionId, leftChildPartitionId, + rightChildPartitionId, resultFromParentPartition, resultFromLeftChildPartition, + resultFromRightChildPartition, true); + + Mockito.verify(queryCl, times(1)).getPartitionKeyRangeCache(); + } + + @Test(groups = {"unit"}, timeOut = TIMEOUT) + public void simple() { + int initialPageSize = 7; + int top = -1; + + String partitionId = "1"; + + List requests = new ArrayList<>(); + for (int i = 0; i < 10; i++) { + requests.add(mockRequest(partitionId)); + } + + List> responses = mockFeedResponses(partitionId, 10, 3, true); + + RequestCreator requestCreator = RequestCreator.give(requests); + RequestExecutor requestExecutor = RequestExecutor.fromPartitionAnswer(RequestExecutor.PartitionAnswer.just("1" + , responses)); + + PartitionKeyRange 
targetRange = mockPartitionKeyRange(partitionId); + + IDocumentQueryClient queryClient = Mockito.mock(IDocumentQueryClient.class); + String initialContinuationToken = "initial-cp"; + DocumentProducer documentProducer = new DocumentProducer<>(queryClient, collectionRid, null, + requestCreator, requestExecutor, + targetRange, collectionLink, + () -> mockDocumentClientIRetryPolicyFactory().getRequestPolicy(), Document.class, null, initialPageSize, initialContinuationToken, top); + + TestSubscriber subscriber = new TestSubscriber<>(); + + documentProducer.produceAsync().subscribe(subscriber); + subscriber.awaitTerminalEvent(); + + subscriber.assertNoErrors(); + subscriber.assertComplete(); + + subscriber.assertValueCount(responses.size()); + + // requests match + assertThat(requestCreator.invocations.stream().map(i -> i.invocationResult).collect(Collectors.toList())).containsExactlyElementsOf(requests); + + // requested max page size match + assertThat(requestCreator.invocations.stream().map(i -> i.maxItemCount).distinct().collect(Collectors.toList())).containsExactlyElementsOf(Collections.singleton(7)); + + // continuation tokens + assertThat(requestCreator.invocations.get(0).continuationToken).isEqualTo(initialContinuationToken); + assertThat(requestCreator.invocations.stream().skip(1).map(i -> i.continuationToken).collect(Collectors.toList())).containsExactlyElementsOf(responses.stream().limit(9).map(r -> r.continuationToken()).collect(Collectors.toList())); + + // source partition + assertThat(requestCreator.invocations.stream().map(i -> i.sourcePartition).distinct().collect(Collectors.toList())).containsExactlyElementsOf(Collections.singletonList(targetRange)); + } + + @Test(groups = {"unit"}, timeOut = TIMEOUT) + public void retries() { + int initialPageSize = 7; + int top = -1; + + String partitionKeyRangeId = "1"; + + RequestCreator requestCreator = RequestCreator.simpleMock(); + + List> responsesBeforeThrottle = mockFeedResponses(partitionKeyRangeId, 2, 1, false); + Exception throttlingException = mockThrottlingException(10); + List> responsesAfterThrottle = mockFeedResponses(partitionKeyRangeId, 5, 1, true); + + RequestExecutor.PartitionAnswer behaviourBeforeException = + RequestExecutor.PartitionAnswer.just(partitionKeyRangeId, responsesBeforeThrottle); + RequestExecutor.PartitionAnswer exceptionBehaviour = + RequestExecutor.PartitionAnswer.errors(partitionKeyRangeId, + Collections.singletonList(throttlingException)); + RequestExecutor.PartitionAnswer behaviourAfterException = + RequestExecutor.PartitionAnswer.just(partitionKeyRangeId, responsesAfterThrottle); + + RequestExecutor requestExecutor = RequestExecutor.fromPartitionAnswer(behaviourBeforeException, + exceptionBehaviour, + behaviourAfterException); + + PartitionKeyRange targetRange = mockPartitionKeyRange(partitionKeyRangeId); + + IDocumentQueryClient queryClient = Mockito.mock(IDocumentQueryClient.class); + String initialContinuationToken = "initial-cp"; + DocumentProducer documentProducer = new DocumentProducer<>(queryClient, collectionRid, null, + requestCreator, requestExecutor, + targetRange, collectionLink, + () -> mockDocumentClientIRetryPolicyFactory().getRequestPolicy(), Document.class, null, initialPageSize, initialContinuationToken, top); + + TestSubscriber subscriber = new TestSubscriber<>(); + + documentProducer.produceAsync().subscribe(subscriber); + subscriber.awaitTerminalEvent(); + + subscriber.assertNoErrors(); + subscriber.assertComplete(); + + subscriber.assertValueCount(responsesBeforeThrottle.size() + 
responsesAfterThrottle.size()); + + // requested max page size match + assertThat(requestCreator.invocations.stream().map(i -> i.maxItemCount).distinct().collect(Collectors.toList())).containsExactlyElementsOf(Collections.singleton(7)); + + // continuation tokens + assertThat(requestCreator.invocations.get(0).continuationToken).isEqualTo(initialContinuationToken); + + // source partition + assertThat(requestCreator.invocations.stream().map(i -> i.sourcePartition).distinct().collect(Collectors.toList())).containsExactlyElementsOf(Collections.singletonList(targetRange)); + + List resultContinuationToken = + subscriber.values().stream().map(r -> r.pageResult.continuationToken()).collect(Collectors.toList()); + List beforeExceptionContinuationTokens = + responsesBeforeThrottle.stream().map(FeedResponse::continuationToken).collect(Collectors.toList()); + List afterExceptionContinuationTokens = + responsesAfterThrottle.stream().map(FeedResponse::continuationToken).collect(Collectors.toList()); + + assertThat(resultContinuationToken).containsExactlyElementsOf(Iterables.concat(beforeExceptionContinuationTokens, afterExceptionContinuationTokens)); + + String continuationTokenOnException = Iterables.getLast(beforeExceptionContinuationTokens); + + assertThat(requestCreator.invocations.stream().map(cr -> cr.continuationToken)).containsExactlyElementsOf(Iterables.concat(ImmutableList.of(initialContinuationToken), Iterables.limit(resultContinuationToken, resultContinuationToken.size() - 1))); + + assertThat(requestExecutor.partitionKeyRangeIdToCapturedInvocation.get(partitionKeyRangeId).stream().map(cr -> cr.request.getContinuation())).containsExactlyElementsOf(Iterables.concat(ImmutableList.of(initialContinuationToken), beforeExceptionContinuationTokens, Collections.singletonList(continuationTokenOnException), Iterables.limit(afterExceptionContinuationTokens, afterExceptionContinuationTokens.size() - 1))); + } + + @Test(groups = {"unit"}, timeOut = TIMEOUT) + public void retriesExhausted() { + int initialPageSize = 7; + int top = -1; + + String partitionKeyRangeId = "1"; + + RequestCreator requestCreator = RequestCreator.simpleMock(); + + List> responsesBeforeThrottle = mockFeedResponses(partitionKeyRangeId, 1, 1, false); + Exception throttlingException = mockThrottlingException(10); + + RequestExecutor.PartitionAnswer behaviourBeforeException = + RequestExecutor.PartitionAnswer.just(partitionKeyRangeId, responsesBeforeThrottle); + RequestExecutor.PartitionAnswer exceptionBehaviour = + RequestExecutor.PartitionAnswer.errors(partitionKeyRangeId, Collections.nCopies(10, + throttlingException)); + + RequestExecutor requestExecutor = RequestExecutor.fromPartitionAnswer(behaviourBeforeException, + exceptionBehaviour); + + PartitionKeyRange targetRange = mockPartitionKeyRange(partitionKeyRangeId); + + IDocumentQueryClient queryClient = Mockito.mock(IDocumentQueryClient.class); + String initialContinuationToken = "initial-cp"; + DocumentProducer documentProducer = new DocumentProducer(queryClient, collectionRid, null, + requestCreator, requestExecutor, + targetRange, collectionRid, + () -> mockDocumentClientIRetryPolicyFactory().getRequestPolicy(), Document.class, null, initialPageSize, initialContinuationToken, top); + + TestSubscriber subscriber = new TestSubscriber<>(); + + documentProducer.produceAsync().subscribe(subscriber); + subscriber.awaitTerminalEvent(); + + subscriber.assertError(throttlingException); + subscriber.assertValueCount(responsesBeforeThrottle.size()); + } + + private 
CosmosClientException mockThrottlingException(long retriesAfter) { + CosmosClientException throttleException = mock(CosmosClientException.class); + doReturn(429).when(throttleException).statusCode(); + doReturn(retriesAfter).when(throttleException).retryAfterInMilliseconds(); + return throttleException; + } + + private List> mockFeedResponses(String partitionKeyRangeId, int numberOfPages, + int numberOfDocsPerPage, boolean completed) { + return mockFeedResponsesPartiallySorted(partitionKeyRangeId, numberOfPages, numberOfDocsPerPage, false, -1, + completed); + } + + private List> mockFeedResponses(String partitionKeyRangeId, int numberOfPages, + int numberOfDocsPerPage, int orderByFieldInitialVal, boolean completed) { + return mockFeedResponsesPartiallySorted(partitionKeyRangeId, numberOfPages, numberOfDocsPerPage, true, + orderByFieldInitialVal, completed); + } + + private List> mockFeedResponsesPartiallySorted(String partitionKeyRangeId, + int numberOfPages, int numberOfDocsPerPage, boolean isOrderby, int orderByFieldInitialVal, + boolean completed) { + String uuid = UUID.randomUUID().toString(); + List> responses = new ArrayList<>(); + for (int i = 0; i < numberOfPages; i++) { + FeedResponseBuilder rfb = FeedResponseBuilder.queryFeedResponseBuilder(Document.class); + List res = new ArrayList<>(); + + for (int j = 0; j < numberOfDocsPerPage; j++) { + + Document d = getDocumentDefinition(); + if (isOrderby) { + BridgeInternal.setProperty(d, OrderByIntFieldName, orderByFieldInitialVal + RandomUtils.nextInt(0, 3)); + BridgeInternal.setProperty(d, DocumentPartitionKeyRangeIdFieldName, partitionKeyRangeId); + PartitionKeyRange pkr = mockPartitionKeyRange(partitionKeyRangeId); + + BridgeInternal.setProperty(d, DocumentPartitionKeyRangeMinInclusiveFieldName, pkr.getMinInclusive()); + BridgeInternal.setProperty(d, DocumentPartitionKeyRangeMaxExclusiveFieldName, pkr.getMaxExclusive()); + + QueryItem qi = new QueryItem("{ \"item\": " + Integer.toString(d.getInt(OrderByIntFieldName)) + + " }"); + String json = + "{\"" + OrderByPayloadFieldName + "\" : " + d.toJson() + ", \"" + OrderByItemsFieldName + "\" : [ " + qi.toJson() + " ] }"; + + OrderByRowResult row = new OrderByRowResult<>(Document.class, json, + mockPartitionKeyRange(partitionKeyRangeId), "backend continuation token"); + res.add(row); + } else { + res.add(d); + } + } + rfb.withResults(res); + + if (!(completed && i == numberOfPages - 1)) { + rfb.withContinuationToken("cp:" + uuid + ":" + i); + } + + FeedResponse resp = rfb.build(); + responses.add(resp); + } + return responses; + } + + private int getLastValueInAsc(int initialValue, List> responsesList) { + Integer value = null; + for (FeedResponse page : responsesList) { + for (Document d : page.results()) { + Integer tmp = d.getInt(OrderByIntFieldName); + if (tmp != null) { + value = tmp; + } + } + } + if (value != null) { + return value; + } else { + return initialValue; + } + } + + private IDocumentQueryClient mockQueryClient(List replacementRanges) { + IDocumentQueryClient client = Mockito.mock(IDocumentQueryClient.class); + RxPartitionKeyRangeCache cache = Mockito.mock(RxPartitionKeyRangeCache.class); + doReturn(cache).when(client).getPartitionKeyRangeCache(); + doReturn(Mono.just(replacementRanges)).when(cache). 
+ tryGetOverlappingRangesAsync(anyString(), any(Range.class), anyBoolean(), Matchers.anyMap()); + return client; + } + + private PartitionKeyRange mockPartitionKeyRange(String partitionKeyRangeId) { + PartitionKeyRange pkr = Mockito.mock(PartitionKeyRange.class); + doReturn(partitionKeyRangeId).when(pkr).id(); + doReturn(partitionKeyRangeId + ":AA").when(pkr).getMinInclusive(); + doReturn(partitionKeyRangeId + ":FF").when(pkr).getMaxExclusive(); + return pkr; + } + + private RxDocumentServiceRequest mockRequest(String partitionKeyRangeId) { + RxDocumentServiceRequest req = Mockito.mock(RxDocumentServiceRequest.class); + PartitionKeyRangeIdentity pkri = new PartitionKeyRangeIdentity(partitionKeyRangeId); + doReturn(pkri).when(req).getPartitionKeyRangeIdentity(); + return req; + } + + private static void validateSplitCaptureRequests(List capturedInvocationList, + String initialContinuationToken, String parentPartitionId, String leftChildPartitionId, + String rightChildPartitionId, + List> expectedResultPagesFromParentPartitionBeforeSplit, + List> expectedResultPagesFromLeftChildPartition, + List> expectedResultPagesFromRightChildPartition) { + + int numberOfResultPagesFromParentBeforeSplit = expectedResultPagesFromParentPartitionBeforeSplit.size(); + int numberOfResultPagesFromLeftChildAfterSplit = expectedResultPagesFromLeftChildPartition.size(); + int numberOfResultPagesFromRightChildAfterSplit = expectedResultPagesFromRightChildPartition.size(); + + // numberOfResultPagesFromParentBeforeSplit + 1 requests to parent partition + assertThat(capturedInvocationList.stream().limit(numberOfResultPagesFromParentBeforeSplit + 1).filter(i -> i.sourcePartition.id().equals(parentPartitionId))).hasSize(numberOfResultPagesFromParentBeforeSplit + 1); + + assertThat(capturedInvocationList.stream().skip(numberOfResultPagesFromParentBeforeSplit + 1).filter(i -> i.sourcePartition.id().equals(leftChildPartitionId))).hasSize(numberOfResultPagesFromLeftChildAfterSplit); + + assertThat(capturedInvocationList.stream().skip(numberOfResultPagesFromParentBeforeSplit + 1).filter(i -> i.sourcePartition.id().equals(rightChildPartitionId))).hasSize(numberOfResultPagesFromRightChildAfterSplit); + + BiFunction, String, Stream> filterByPartition = (stream, partitionId) -> stream.filter(i -> i.sourcePartition.id().equals(partitionId)); + + Function>, Stream> extractContinuationToken = + (list) -> list.stream().map(p -> p.continuationToken()); + + assertThat(filterByPartition.apply(capturedInvocationList.stream(), parentPartitionId).map(r -> r.continuationToken)).containsExactlyElementsOf(toList(Stream.concat(Stream.of(initialContinuationToken), extractContinuationToken.apply(expectedResultPagesFromParentPartitionBeforeSplit)))); + + String expectedInitialChildContinuationTokenInheritedFromParent = + expectedResultPagesFromParentPartitionBeforeSplit.size() > 0 ? 
+ expectedResultPagesFromParentPartitionBeforeSplit.get(expectedResultPagesFromParentPartitionBeforeSplit.size() - 1).continuationToken() : initialContinuationToken; + + assertThat(filterByPartition.andThen(s -> s.map(r -> r.continuationToken)).apply(capturedInvocationList.stream(), leftChildPartitionId)).containsExactlyElementsOf(toList(Stream.concat(Stream.of(expectedInitialChildContinuationTokenInheritedFromParent), extractContinuationToken.apply(expectedResultPagesFromLeftChildPartition) + //drop last page with null cp which doesn't trigger any request + .limit(expectedResultPagesFromLeftChildPartition.size() - 1)))); + + assertThat(filterByPartition.andThen(s -> s.map(r -> r.continuationToken)).apply(capturedInvocationList.stream(), rightChildPartitionId)).containsExactlyElementsOf(toList(Stream.concat(Stream.of(expectedInitialChildContinuationTokenInheritedFromParent), extractContinuationToken.apply(expectedResultPagesFromRightChildPartition) + //drop last page with null cp which doesn't trigger any request + .limit(expectedResultPagesFromRightChildPartition.size() - 1)))); + } + + private static void sanityCheckSplitValidation(String parentPartitionId, String leftChildPartitionId, + String rightChildPartitionId, int numberOfResultPagesFromParentBeforeSplit, + int numberOfResultPagesFromLeftChildAfterSplit, int numberOfResultPagesFromRightChildAfterSplit, + List<FeedResponse<Document>> resultFromParent, List<FeedResponse<Document>> resultFromLeftChild, + List<FeedResponse<Document>> resultFromRightChild) { + // test sanity check + assertThat(resultFromParent).hasSize(numberOfResultPagesFromParentBeforeSplit); + assertThat(resultFromLeftChild).hasSize(numberOfResultPagesFromLeftChildAfterSplit); + assertThat(resultFromRightChild).hasSize(numberOfResultPagesFromRightChildAfterSplit); + + //validate expected result continuation token + assertThat(toList(resultFromParent.stream().map(p -> p.continuationToken()).filter(cp -> Strings.isNullOrEmpty(cp)))).isEmpty(); + + assertThat(toList(resultFromLeftChild.stream().map(p -> p.continuationToken()).limit(resultFromLeftChild.size() - 1).filter(cp -> Strings.isNullOrEmpty(cp)))).isEmpty(); + + assertThat(resultFromLeftChild.get(resultFromLeftChild.size() - 1).continuationToken()).isNullOrEmpty(); + + assertThat(toList(resultFromRightChild.stream().map(p -> p.continuationToken()).limit(resultFromRightChild.size() - 1).filter(cp -> Strings.isNullOrEmpty(cp)))).isEmpty(); + + assertThat(resultFromRightChild.get(resultFromRightChild.size() - 1).continuationToken()).isNullOrEmpty(); + } + + private void validateSplitResults(List<DocumentProducer<Document>.DocumentProducerFeedResponse> actualPages, + String parentPartitionId, String leftChildPartitionId, String rightChildPartitionId, + List<FeedResponse<Document>> resultFromParent, List<FeedResponse<Document>> resultFromLeftChild, + List<FeedResponse<Document>> resultFromRightChild, boolean isOrderby) { + + if (isOrderby) { + Supplier<Stream<Document>> getStreamOfActualDocuments = + () -> actualPages.stream().flatMap(p -> p.pageResult.results().stream()); + + Comparator<Document> comparator = new Comparator<Document>() { + @Override + public int compare(Document o1, Document o2) { + ObjectNode obj1 = (ObjectNode) o1.get(OrderByPayloadFieldName); + // compare o1's payload against o2's payload, not o1's against itself + ObjectNode obj2 = (ObjectNode) o2.get(OrderByPayloadFieldName); + + int cmp = (obj1).get(OrderByIntFieldName).asInt() - (obj2).get(OrderByIntFieldName).asInt(); + if (cmp != 0) { + return cmp; + } + + return obj1.get(DocumentPartitionKeyRangeMinInclusiveFieldName).asText().compareTo(obj2.get(DocumentPartitionKeyRangeMinInclusiveFieldName).asText()); + } + }; + + List<Document> expectedDocuments = Stream.concat(Stream.concat(resultFromParent.stream(),
resultFromLeftChild.stream()), + resultFromRightChild.stream()).flatMap(p -> p.results().stream()).sorted(comparator).collect(Collectors.toList()); + + List<String> actualDocuments = + getStreamOfActualDocuments.get().map(d -> d.id()).collect(Collectors.toList()); + assertThat(actualDocuments).containsExactlyElementsOf(expectedDocuments.stream().map(d -> d.id()).collect(Collectors.toList())); + + } else { + assertThat(actualPages).hasSize(resultFromParent.size() + resultFromLeftChild.size() + resultFromRightChild.size()); + + BiFunction<String, Integer, Stream<String>> repeater = (v, cnt) -> { + return IntStream.range(0, cnt).mapToObj(i -> v); + }; + + List<String> expectedCapturedPartitionIds = + toList(Stream.concat(Stream.concat(repeater.apply(parentPartitionId, resultFromParent.size()), + repeater.apply(leftChildPartitionId, + resultFromLeftChild.size())), + repeater.apply(rightChildPartitionId, resultFromRightChild.size()))); + + assertThat(toList(partitionKeyRangeIds(actualPages).stream())).containsExactlyInAnyOrderElementsOf(expectedCapturedPartitionIds); + + validateResults(feedResponses(actualPages), ImmutableList.of(resultFromParent, resultFromLeftChild, + resultFromRightChild)); + } + } + + private static <T> List<T> repeat(T t, int cnt) { + return IntStream.range(0, cnt).mapToObj(i -> t).collect(Collectors.toList()); + } + + private static List<FeedResponse<Document>> feedResponses(List<DocumentProducer<Document>.DocumentProducerFeedResponse> responses) { + return responses.stream().map(dpFR -> dpFR.pageResult).collect(Collectors.toList()); + } + + private static <T> List<T> toList(Stream<T> stream) { + return stream.collect(Collectors.toList()); + } + + private static List<String> partitionKeyRangeIds(List<DocumentProducer<Document>.DocumentProducerFeedResponse> responses) { + return responses.stream().map(dpFR -> dpFR.sourcePartitionKeyRange.id()).collect(Collectors.toList()); + } + + private static void validateResults(List<FeedResponse<Document>> captured, + List<List<FeedResponse<Document>>> expectedResponsesFromPartitions) { + List<FeedResponse<Document>> expected = + expectedResponsesFromPartitions.stream().flatMap(l -> l.stream()).collect(Collectors.toList()); + assertThat(captured).hasSameSizeAs(expected); + for (int i = 0; i < expected.size(); i++) { + FeedResponse<Document> actualPage = captured.get(i); + FeedResponse<Document> expectedPage = expected.get(i); + assertEqual(actualPage, expectedPage); + } + } + + private static void assertEqual(FeedResponse<Document> actualPage, FeedResponse<Document> expectedPage) { + // size must be compared against the expected page, not the actual page itself + assertThat(actualPage.results()).hasSameSizeAs(expectedPage.results()); + assertThat(actualPage.continuationToken()).isEqualTo(expectedPage.continuationToken()); + + for (int i = 0; i < actualPage.results().size(); i++) { + Document actualDoc = actualPage.results().get(i); + Document expectedDoc = expectedPage.results().get(i); + assertThat(actualDoc.id()).isEqualTo(expectedDoc.id()); + assertThat(actualDoc.getString("prop")).isEqualTo(expectedDoc.getString("prop")); + } + } + + static abstract class RequestExecutor implements Function<RxDocumentServiceRequest, Flux<FeedResponse<Document>>> { + + LinkedListMultimap<String, CapturedInvocation> partitionKeyRangeIdToCapturedInvocation = + LinkedListMultimap.create(); + + class CapturedInvocation { + long time = System.nanoTime(); + RxDocumentServiceRequest request; + FeedResponse<Document> invocationResult; + Exception failureResult; + + public CapturedInvocation(RxDocumentServiceRequest request, Exception ex) { + this.request = request; + this.failureResult = ex; + } + + public CapturedInvocation(RxDocumentServiceRequest request, PartitionAnswer.Response resp) { + this.request = request; + this.invocationResult = resp.invocationResult; + this.failureResult = resp.failureResult; + } + } + + private static CosmosClientException
partitionKeyRangeGoneException() { + Map headers = new HashMap<>(); + headers.put(HttpConstants.HttpHeaders.SUB_STATUS, + Integer.toString(HttpConstants.SubStatusCodes.PARTITION_KEY_RANGE_GONE)); + return BridgeInternal.createCosmosClientException(HttpConstants.StatusCodes.GONE, new CosmosError(), headers); + } + + protected void capture(String partitionId, CapturedInvocation captureInvocation) { + partitionKeyRangeIdToCapturedInvocation.put(partitionId, captureInvocation); + } + + public static RequestExecutor fromPartitionAnswer(List answers) { + return new RequestExecutor() { + @Override + public Flux> apply(RxDocumentServiceRequest request) { + synchronized (this) { + logger.debug("executing request: " + request + " cp is: " + request.getContinuation()); + for (PartitionAnswer a : answers) { + if (a.getPartitionKeyRangeId().equals(request.getPartitionKeyRangeIdentity().getPartitionKeyRangeId())) { + try { + PartitionAnswer.Response resp = a.onRequest(request); + if (resp != null) { + CapturedInvocation ci = new CapturedInvocation(request, resp); + capture(a.getPartitionKeyRangeId(), ci); + return resp.toSingle(); + } + + } catch (Exception e) { + capture(a.getPartitionKeyRangeId(), new CapturedInvocation(request, e)); + return Flux.error(e); + } + } + } + throw new RuntimeException(); + } + } + }; + } + + public static RequestExecutor fromPartitionAnswer(PartitionAnswer... answers) { + return fromPartitionAnswer(ImmutableList.copyOf(answers)); + } + + abstract static class PartitionAnswer { + class Response { + FeedResponse invocationResult; + Exception failureResult; + + public Response(FeedResponse invocationResult) { + this.invocationResult = invocationResult; + } + + public Response(Exception ex) { + this.failureResult = ex; + } + + public Flux> toSingle() { + if (invocationResult != null) { + return Flux.just(invocationResult); + } else { + return Flux.error(failureResult); + } + } + } + + private String partitionKeyRangeId; + + private static boolean targetsPartition(RxDocumentServiceRequest req, String partitionKeyRangeId) { + return partitionKeyRangeId.equals(req.getPartitionKeyRangeIdentity().getPartitionKeyRangeId()); + } + + protected PartitionAnswer(String partitionKeyRangeId) { + this.partitionKeyRangeId = partitionKeyRangeId; + } + + public String getPartitionKeyRangeId() { + return partitionKeyRangeId; + } + + public abstract Response onRequest(final RxDocumentServiceRequest req); + + public static PartitionAnswer just(String partitionId, List> resps) { + AtomicInteger index = new AtomicInteger(); + return new PartitionAnswer(partitionId) { + @Override + public Response onRequest(RxDocumentServiceRequest request) { + if (!PartitionAnswer.targetsPartition(request, partitionId)) { + return null; + } + synchronized (this) { + if (index.get() < resps.size()) { + return new Response(resps.get(index.getAndIncrement())); + } + } + return null; + } + }; + } + + public static PartitionAnswer always(String partitionId, final Exception ex) { + return new PartitionAnswer(partitionId) { + @Override + public Response onRequest(RxDocumentServiceRequest request) { + if (!PartitionAnswer.targetsPartition(request, partitionId)) { + return null; + } + + return new Response(ex); + } + }; + } + + public static PartitionAnswer errors(String partitionId, List exs) { + AtomicInteger index = new AtomicInteger(); + return new PartitionAnswer(partitionId) { + @Override + public Response onRequest(RxDocumentServiceRequest request) { + if (!PartitionAnswer.targetsPartition(request, partitionId)) { + 
return null; + } + synchronized (this) { + if (index.get() < exs.size()) { + return new Response(exs.get(index.getAndIncrement())); + } + } + return null; + } + }; + } + + public static PartitionAnswer alwaysPartitionSplit(String partitionId) { + return new PartitionAnswer(partitionId) { + @Override + public Response onRequest(RxDocumentServiceRequest request) { + if (!PartitionAnswer.targetsPartition(request, partitionId)) { + return null; + } + return new Response(partitionKeyRangeGoneException()); + } + }; + } + } + } + + static abstract class RequestCreator implements TriFunction { + + public static RequestCreator give(List requests) { + AtomicInteger i = new AtomicInteger(0); + return new RequestCreator() { + + @Override + public RxDocumentServiceRequest apply(PartitionKeyRange pkr, String cp, Integer ps) { + synchronized (this) { + RxDocumentServiceRequest req = requests.get(i.getAndIncrement()); + invocations.add(new CapturedInvocation(pkr, cp, ps, req)); + return req; + } + } + }; + } + + public static RequestCreator simpleMock() { + return new RequestCreator() { + @Override + public RxDocumentServiceRequest apply(PartitionKeyRange pkr, String cp, Integer ps) { + synchronized (this) { + RxDocumentServiceRequest req = Mockito.mock(RxDocumentServiceRequest.class); + PartitionKeyRangeIdentity pkri = new PartitionKeyRangeIdentity(pkr.id()); + doReturn(pkri).when(req).getPartitionKeyRangeIdentity(); + doReturn(cp).when(req).getContinuation(); + invocations.add(new CapturedInvocation(pkr, cp, ps, req)); + logger.debug("creating request: " + req + " cp is " + cp); + return req; + } + } + }; + } + + class CapturedInvocation { + PartitionKeyRange sourcePartition; + String continuationToken; + Integer maxItemCount; + RxDocumentServiceRequest invocationResult; + + public CapturedInvocation(PartitionKeyRange sourcePartition, String continuationToken, + Integer maxItemCount, RxDocumentServiceRequest invocationResult) { + this.sourcePartition = sourcePartition; + this.continuationToken = continuationToken; + this.maxItemCount = maxItemCount; + this.invocationResult = invocationResult; + } + } + + List invocations = Collections.synchronizedList(new ArrayList<>()); + +// abstract public RxDocumentServiceRequest call(PartitionKeyRange pkr, String cp, Integer ps); + } + + private Document getDocumentDefinition() { + String uuid = UUID.randomUUID().toString(); + Document doc = new Document(String.format("{ " + "\"id\": \"%s\", " + "\"mypk\": \"%s\", " + "\"sgmts\": " + + "[[6519456, 1471916863], [2498434, 1455671440]], " + + "\"prop\": \"%s\"" + "}", uuid, uuid, uuid)); + return doc; + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/query/FeedResponseBuilder.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/query/FeedResponseBuilder.java new file mode 100644 index 0000000000000..22112ff6e8206 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/query/FeedResponseBuilder.java @@ -0,0 +1,98 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * 
The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.query; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.Resource; +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.RxDocumentServiceResponse; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class FeedResponseBuilder { + private final boolean isChangeFeed; + private final Class klass; + + private Map headers = new HashMap<>(); + private boolean noMoreChangesInChangeFeed = false; + private List results; + + private FeedResponseBuilder(Class klass, boolean isChangeFeed) { + this.klass = klass; + this.isChangeFeed = isChangeFeed; + } + + public FeedResponseBuilder withContinuationToken(String continuationToken) { + + if (isChangeFeed) { + headers.put(HttpConstants.HttpHeaders.E_TAG, continuationToken); + } else { + headers.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken); + } + return this; + } + + public FeedResponseBuilder withResults(List results) { + this.results = results; + return this; + } + + public FeedResponseBuilder withResults(T... results) { + this.results = Arrays.asList(results); + return this; + } + + public FeedResponseBuilder lastChangeFeedPage() { + this.noMoreChangesInChangeFeed = true; + return this; + } + + public FeedResponse build() { + RxDocumentServiceResponse rsp = mock(RxDocumentServiceResponse.class); + when(rsp.getResponseHeaders()).thenReturn(headers); + when(rsp.getQueryResponse(klass)).thenReturn(results); + if (isChangeFeed) { + when(rsp.getStatusCode()).thenReturn(noMoreChangesInChangeFeed? 
+ HttpConstants.StatusCodes.NOT_MODIFIED : 200); + return BridgeInternal.toChaneFeedResponsePage(rsp, klass); + } else { + return BridgeInternal.toFeedResponsePage(rsp, klass); + } + } + + public static FeedResponseBuilder queryFeedResponseBuilder(Class klass) { + return new FeedResponseBuilder(klass, false); + } + + public static FeedResponseBuilder changeFeedResponseBuilder(Class klass) { + return new FeedResponseBuilder(klass, true); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/query/FetcherTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/query/FetcherTest.java new file mode 100644 index 0000000000000..3260d52d6f2e4 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/query/FetcherTest.java @@ -0,0 +1,231 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.query; + +import com.azure.data.cosmos.ChangeFeedOptions; +import com.azure.data.cosmos.internal.Document; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.internal.RxDocumentServiceRequest; +import io.reactivex.subscribers.TestSubscriber; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; + +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.BiFunction; +import java.util.function.Function; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; + +public class FetcherTest { + + @DataProvider(name = "queryParams") + public static Object[][] queryParamProvider() { + + FeedOptions options1 = new FeedOptions(); + options1.maxItemCount(100); + options1.requestContinuation("cp-init"); // initial continuation token + int top1 = -1; // no top + + // no continuation token + FeedOptions options2 = new FeedOptions(); + options2.maxItemCount(100); + int top2 = -1; // no top + + // top more than max item count + FeedOptions options3 = new FeedOptions(); + options3.maxItemCount(100); + int top3 = 200; + + // top less than max item count + FeedOptions options4 = new FeedOptions(); + options4.maxItemCount(100); + int top4 = 20; + + return new Object[][] { + { options1, top1 }, + { options2, top2 }, + { options3, top3 }, + { options4, top4 }}; + } + + @Test(groups = { "unit" }, dataProvider = "queryParams") + public void query(FeedOptions options, int top) { + + FeedResponse fp1 = FeedResponseBuilder.queryFeedResponseBuilder(Document.class) + .withContinuationToken("cp1") + .withResults(new Document(), new Document(), new Document()) + .build(); + + FeedResponse fp2 = FeedResponseBuilder.queryFeedResponseBuilder(Document.class) + .withContinuationToken(null) + .withResults(new Document()) + .build(); + + List> feedResponseList = Arrays.asList(fp1, fp2); + + AtomicInteger totalResultsReceived = new AtomicInteger(0); + + AtomicInteger requestIndex = new AtomicInteger(0); + + BiFunction createRequestFunc = (token, maxItemCount) -> { + assertThat(maxItemCount).describedAs("max item count").isEqualTo( + getExpectedMaxItemCountInRequest(options, top, feedResponseList, requestIndex.get())); + assertThat(token).describedAs("continuation token").isEqualTo( + getExpectedContinuationTokenInRequest(options.requestContinuation(), feedResponseList, requestIndex.get())); + requestIndex.getAndIncrement(); + + return mock(RxDocumentServiceRequest.class); + }; + + AtomicInteger executeIndex = new AtomicInteger(0); + + Function>> executeFunc = request -> { + FeedResponse rsp = feedResponseList.get(executeIndex.getAndIncrement()); + totalResultsReceived.addAndGet(rsp.results().size()); + return Flux.just(rsp); + }; + + Fetcher fetcher = + new Fetcher<>(createRequestFunc, executeFunc, options.requestContinuation(), false, top, + options.maxItemCount()); + + validateFetcher(fetcher, options, top, feedResponseList); + } + + private void validateFetcher(Fetcher fetcher, + FeedOptions options, + int top, + List> feedResponseList) { + + int totalNumberOfDocs = 0; + + int index = 0; + while(index < feedResponseList.size()) { + assertThat(fetcher.shouldFetchMore()).describedAs("should fetch more pages").isTrue(); + totalNumberOfDocs += validate(fetcher.nextPage()).results().size(); + + if ((top != -1) && (totalNumberOfDocs >= top)) { 
+ break; + } + index++; + } + assertThat(fetcher.shouldFetchMore()).describedAs("should not fetch more pages").isFalse(); + } + + @Test(groups = { "unit" }) + public void changeFeed() { + + ChangeFeedOptions options = new ChangeFeedOptions(); + options.maxItemCount(100); + + boolean isChangeFeed = true; + int top = -1; + + FeedResponse fp1 = FeedResponseBuilder.changeFeedResponseBuilder(Document.class) + .withContinuationToken("cp1") + .withResults(new Document()) + .build(); + + FeedResponse fp2 = FeedResponseBuilder.changeFeedResponseBuilder(Document.class) + .withContinuationToken("cp2") + .lastChangeFeedPage() + .build(); + + List> feedResponseList = Arrays.asList(fp1, fp2); + + AtomicInteger requestIndex = new AtomicInteger(0); + + BiFunction createRequestFunc = (token, maxItemCount) -> { + assertThat(maxItemCount).describedAs("max item count").isEqualTo(options.maxItemCount()); + assertThat(token).describedAs("continuation token").isEqualTo( + getExpectedContinuationTokenInRequest(options.requestContinuation(), feedResponseList, requestIndex.getAndIncrement())); + + return mock(RxDocumentServiceRequest.class); + }; + + AtomicInteger executeIndex = new AtomicInteger(0); + + Function>> executeFunc = request -> { + return Flux.just(feedResponseList.get(executeIndex.getAndIncrement())); + }; + + Fetcher fetcher = + new Fetcher<>(createRequestFunc, executeFunc, options.requestContinuation(), isChangeFeed, top, + options.maxItemCount()); + + validateFetcher(fetcher, options, feedResponseList); + } + + private void validateFetcher(Fetcher fetcher, + ChangeFeedOptions options, + List> feedResponseList) { + + + for(FeedResponse change: feedResponseList) { + assertThat(fetcher.shouldFetchMore()).describedAs("should fetch more pages").isTrue(); + validate(fetcher.nextPage()); + } + + assertThat(fetcher.shouldFetchMore()).describedAs("should not fetch more pages").isFalse(); + } + + private FeedResponse validate(Flux> page) { + TestSubscriber> subscriber = new TestSubscriber<>(); + page.subscribe(subscriber); + subscriber.awaitTerminalEvent(); + subscriber.assertComplete(); + subscriber.assertNoErrors(); + subscriber.assertValueCount(1); + return subscriber.values().get(0); + } + + private String getExpectedContinuationTokenInRequest(String continuationToken, + List> feedResponseList, + int requestIndex) { + if (requestIndex == 0) { + return continuationToken; + } + + return feedResponseList.get(requestIndex - 1).continuationToken(); + } + + private int getExpectedMaxItemCountInRequest(FeedOptions options, + int top, + List> feedResponseList, + int requestIndex) { + if (top == -1) { + return options.maxItemCount(); + } + + int numberOfReceivedItemsSoFar = + feedResponseList.subList(0, requestIndex).stream().mapToInt(rsp -> rsp.results().size()).sum(); + + return Math.min(top - numberOfReceivedItemsSoFar, options.maxItemCount()); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/routing/InMemoryCollectionRoutingMapTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/routing/InMemoryCollectionRoutingMapTest.java new file mode 100644 index 0000000000000..944d32f179dca --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/routing/InMemoryCollectionRoutingMapTest.java @@ -0,0 +1,270 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * 
in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.routing; + +import com.azure.data.cosmos.internal.PartitionKeyRange; +import com.google.common.collect.ImmutableList; +import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.tuple.ImmutablePair; +import org.testng.annotations.Test; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Iterator; + +import static org.assertj.core.api.Assertions.assertThat; + +public class InMemoryCollectionRoutingMapTest { + + static class ServerIdentityImp implements IServerIdentity { + private int value; + public ServerIdentityImp(int value) { + this.value = value; + } + + static ServerIdentityImp of(int value) { + return new ServerIdentityImp(value); + } + } + + @Test(groups = { "unit" }) + public void collectionRoutingMap() { + InMemoryCollectionRoutingMap routingMap = InMemoryCollectionRoutingMap + .tryCreateCompleteRoutingMap(Arrays.asList( + new ImmutablePair<>( + new PartitionKeyRange("2", "0000000050", "0000000070"), ServerIdentityImp.of(2)), + new ImmutablePair<>(new PartitionKeyRange("0", "", "0000000030"), + ServerIdentityImp.of(0)), + new ImmutablePair<>( + new PartitionKeyRange("1", "0000000030", "0000000050"), ServerIdentityImp.of(1)), + new ImmutablePair<>(new PartitionKeyRange("3", "0000000070", "FF"), + ServerIdentityImp.of(3))), + StringUtils.EMPTY); + + assertThat("0").isEqualTo(routingMap.getOrderedPartitionKeyRanges().get(0).id()); + assertThat("1").isEqualTo(routingMap.getOrderedPartitionKeyRanges().get(1).id()); + assertThat("2").isEqualTo(routingMap.getOrderedPartitionKeyRanges().get(2).id()); + assertThat("3").isEqualTo(routingMap.getOrderedPartitionKeyRanges().get(3).id()); + + + assertThat("0").isEqualTo(routingMap.getRangeByEffectivePartitionKey("").id()); + assertThat("0").isEqualTo(routingMap.getRangeByEffectivePartitionKey("0000000000").id()); + assertThat("1").isEqualTo(routingMap.getRangeByEffectivePartitionKey("0000000030").id()); + assertThat("1").isEqualTo(routingMap.getRangeByEffectivePartitionKey("0000000031").id()); + assertThat("3").isEqualTo(routingMap.getRangeByEffectivePartitionKey("0000000071").id()); + + assertThat("0").isEqualTo(routingMap.getRangeByPartitionKeyRangeId("0").id()); + assertThat("1").isEqualTo(routingMap.getRangeByPartitionKeyRangeId("1").id()); + + assertThat(4).isEqualTo( + routingMap + .getOverlappingRanges(Collections.singletonList(new Range(PartitionKeyRange.MINIMUM_INCLUSIVE_EFFECTIVE_PARTITION_KEY, + PartitionKeyRange.MAXIMUM_EXCLUSIVE_EFFECTIVE_PARTITION_KEY, true, false))) + .size()); + assertThat(0).isEqualTo( + 
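// an empty range (same point, exclusive at both ends) is expected to overlap no partition key ranges +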
routingMap + .getOverlappingRanges(Collections.singletonList(new Range(PartitionKeyRange.MINIMUM_INCLUSIVE_EFFECTIVE_PARTITION_KEY, + PartitionKeyRange.MINIMUM_INCLUSIVE_EFFECTIVE_PARTITION_KEY, false, false))) + .size()); + + Collection partitionKeyRanges = routingMap + .getOverlappingRanges(Collections.singletonList(new Range("0000000040", "0000000040", true, true))); + + assertThat(1).isEqualTo(partitionKeyRanges.size()); + Iterator iterator = partitionKeyRanges.iterator(); + assertThat("1").isEqualTo(iterator.next().id()); + + Collection partitionKeyRanges1 = routingMap + .getOverlappingRanges(Arrays.asList(new Range("0000000040", "0000000045", true, true), + new Range("0000000045", "0000000046", true, true), + new Range("0000000046", "0000000050", true, true))); + + assertThat(2).isEqualTo(partitionKeyRanges1.size()); + Iterator iterator1 = partitionKeyRanges1.iterator(); + assertThat("1").isEqualTo(iterator1.next().id()); + assertThat("2").isEqualTo(iterator1.next().id()); + } + + @Test(groups = { "unit" }, expectedExceptions = IllegalStateException.class) + public void invalidRoutingMap() { + InMemoryCollectionRoutingMap.tryCreateCompleteRoutingMap(Arrays.asList( + new ImmutablePair<>(new PartitionKeyRange("1", "0000000020", "0000000030"), + ServerIdentityImp.of(2)), + new ImmutablePair<>(new PartitionKeyRange("2", "0000000025", "0000000035"), + ServerIdentityImp.of(2))), + StringUtils.EMPTY); + } + + @Test(groups = { "unit" }) + public void incompleteRoutingMap() { + InMemoryCollectionRoutingMap routingMap = InMemoryCollectionRoutingMap + .tryCreateCompleteRoutingMap(Arrays.asList( + new ImmutablePair<>(new PartitionKeyRange("2", "", "0000000030"), + ServerIdentityImp.of(2)), + new ImmutablePair<>(new PartitionKeyRange("3", "0000000031", "FF"), + ServerIdentityImp.of(2))), + StringUtils.EMPTY); + + assertThat(routingMap).isNull(); + + routingMap = InMemoryCollectionRoutingMap.tryCreateCompleteRoutingMap(Arrays.asList( + new ImmutablePair<>(new PartitionKeyRange("2", "", "0000000030"), ServerIdentityImp.of(2)), + new ImmutablePair<>(new PartitionKeyRange("3", "0000000030", "FF"), ServerIdentityImp.of(2))), + StringUtils.EMPTY); + + assertThat(routingMap).isNotNull(); + } + + @Test(groups = {"unit"}) + public void goneRanges() { + CollectionRoutingMap routingMap = InMemoryCollectionRoutingMap.tryCreateCompleteRoutingMap( + ImmutableList.of( + new ImmutablePair(new PartitionKeyRange("2", "", "0000000030", ImmutableList.of("1", "0")), null), + new ImmutablePair(new PartitionKeyRange("3", "0000000030", "0000000032", ImmutableList.of("5")), null), + new ImmutablePair(new PartitionKeyRange("4", "0000000032", "FF"), null)), + StringUtils.EMPTY); + + assertThat(routingMap.IsGone("1")).isTrue(); + assertThat(routingMap.IsGone("0")).isTrue(); + assertThat(routingMap.IsGone("5")).isTrue(); + + assertThat(routingMap.IsGone("2")).isFalse(); + assertThat(routingMap.IsGone("3")).isFalse(); + assertThat(routingMap.IsGone("4")).isFalse(); + assertThat(routingMap.IsGone("100")).isFalse(); + } + + @Test(groups = {"unit"}) + public void tryCombineRanges() { + CollectionRoutingMap routingMap = InMemoryCollectionRoutingMap.tryCreateCompleteRoutingMap( + ImmutableList.of( + new ImmutablePair( + new PartitionKeyRange( + "2", + "0000000050", + "0000000070"), + null), + + new ImmutablePair( + new PartitionKeyRange( + "0", + "", + "0000000030"), + null), + + new ImmutablePair( + new PartitionKeyRange( + "1", + "0000000030", + "0000000050"), + null), + + new ImmutablePair( + new PartitionKeyRange( + "3", + 
"0000000070", + "FF"), + null) + ), StringUtils.EMPTY); + + CollectionRoutingMap newRoutingMap = routingMap.tryCombine( + ImmutableList.of( + new ImmutablePair( + new PartitionKeyRange( + "4", + "", + "0000000010", + ImmutableList.of("0") + ), + null), + + new ImmutablePair( + new PartitionKeyRange( + "5", + "0000000010", + "0000000030", + ImmutableList.of("0") + ), + null) + )); + + assertThat(newRoutingMap).isNotNull(); + + newRoutingMap = routingMap.tryCombine( + ImmutableList.of( + new ImmutablePair( + new PartitionKeyRange( + "6", + "", + "0000000005", + ImmutableList.of("0", "4") + ), + null), + + new ImmutablePair( + new PartitionKeyRange( + "7", + "0000000005", + "0000000010", + ImmutableList.of("0", "4") + ), + null), + + new ImmutablePair( + new PartitionKeyRange( + "8", + "0000000010", + "0000000015", + ImmutableList.of("0", "5") + ), + null), + + new ImmutablePair( + new PartitionKeyRange( + "9", + "0000000015", + "0000000030", + ImmutableList.of("0", "5") + ), + null) + )); + + assertThat(newRoutingMap).isNotNull(); + + newRoutingMap = routingMap.tryCombine( + ImmutableList.of( + new ImmutablePair( + new PartitionKeyRange( + "10", + "", + "0000000002", + ImmutableList.of("0", "4", "6") + ), + null) + )); + + assertThat(newRoutingMap).isNull(); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/routing/LocationCacheTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/routing/LocationCacheTest.java new file mode 100644 index 0000000000000..a61f303c754e6 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/routing/LocationCacheTest.java @@ -0,0 +1,439 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.internal.routing; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.BridgeUtils; +import com.azure.data.cosmos.ConnectionPolicy; +import com.azure.data.cosmos.internal.DatabaseAccount; +import com.azure.data.cosmos.internal.DatabaseAccountLocation; +import com.azure.data.cosmos.internal.DatabaseAccountManagerInternal; +import com.azure.data.cosmos.internal.*; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Iterables; +import org.apache.commons.collections4.list.UnmodifiableList; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.net.MalformedURLException; +import java.net.URI; +import java.net.URL; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import java.util.stream.Stream; +import java.util.stream.StreamSupport; + +import static com.azure.data.cosmos.BridgeUtils.createDatabaseAccountLocation; +import static org.assertj.core.api.Assertions.assertThat; + +/** + * Tests for {@link LocationCache} + */ +public class LocationCacheTest { + private final static URL DefaultEndpoint = createUrl("https://default.documents.azure.com"); + private final static URL Location1Endpoint = createUrl("https://location1.documents.azure.com"); + private final static URL Location2Endpoint = createUrl("https://location2.documents.azure.com"); + private final static URL Location3Endpoint = createUrl("https://location3.documents.azure.com"); + private final static URL Location4Endpoint = createUrl("https://location4.documents.azure.com"); + + private static HashMap EndpointByLocation = new HashMap<>(); + + static { + EndpointByLocation.put("location1", LocationCacheTest.Location1Endpoint); + EndpointByLocation.put("location2", LocationCacheTest.Location2Endpoint); + EndpointByLocation.put("location3", LocationCacheTest.Location3Endpoint); + EndpointByLocation.put("location4", LocationCacheTest.Location4Endpoint); + } + + private final Configs configs = new Configs() { + @Override + public int getUnavailableLocationsExpirationTimeInSeconds() { + return 3; + } + }; + + private UnmodifiableList preferredLocations; + private DatabaseAccount databaseAccount; + private LocationCache cache; + private GlobalEndpointManager endpointManager; + private DatabaseAccountManagerInternalMock mockedClient; + + @DataProvider(name = "paramsProvider") + public Object[][] paramsProvider() { + // provides all possible combinations for + // useMultipleWriteEndpoints, endpointDiscoveryEnabled, isPreferredListEmpty + List list = new ArrayList<>(); + for (int i = 0; i < 8; i++) { + boolean useMultipleWriteEndpoints = (i & 1) > 0; + boolean endpointDiscoveryEnabled = (i & 2) > 0; + boolean isPreferredListEmpty = (i & 4) > 0; + list.add(new Object[]{useMultipleWriteEndpoints, endpointDiscoveryEnabled, isPreferredListEmpty}); + } + + return list.toArray(new Object[][]{}); + } + + @Test(groups = "long", dataProvider = "paramsProvider") + public void validateAsync(boolean useMultipleWriteEndpoints, + boolean endpointDiscoveryEnabled, + boolean isPreferredListEmpty) throws Exception { + validateLocationCacheAsync(useMultipleWriteEndpoints, + 
endpointDiscoveryEnabled, + isPreferredListEmpty); + } + + @Test(groups = "long") + public void validateWriteEndpointOrderWithClientSideDisableMultipleWriteLocation() throws Exception { + this.initialize(false, true, false); + assertThat(this.cache.getWriteEndpoints().get(0)).isEqualTo(LocationCacheTest.Location1Endpoint); + assertThat(this.cache.getWriteEndpoints().get(1)).isEqualTo(LocationCacheTest.Location2Endpoint); + assertThat(this.cache.getWriteEndpoints().get(2)).isEqualTo(LocationCacheTest.Location3Endpoint); + } + + private static DatabaseAccount createDatabaseAccount(boolean useMultipleWriteLocations) { + DatabaseAccount databaseAccount = BridgeUtils.createDatabaseAccount( + // read endpoints + ImmutableList.of( + createDatabaseAccountLocation("location1", LocationCacheTest.Location1Endpoint.toString()), + createDatabaseAccountLocation("location2", LocationCacheTest.Location2Endpoint.toString()), + createDatabaseAccountLocation("location4", LocationCacheTest.Location4Endpoint.toString())), + + // write endpoints + ImmutableList.of( + createDatabaseAccountLocation("location1", LocationCacheTest.Location1Endpoint.toString()), + createDatabaseAccountLocation("location2", LocationCacheTest.Location2Endpoint.toString()), + createDatabaseAccountLocation("location3", LocationCacheTest.Location3Endpoint.toString())), + // whether the account supports multiple write locations (multi-master) + useMultipleWriteLocations); + + return databaseAccount; + } + + private void initialize( + boolean useMultipleWriteLocations, + boolean enableEndpointDiscovery, + boolean isPreferredLocationsListEmpty) throws Exception { + + this.mockedClient = new DatabaseAccountManagerInternalMock(); + this.databaseAccount = LocationCacheTest.createDatabaseAccount(useMultipleWriteLocations); + + this.preferredLocations = isPreferredLocationsListEmpty ?
new UnmodifiableList<>(Collections.emptyList()) : + new UnmodifiableList<>(ImmutableList.of("location1", "location2", "location3")); + + this.cache = new LocationCache( + this.preferredLocations, + LocationCacheTest.DefaultEndpoint, + enableEndpointDiscovery, + useMultipleWriteLocations, + configs); + + this.cache.onDatabaseAccountRead(this.databaseAccount); + + ConnectionPolicy connectionPolicy = new ConnectionPolicy(); + connectionPolicy.enableEndpointDiscovery(enableEndpointDiscovery); + BridgeInternal.setUseMultipleWriteLocations(connectionPolicy, useMultipleWriteLocations); + connectionPolicy.preferredLocations(this.preferredLocations); + + this.endpointManager = new GlobalEndpointManager(mockedClient, connectionPolicy, configs); + } + + class DatabaseAccountManagerInternalMock implements DatabaseAccountManagerInternal { + private final AtomicInteger counter = new AtomicInteger(0); + + private void reset() { + counter.set(0); + } + + private int getInvocationCounter() { + return counter.get(); + } + + @Override + public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) { + return Flux.just(LocationCacheTest.this.databaseAccount); + } + + @Override + public ConnectionPolicy getConnectionPolicy() { + throw new RuntimeException("not supported"); + } + + @Override + public URI getServiceEndpoint() { + try { + return LocationCacheTest.DefaultEndpoint.toURI(); + } catch (Exception e) { + throw new RuntimeException(); + } + } + } + + private static <T> Stream<T> toStream(Iterable<T> iterable) { + return StreamSupport.stream(iterable.spliterator(), false); + } + + private void validateLocationCacheAsync( + boolean useMultipleWriteLocations, + boolean endpointDiscoveryEnabled, + boolean isPreferredListEmpty) throws Exception { + for (int writeLocationIndex = 0; writeLocationIndex < 3; writeLocationIndex++) { + for (int readLocationIndex = 0; readLocationIndex < 2; readLocationIndex++) { + this.initialize( + useMultipleWriteLocations, + endpointDiscoveryEnabled, + isPreferredListEmpty); + + UnmodifiableList<URL> currentWriteEndpoints = this.cache.getWriteEndpoints(); + UnmodifiableList<URL> currentReadEndpoints = this.cache.getReadEndpoints(); + for (int i = 0; i < readLocationIndex; i++) { + this.cache.markEndpointUnavailableForRead(createUrl(Iterables.get(this.databaseAccount.getReadableLocations(), i).getEndpoint())); + this.endpointManager.markEndpointUnavailableForRead(createUrl(Iterables.get(this.databaseAccount.getReadableLocations(), i).getEndpoint())); + } + for (int i = 0; i < writeLocationIndex; i++) { + this.cache.markEndpointUnavailableForWrite(createUrl(Iterables.get(this.databaseAccount.getWritableLocations(), i).getEndpoint())); + this.endpointManager.markEndpointUnavailableForWrite(createUrl(Iterables.get(this.databaseAccount.getWritableLocations(), i).getEndpoint())); + } + + Map<String, URL> writeEndpointByLocation = toStream(this.databaseAccount.getWritableLocations()) + .collect(Collectors.toMap(i -> i.getName(), i -> createUrl(i.getEndpoint()))); + + Map<String, URL> readEndpointByLocation = toStream(this.databaseAccount.getReadableLocations()) + .collect(Collectors.toMap(i -> i.getName(), i -> createUrl(i.getEndpoint()))); + + URL[] preferredAvailableWriteEndpoints = toStream(this.preferredLocations).skip(writeLocationIndex) + .filter(location -> writeEndpointByLocation.containsKey(location)) + .map(location -> writeEndpointByLocation.get(location)) + .collect(Collectors.toList()).toArray(new URL[0]); + + URL[] preferredAvailableReadEndpoints = toStream(this.preferredLocations).skip(readLocationIndex) +
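// restrict to regions present in the account's readable locations and resolve each name to its endpoint URL +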
.filter(location -> readEndpointByLocation.containsKey(location)) + .map(location -> readEndpointByLocation.get(location)) + .collect(Collectors.toList()).toArray(new URL[0]); + + this.validateEndpointRefresh( + useMultipleWriteLocations, + endpointDiscoveryEnabled, + preferredAvailableWriteEndpoints, + preferredAvailableReadEndpoints, + writeLocationIndex > 0); + + this.validateGlobalEndpointLocationCacheRefreshAsync(); + + this.validateRequestEndpointResolution( + useMultipleWriteLocations, + endpointDiscoveryEnabled, + preferredAvailableWriteEndpoints, + preferredAvailableReadEndpoints); + + // wait for TTL on unavailability info + + TimeUnit.SECONDS.sleep(configs.getUnavailableLocationsExpirationTimeInSeconds() + 1); + + assertThat(currentWriteEndpoints.toArray()).containsExactly(this.cache.getWriteEndpoints().toArray()); + assertThat(currentReadEndpoints.toArray()).containsExactly(this.cache.getReadEndpoints().toArray()); + } + } + } + + private void validateEndpointRefresh( + boolean useMultipleWriteLocations, + boolean endpointDiscoveryEnabled, + URL[] preferredAvailableWriteEndpoints, + URL[] preferredAvailableReadEndpoints, + boolean isFirstWriteEndpointUnavailable) { + + Utils.ValueHolder canRefreshInBackgroundHolder = new Utils.ValueHolder<>(); + canRefreshInBackgroundHolder.v = false; + + boolean shouldRefreshEndpoints = this.cache.shouldRefreshEndpoints(canRefreshInBackgroundHolder); + + boolean isMostPreferredLocationUnavailableForRead = false; + boolean isMostPreferredLocationUnavailableForWrite = useMultipleWriteLocations ? + false : isFirstWriteEndpointUnavailable; + if (this.preferredLocations.size() > 0) { + String mostPreferredReadLocationName = this.preferredLocations.stream() + .filter(location -> toStream(databaseAccount.getReadableLocations()) + .anyMatch(readLocation -> readLocation.getName().equals(location))) + .findFirst().orElse(null); + + URL mostPreferredReadEndpoint = LocationCacheTest.EndpointByLocation.get(mostPreferredReadLocationName); + isMostPreferredLocationUnavailableForRead = preferredAvailableReadEndpoints.length == 0 ? + true : (!areEqual(preferredAvailableReadEndpoints[0], mostPreferredReadEndpoint)); + + String mostPreferredWriteLocationName = this.preferredLocations.stream() + .filter(location -> toStream(databaseAccount.getWritableLocations()) + .anyMatch(writeLocation -> writeLocation.getName().equals(location))) + .findFirst().orElse(null); + + URL mostPreferredWriteEndpoint = LocationCacheTest.EndpointByLocation.get(mostPreferredWriteLocationName); + + if (useMultipleWriteLocations) { + isMostPreferredLocationUnavailableForWrite = preferredAvailableWriteEndpoints.length == 0 ? 
+ true : (!areEqual(preferredAvailableWriteEndpoints[0], mostPreferredWriteEndpoint)); + } + } + + if (!endpointDiscoveryEnabled) { + assertThat(shouldRefreshEndpoints).isFalse(); + } else { + assertThat(shouldRefreshEndpoints).isEqualTo( + isMostPreferredLocationUnavailableForRead || isMostPreferredLocationUnavailableForWrite); + } + + if (shouldRefreshEndpoints) { + assertThat(canRefreshInBackgroundHolder.v).isTrue(); + } + } + + private boolean areEqual(URL url1, URL url2) { + return url1.equals(url2); + } + + private void validateGlobalEndpointLocationCacheRefreshAsync() throws Exception { + + mockedClient.reset(); + List> list = IntStream.range(0, 10) + .mapToObj(index -> this.endpointManager.refreshLocationAsync(null)) + .collect(Collectors.toList()); + + Flux.merge(list).then().block(); + + assertThat(mockedClient.getInvocationCounter()).isLessThanOrEqualTo(1); + mockedClient.reset(); + + IntStream.range(0, 10) + .mapToObj(index -> this.endpointManager.refreshLocationAsync(null)) + .collect(Collectors.toList()); + for (Mono completable : list) { + completable.block(); + } + + assertThat(mockedClient.getInvocationCounter()).isLessThanOrEqualTo(1); + } + + private void validateRequestEndpointResolution( + boolean useMultipleWriteLocations, + boolean endpointDiscoveryEnabled, + URL[] availableWriteEndpoints, + URL[] availableReadEndpoints) throws MalformedURLException { + URL firstAvailableWriteEndpoint; + URL secondAvailableWriteEndpoint; + + if (!endpointDiscoveryEnabled) { + firstAvailableWriteEndpoint = LocationCacheTest.DefaultEndpoint; + secondAvailableWriteEndpoint = LocationCacheTest.DefaultEndpoint; + } else if (!useMultipleWriteLocations) { + firstAvailableWriteEndpoint = createUrl(Iterables.get(this.databaseAccount.getWritableLocations(), 0).getEndpoint()); + secondAvailableWriteEndpoint = createUrl(Iterables.get(this.databaseAccount.getWritableLocations(), 1).getEndpoint()); + } else if (availableWriteEndpoints.length > 1) { + firstAvailableWriteEndpoint = availableWriteEndpoints[0]; + secondAvailableWriteEndpoint = availableWriteEndpoints[1]; + } else if (availableWriteEndpoints.length > 0) { + firstAvailableWriteEndpoint = availableWriteEndpoints[0]; + Iterator writeLocationsIterator = databaseAccount.getWritableLocations().iterator(); + String writeEndpoint = writeLocationsIterator.next().getEndpoint(); + secondAvailableWriteEndpoint = writeEndpoint != firstAvailableWriteEndpoint.toString() + ? new URL(writeEndpoint) + : new URL(writeLocationsIterator.next().getEndpoint()); + } else { + firstAvailableWriteEndpoint = LocationCacheTest.DefaultEndpoint; + secondAvailableWriteEndpoint = LocationCacheTest.DefaultEndpoint; + } + + URL firstAvailableReadEndpoint; + + if (!endpointDiscoveryEnabled) { + firstAvailableReadEndpoint = LocationCacheTest.DefaultEndpoint; + } else if (this.preferredLocations.size() == 0) { + firstAvailableReadEndpoint = firstAvailableWriteEndpoint; + } else if (availableReadEndpoints.length > 0) { + firstAvailableReadEndpoint = availableReadEndpoints[0]; + } else { + firstAvailableReadEndpoint = LocationCacheTest.EndpointByLocation.get(this.preferredLocations.get(0)); + } + + URL firstWriteEnpoint = !endpointDiscoveryEnabled ? + LocationCacheTest.DefaultEndpoint : + createUrl(Iterables.get(this.databaseAccount.getWritableLocations(), 0).getEndpoint()); + + URL secondWriteEnpoint = !endpointDiscoveryEnabled ? 
+ LocationCacheTest.DefaultEndpoint : + createUrl(Iterables.get(this.databaseAccount.getWritableLocations(), 1).getEndpoint()); + + // If current write endpoint is unavailable, write endpoints order doesn't change + // ALL write requests flip-flop between current write and alternate write endpoint + UnmodifiableList writeEndpoints = this.cache.getWriteEndpoints(); + + assertThat(firstAvailableWriteEndpoint).isEqualTo(writeEndpoints.get(0)); + assertThat(secondAvailableWriteEndpoint).isEqualTo(this.resolveEndpointForWriteRequest(ResourceType.Document, true)); + assertThat(firstAvailableWriteEndpoint).isEqualTo(this.resolveEndpointForWriteRequest(ResourceType.Document, false)); + + // Writes to other resource types should be directed to first/second write endpoint + assertThat(firstWriteEnpoint).isEqualTo(this.resolveEndpointForWriteRequest(ResourceType.Database, false)); + assertThat(secondWriteEnpoint).isEqualTo(this.resolveEndpointForWriteRequest(ResourceType.Database, true)); + + // Reads should be directed to available read endpoints regardless of resource type + assertThat(firstAvailableReadEndpoint).isEqualTo(this.resolveEndpointForReadRequest(true)); + assertThat(firstAvailableReadEndpoint).isEqualTo(this.resolveEndpointForReadRequest(false)); + } + + private URL resolveEndpointForReadRequest(boolean masterResourceType) { + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, + masterResourceType ? ResourceType.Database : ResourceType.Document); + return this.cache.resolveServiceEndpoint(request); + } + + private URL resolveEndpointForWriteRequest(ResourceType resourceType, boolean useAlternateWriteEndpoint) { + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Create, resourceType); + request.requestContext.RouteToLocation(useAlternateWriteEndpoint ? 1 : 0, resourceType.isCollectionChild()); + return this.cache.resolveServiceEndpoint(request); + } + + private RxDocumentServiceRequest CreateRequest(boolean isReadRequest, boolean isMasterResourceType) + { + if (isReadRequest) { + return RxDocumentServiceRequest.create(OperationType.Read, isMasterResourceType ? ResourceType.Database : ResourceType.Document); + } else { + return RxDocumentServiceRequest.create(OperationType.Create, isMasterResourceType ? 
ResourceType.Database : ResourceType.Document); + } + } + private static URL createUrl(String url) { + try { + return new URL(url); + } catch (MalformedURLException e) { + throw new IllegalArgumentException(e); + } + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/routing/PartitionKeyInternalUtils.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/routing/PartitionKeyInternalUtils.java new file mode 100644 index 0000000000000..5751d70a08d51 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/routing/PartitionKeyInternalUtils.java @@ -0,0 +1,35 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.routing; + +import com.google.common.collect.ImmutableList; + +public class PartitionKeyInternalUtils { + + public static PartitionKeyInternal createPartitionKeyInternal(String str) { + return new PartitionKeyInternal(ImmutableList.of( + new StringPartitionKeyComponent(str))); + + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/routing/RoutingMapProviderHelperTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/routing/RoutingMapProviderHelperTest.java new file mode 100644 index 0000000000000..e8ed44a2fca5e --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/routing/RoutingMapProviderHelperTest.java @@ -0,0 +1,149 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.internal.routing; + +import com.azure.data.cosmos.internal.PartitionKeyRange; +import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.tuple.ImmutablePair; +import org.testng.annotations.Test; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.function.Function; +import java.util.stream.Collectors; + +import static org.assertj.core.api.Assertions.assertThat; + +public class RoutingMapProviderHelperTest { + private static final MockRoutingMapProvider ROUTING_MAP_PROVIDER = new MockRoutingMapProvider( + Arrays.asList(new PartitionKeyRange("0", "", "000A"), new PartitionKeyRange("1", "000A", "000D"), + new PartitionKeyRange("2", "000D", "0012"), new PartitionKeyRange("3", "0012", "0015"), + new PartitionKeyRange("4", "0015", "0020"), new PartitionKeyRange("5", "0020", "0040"), + new PartitionKeyRange("6", "0040", "FF"))); + + private static class MockRoutingMapProvider implements RoutingMapProvider { + private final CollectionRoutingMap routingMap; + + public MockRoutingMapProvider(Collection ranges) { + List> pairs = new ArrayList<>( + ranges.size()); + for (PartitionKeyRange range : ranges) { + pairs.add(new ImmutablePair<>(range, null)); + } + + this.routingMap = InMemoryCollectionRoutingMap.tryCreateCompleteRoutingMap(pairs, StringUtils.EMPTY); + } + + @Override + public Collection getOverlappingRanges(String collectionIdOrNameBasedLink, + Range range, boolean forceRefresh) { + return this.routingMap.getOverlappingRanges(range); + } + + @Override + public PartitionKeyRange tryGetRangeByEffectivePartitionKey(String collectionRid, String effectivePartitionKey) { + return null; + } + + @Override + public PartitionKeyRange getPartitionKeyRangeById(String collectionLink, String partitionKeyRangeId, boolean forceRefresh) { + return null; + } + } + + @Test(groups = { "unit" }, expectedExceptions = IllegalArgumentException.class) + public void nonSortedRanges() { + RoutingMapProviderHelper.getOverlappingRanges(ROUTING_MAP_PROVIDER, "dbs/db1/colls/coll1", + Arrays.asList(new Range("0B", "0B", true, true), new Range("0A", "0A", true, true))); + } + + @Test(groups = { "unit" }, expectedExceptions = IllegalArgumentException.class) + public void overlappingRanges1() { + RoutingMapProviderHelper.getOverlappingRanges(ROUTING_MAP_PROVIDER, "dbs/db1/colls/coll1", + Arrays.asList(new Range("0A", "0D", true, true), new Range("0B", "0E", true, true))); + } + + @Test(groups = { "unit" }, expectedExceptions = IllegalArgumentException.class) + public void overlappingRanges2() { + RoutingMapProviderHelper.getOverlappingRanges(ROUTING_MAP_PROVIDER, "dbs/db1/colls/coll1", + Arrays.asList(new Range("0A", "0D", true, true), new Range("0D", "0E", true, true))); + } + + @Test(groups = { "unit" }) + public void getOverlappingRanges() { + Collection ranges = RoutingMapProviderHelper.getOverlappingRanges(ROUTING_MAP_PROVIDER, + "dbs/db1/colls/coll1", + Arrays.asList(new Range("000B", "000E", true, false), + new Range("000E", "000F", true, false), new Range("000F", "0010", true, true), + new Range("0015", "0015", true, true))); + + Function func = new Function() { + @Override + public 
String apply(PartitionKeyRange range) { + return range.id(); + } + }; + + assertThat("1,2,4").isEqualTo(ranges.stream().map(func).collect(Collectors.joining(","))); + + // query for minimal point + ranges = RoutingMapProviderHelper.getOverlappingRanges(ROUTING_MAP_PROVIDER, "dbs/db1/colls/coll1", + Collections.singletonList(new Range("", "", true, true))); + + assertThat("0").isEqualTo(ranges.stream().map(func).collect(Collectors.joining(","))); + + // query for empty range + ranges = RoutingMapProviderHelper.getOverlappingRanges(ROUTING_MAP_PROVIDER, "dbs/db1/colls/coll1", + Collections.singletonList(new Range("", "", true, false))); + + assertThat(0).isEqualTo(ranges.size()); + + // entire range + ranges = RoutingMapProviderHelper.getOverlappingRanges(ROUTING_MAP_PROVIDER, "dbs/db1/colls/coll1", + Collections.singletonList(new Range("", "FF", true, false))); + + assertThat("0,1,2,3,4,5,6").isEqualTo(ranges.stream().map(func).collect(Collectors.joining(","))); + + // matching range + ranges = RoutingMapProviderHelper.getOverlappingRanges(ROUTING_MAP_PROVIDER, "dbs/db1/colls/coll1", + Collections.singletonList(new Range("0012", "0015", true, false))); + + assertThat("3").isEqualTo(ranges.stream().map(func).collect(Collectors.joining(","))); + + // matching range with empty ranges + ranges = RoutingMapProviderHelper.getOverlappingRanges(ROUTING_MAP_PROVIDER, "dbs/db1/colls/coll1", + Arrays.asList(new Range("", "", true, false), new Range("0012", "0015", true, false))); + + assertThat("3").isEqualTo(ranges.stream().map(func).collect(Collectors.joining(","))); + + // matching range and a little bit more. + ranges = RoutingMapProviderHelper.getOverlappingRanges(ROUTING_MAP_PROVIDER, "dbs/db1/colls/coll1", + Collections.singletonList(new Range("0012", "0015", false, true))); + + assertThat("3,4").isEqualTo(ranges.stream().map(func).collect(Collectors.joining(","))); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/routing/StringPartitionKeyComponentTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/routing/StringPartitionKeyComponentTest.java new file mode 100644 index 0000000000000..fb8eccd8aed03 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/internal/routing/StringPartitionKeyComponentTest.java @@ -0,0 +1,25 @@ +package com.azure.data.cosmos.internal.routing; + +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; + +import static org.assertj.core.api.Assertions.assertThat; + +public class StringPartitionKeyComponentTest { + @DataProvider(name = "paramProvider") + public Object[][] paramProvider() { + return new Object[][] { + {"Friday", "Friday", 0}, + {"Friday", "Venerdì", -1}, + {"Fri", "Ven", -1}, + }; + } + + @Test(groups = { "unit" }, dataProvider = "paramProvider") + public void compare(String str1, String str2, int expectedCompare) { + StringPartitionKeyComponent spkc1 = new StringPartitionKeyComponent(str1); + StringPartitionKeyComponent spkc2 = new StringPartitionKeyComponent(str2); + + assertThat(Integer.signum(spkc1.CompareTo(spkc2))).isEqualTo(Integer.signum(expectedCompare)); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/AggregateQueryTests.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/AggregateQueryTests.java new file mode 100644 index 0000000000000..f206d107c2283 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/AggregateQueryTests.java @@ -0,0 +1,214 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 
Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.CosmosClient; +import com.azure.data.cosmos.CosmosClientBuilder; +import com.azure.data.cosmos.CosmosContainer; +import com.azure.data.cosmos.CosmosItemProperties; +import com.azure.data.cosmos.internal.Document; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.internal.FeedResponseListValidator; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; + +import java.util.ArrayList; +import java.util.UUID; + +public class AggregateQueryTests extends TestSuiteBase { + + public static class QueryConfig { + String testName; + String query; + Object expected; + + public QueryConfig (String testName, String query, Object expected) { + this.testName = testName; + this.query = query; + this.expected = expected; + } + } + + public static class AggregateConfig { + String operator; + Object expected; + String condition; + + public AggregateConfig(String operator, Object expected, String condition) { + this.operator = operator; + this.expected = expected; + this.condition = condition; + } + } + + private CosmosContainer createdCollection; + private ArrayList docs = new ArrayList(); + private ArrayList queryConfigs = new ArrayList(); + + private String partitionKey = "mypk"; + private String uniquePartitionKey = "uniquePartitionKey"; + private String field = "field"; + private int sum; + private int numberOfDocuments = 800; + private int numberOfDocumentsWithNumericId; + private int numberOfDocsWithSamePartitionKey = 400; + + private CosmosClient client; + + @Factory(dataProvider = "clientBuildersWithDirect") + public AggregateQueryTests(CosmosClientBuilder clientBuilder) { + super(clientBuilder); + } + + @Test(groups = { "simple" }, timeOut = 2 * TIMEOUT, dataProvider = "queryMetricsArgProvider") + public void queryDocumentsWithAggregates(boolean qmEnabled) throws Exception { + + FeedOptions options = new FeedOptions(); + options.enableCrossPartitionQuery(true); + options.populateQueryMetrics(qmEnabled); + options.maxDegreeOfParallelism(2); + + for (QueryConfig queryConfig : queryConfigs) { + + Flux> queryObservable = createdCollection.queryItems(queryConfig.query, options); + + 
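// each aggregate query should return exactly one page whose single value matches the precomputed expectation; query-metrics validation follows the qmEnabled flag +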
FeedResponseListValidator validator = new FeedResponseListValidator.Builder() + .withAggregateValue(queryConfig.expected) + .numberOfPages(1) + .hasValidQueryMetrics(qmEnabled) + .build(); + + validateQuerySuccess(queryObservable, validator); + } + } + + public void bulkInsert() { + generateTestData(); + voidBulkInsertBlocking(createdCollection, docs); + } + + public void generateTestData() { + + Object[] values = new Object[]{null, false, true, "abc", "cdfg", "opqrs", "ttttttt", "xyz", "oo", "ppp"}; + for (int i = 0; i < values.length; i++) { + CosmosItemProperties d = new CosmosItemProperties(); + d.id(UUID.randomUUID().toString()); + BridgeInternal.setProperty(d, partitionKey, values[i]); + docs.add(d); + } + + for (int i = 0; i < numberOfDocsWithSamePartitionKey; i++) { + CosmosItemProperties d = new CosmosItemProperties(); + BridgeInternal.setProperty(d, partitionKey, uniquePartitionKey); + BridgeInternal.setProperty(d, "resourceId", Integer.toString(i)); + BridgeInternal.setProperty(d, field, i + 1); + d.id(UUID.randomUUID().toString()); + docs.add(d); + } + + numberOfDocumentsWithNumericId = numberOfDocuments - values.length - numberOfDocsWithSamePartitionKey; + for (int i = 0; i < numberOfDocumentsWithNumericId; i++) { + CosmosItemProperties d = new CosmosItemProperties(); + BridgeInternal.setProperty(d, partitionKey, i + 1); + d.id(UUID.randomUUID().toString()); + docs.add(d); + } + + sum = (int) (numberOfDocumentsWithNumericId * (numberOfDocumentsWithNumericId + 1) / 2.0); + + } + + public void generateTestConfigs() { + + String aggregateQueryFormat = "SELECT VALUE %s(r.%s) FROM r WHERE %s"; + AggregateConfig[] aggregateConfigs = new AggregateConfig[] { + new AggregateConfig("AVG", sum / numberOfDocumentsWithNumericId, String.format("IS_NUMBER(r.%s)", partitionKey)), + new AggregateConfig("AVG", null, "true"), + new AggregateConfig("COUNT", numberOfDocuments, "true"), + new AggregateConfig("MAX","xyz","true"), + new AggregateConfig("MIN", null, "true"), + new AggregateConfig("SUM", sum, String.format("IS_NUMBER(r.%s)", partitionKey)), + new AggregateConfig("SUM", null, "true") + }; + + for (AggregateConfig config: aggregateConfigs) { + String query = String.format(aggregateQueryFormat, config.operator, partitionKey, config.condition); + String testName = String.format("%s %s", config.operator, config.condition); + queryConfigs.add(new QueryConfig(testName, query, config.expected)); + } + + String aggregateSinglePartitionQueryFormat = "SELECT VALUE %s(r.%s) FROM r WHERE r.%s = '%s'"; + String aggregateSinglePartitionQueryFormatSelect = "SELECT %s(r.%s) FROM r WHERE r.%s = '%s'"; + double samePartitionSum = numberOfDocsWithSamePartitionKey * (numberOfDocsWithSamePartitionKey + 1) / 2.0; + + AggregateConfig[] aggregateSinglePartitionConfigs = new AggregateConfig[] { + new AggregateConfig("AVG", samePartitionSum / numberOfDocsWithSamePartitionKey, null), + new AggregateConfig("COUNT", numberOfDocsWithSamePartitionKey, null), + new AggregateConfig("MAX", numberOfDocsWithSamePartitionKey, null), + new AggregateConfig("MIN", 1, null), + new AggregateConfig("SUM", samePartitionSum, null) + }; + + for (AggregateConfig config: aggregateSinglePartitionConfigs) { + String query = String.format(aggregateSinglePartitionQueryFormat, config.operator, field, partitionKey, uniquePartitionKey); + String testName = String.format("%s SinglePartition %s", config.operator, "SELECT VALUE"); + queryConfigs.add(new QueryConfig(testName, query, config.expected)); + + query = 
String.format(aggregateSinglePartitionQueryFormatSelect, config.operator, field, partitionKey, uniquePartitionKey); + testName = String.format("%s SinglePartition %s", config.operator, "SELECT"); + queryConfigs.add(new QueryConfig(testName, query, new Document("{'$1':" + removeTrailingZerosIfInteger(config.expected) + "}"))); + } + } + + private Object removeTrailingZerosIfInteger(Object obj) { + if (obj instanceof Number) { + Number num = (Number) obj; + if (num.doubleValue() == num.intValue()) { + return num.intValue(); + } + } + return obj; + } + + @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeClose(client); + } + + @BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT * 2) + public void beforeClass() throws Exception { + client = this.clientBuilder().build(); + createdCollection = getSharedMultiPartitionCosmosContainer(client); + truncateCollection(createdCollection); + + bulkInsert(); + generateTestConfigs(); + + waitIfNeededForReplicasToCatchUp(this.clientBuilder()); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/BackPressureCrossPartitionTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/BackPressureCrossPartitionTest.java new file mode 100644 index 0000000000000..91b1a8d53952c --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/BackPressureCrossPartitionTest.java @@ -0,0 +1,229 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.ClientUnderTestBuilder; +import com.azure.data.cosmos.CosmosBridgeInternal; +import com.azure.data.cosmos.CosmosClient; +import com.azure.data.cosmos.CosmosClientBuilder; +import com.azure.data.cosmos.CosmosContainer; +import com.azure.data.cosmos.CosmosContainerProperties; +import com.azure.data.cosmos.CosmosContainerRequestOptions; +import com.azure.data.cosmos.CosmosDatabase; +import com.azure.data.cosmos.CosmosItemProperties; +import com.azure.data.cosmos.DataType; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.IncludedPath; +import com.azure.data.cosmos.Index; +import com.azure.data.cosmos.IndexingPolicy; +import com.azure.data.cosmos.PartitionKeyDefinition; +import com.azure.data.cosmos.internal.RxDocumentClientUnderTest; +import com.azure.data.cosmos.internal.TestUtils; +import io.reactivex.subscribers.TestSubscriber; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; +import reactor.core.scheduler.Schedulers; +import reactor.util.concurrent.Queues; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.TimeUnit; + +import static org.assertj.core.api.Assertions.assertThat; + +public class BackPressureCrossPartitionTest extends TestSuiteBase { + private final Logger log = LoggerFactory.getLogger(BackPressureCrossPartitionTest.class); + + private static final int TIMEOUT = 1800000; + private static final int SETUP_TIMEOUT = 60000; + + private int numberOfDocs = 4000; + private CosmosDatabase createdDatabase; + private CosmosContainer createdCollection; + private List createdDocuments; + + private CosmosClient client; + private int numberOfPartitions; + + public String getCollectionLink() { + return TestUtils.getCollectionNameLink(createdDatabase.id(), createdCollection.id()); + } + + static protected CosmosContainerProperties getCollectionDefinition() { + PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition(); + ArrayList paths = new ArrayList<>(); + paths.add("/mypk"); + partitionKeyDef.paths(paths); + + IndexingPolicy indexingPolicy = new IndexingPolicy(); + List includedPaths = new ArrayList<>(); + IncludedPath includedPath = new IncludedPath(); + includedPath.path("/*"); + Collection indexes = new ArrayList<>(); + Index stringIndex = Index.Range(DataType.STRING); + BridgeInternal.setProperty(stringIndex, "precision", -1); + indexes.add(stringIndex); + + Index numberIndex = Index.Range(DataType.NUMBER); + BridgeInternal.setProperty(numberIndex, "precision", -1); + indexes.add(numberIndex); + includedPath.indexes(indexes); + includedPaths.add(includedPath); + indexingPolicy.setIncludedPaths(includedPaths); + + CosmosContainerProperties collectionDefinition = new CosmosContainerProperties( + UUID.randomUUID().toString(), + partitionKeyDef); + collectionDefinition.indexingPolicy(indexingPolicy); + + return collectionDefinition; + } + + @Factory(dataProvider = "simpleClientBuildersWithDirectHttps") + public BackPressureCrossPartitionTest(CosmosClientBuilder clientBuilder) { + super(clientBuilder); + } + + private void warmUp() { + FeedOptions options = new 
FeedOptions(); + options.enableCrossPartitionQuery(true); + // ensure collection is cached + createdCollection.queryItems("SELECT * FROM r", options).blockFirst(); + } + + @DataProvider(name = "queryProvider") + public Object[][] queryProvider() { + return new Object[][] { + // query, maxItemCount, max expected back pressure buffered, total number of expected query results + { "SELECT * FROM r", 1, 2 * Queues.SMALL_BUFFER_SIZE, numberOfDocs}, + { "SELECT * FROM r", 100, 2 * Queues.SMALL_BUFFER_SIZE, numberOfDocs}, + { "SELECT * FROM r ORDER BY r.prop", 100, 2 * Queues.SMALL_BUFFER_SIZE + 3 * numberOfPartitions, numberOfDocs}, + { "SELECT TOP 1000 * FROM r", 1, 2 * Queues.SMALL_BUFFER_SIZE, 1000}, + { "SELECT TOP 1000 * FROM r", 100, 2 * Queues.SMALL_BUFFER_SIZE, 1000}, + { "SELECT TOP 1000 * FROM r ORDER BY r.prop", 100, 2 * Queues.SMALL_BUFFER_SIZE + 3 * numberOfPartitions , 1000}, + }; + } + + // TODO: DANOBLE: Investigate DIRECT TCP performance issue + // Links: https://msdata.visualstudio.com/CosmosDB/_workitems/edit/367028https://msdata.visualstudio.com/CosmosDB/_workitems/edit/367028 + + @Test(groups = { "long" }, dataProvider = "queryProvider", timeOut = 2 * TIMEOUT) + public void query(String query, int maxItemCount, int maxExpectedBufferedCountForBackPressure, int expectedNumberOfResults) throws Exception { + FeedOptions options = new FeedOptions(); + options.enableCrossPartitionQuery(true); + options.maxItemCount(maxItemCount); + options.maxDegreeOfParallelism(2); + Flux> queryObservable = createdCollection.queryItems(query, options); + + RxDocumentClientUnderTest rxClient = (RxDocumentClientUnderTest)CosmosBridgeInternal.getAsyncDocumentClient(client); + rxClient.httpRequests.clear(); + + log.info("instantiating subscriber ..."); + TestSubscriber> subscriber = new TestSubscriber<>(1); + queryObservable.publishOn(Schedulers.elastic(), 1).subscribe(subscriber); + int sleepTimeInMillis = 40000; + int i = 0; + + // use a test subscriber and request for more result and sleep in between + while (subscriber.completions() == 0 && subscriber.errorCount() == 0) { + log.debug("loop " + i); + + TimeUnit.MILLISECONDS.sleep(sleepTimeInMillis); + sleepTimeInMillis /= 2; + + if (sleepTimeInMillis > 4000) { + // validate that only one item is returned to subscriber in each iteration + assertThat(subscriber.valueCount() - i).isEqualTo(1); + } + + log.debug("subscriber.getValueCount(): " + subscriber.valueCount()); + log.debug("client.httpRequests.size(): " + rxClient.httpRequests.size()); + // validate that the difference between the number of requests to backend + // and the number of returned results is always less than a fixed threshold + assertThat(rxClient.httpRequests.size() - subscriber.valueCount()) + .isLessThanOrEqualTo(maxExpectedBufferedCountForBackPressure); + + log.debug("requesting more"); + subscriber.requestMore(1); + i++; + } + + subscriber.assertNoErrors(); + subscriber.assertComplete(); + assertThat(subscriber.values().stream().mapToInt(p -> p.results().size()).sum()).isEqualTo(expectedNumberOfResults); + } + + @BeforeClass(groups = { "long" }, timeOut = SETUP_TIMEOUT) + public void beforeClass() { + CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); + client = new ClientUnderTestBuilder(clientBuilder()).build(); + createdDatabase = getSharedCosmosDatabase(client); + createdCollection = createCollection(createdDatabase, getCollectionDefinition(), options, 20000); + + ArrayList docDefList = new ArrayList<>(); + for(int i = 0; i < numberOfDocs; i++) { + 
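// generate the full set of numberOfDocs (4000) test documents up front; they are bulk-inserted below so the back pressure queries have many pages to drain +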
docDefList.add(getDocumentDefinition(i)); + } + + createdDocuments = bulkInsertBlocking( + createdCollection, + docDefList); + + numberOfPartitions = CosmosBridgeInternal.getAsyncDocumentClient(client).readPartitionKeyRanges(getCollectionLink(), null) + .flatMap(p -> Flux.fromIterable(p.results())).collectList().single().block().size(); + + waitIfNeededForReplicasToCatchUp(clientBuilder()); + warmUp(); + } + + // TODO: DANOBLE: Investigate DIRECT TCP performance issue + // Links: https://msdata.visualstudio.com/CosmosDB/_workitems/edit/367028https://msdata.visualstudio.com/CosmosDB/_workitems/edit/367028 + + @AfterClass(groups = { "long" }, timeOut = 2 * SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeDeleteCollection(createdCollection); + safeClose(client); + } + + private static CosmosItemProperties getDocumentDefinition(int cnt) { + String uuid = UUID.randomUUID().toString(); + CosmosItemProperties doc = new CosmosItemProperties(String.format("{ " + + "\"id\": \"%s\", " + + "\"prop\" : %d, " + + "\"mypk\": \"%s\", " + + "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]" + + "}" + , uuid, cnt, uuid)); + return doc; + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/BackPressureTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/BackPressureTest.java new file mode 100644 index 0000000000000..fca0b1e68e5d2 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/BackPressureTest.java @@ -0,0 +1,224 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.ClientUnderTestBuilder; +import com.azure.data.cosmos.CosmosBridgeInternal; +import com.azure.data.cosmos.CosmosClient; +import com.azure.data.cosmos.CosmosClientBuilder; +import com.azure.data.cosmos.CosmosContainer; +import com.azure.data.cosmos.CosmosContainerProperties; +import com.azure.data.cosmos.CosmosContainerRequestOptions; +import com.azure.data.cosmos.CosmosDatabase; +import com.azure.data.cosmos.CosmosItemProperties; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.internal.Offer; +import com.azure.data.cosmos.PartitionKeyDefinition; +import com.azure.data.cosmos.internal.RxDocumentClientUnderTest; +import com.azure.data.cosmos.internal.TestUtils; +import io.reactivex.subscribers.TestSubscriber; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; +import reactor.core.scheduler.Schedulers; +import reactor.util.concurrent.Queues; + +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.TimeUnit; + +import static org.assertj.core.api.Assertions.assertThat; + +public class BackPressureTest extends TestSuiteBase { + + private static final int TIMEOUT = 200000; + private static final int SETUP_TIMEOUT = 60000; + + private CosmosDatabase createdDatabase; + private CosmosContainer createdCollection; + private List createdDocuments; + + private CosmosClient client; + + public String getCollectionLink() { + return TestUtils.getCollectionNameLink(createdDatabase.id(), createdCollection.id()); + } + + private static CosmosContainerProperties getSinglePartitionCollectionDefinition() { + PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition(); + ArrayList paths = new ArrayList(); + paths.add("/mypk"); + partitionKeyDef.paths(paths); + + CosmosContainerProperties collectionDefinition = new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDef); + return collectionDefinition; + } + + @Factory(dataProvider = "simpleClientBuildersWithDirectHttps") + public BackPressureTest(CosmosClientBuilder clientBuilder) { + super(clientBuilder); + } + + @Test(groups = { "long" }, timeOut = 3 * TIMEOUT) + public void readFeed() throws Exception { + FeedOptions options = new FeedOptions(); + options.maxItemCount(1); + options.enableCrossPartitionQuery(true); + Flux> queryObservable = createdCollection.readAllItems(options); + + RxDocumentClientUnderTest rxClient = (RxDocumentClientUnderTest)CosmosBridgeInternal.getAsyncDocumentClient(client); + rxClient.httpRequests.clear(); + + TestSubscriber> subscriber = new TestSubscriber>(1); + queryObservable.publishOn(Schedulers.elastic(), 1).subscribe(subscriber); + int sleepTimeInMillis = 10000; // 10 seconds + + int i = 0; + // use a test subscriber and request for more result and sleep in between + while (subscriber.completions() == 0 && subscriber.getEvents().get(1).isEmpty()) { + TimeUnit.MILLISECONDS.sleep(sleepTimeInMillis); + sleepTimeInMillis /= 2; + + if (sleepTimeInMillis > 1000) { + // validate that only one item is returned to subscriber in each iteration + assertThat(subscriber.valueCount() - i).isEqualTo(1); + } + // validate that only one item is returned to subscriber in each iteration + // validate that the difference between the number of requests to backend + // and the number of 
returned results is always less than a fixed threshold + assertThat(rxClient.httpRequests.size() - subscriber.valueCount()) + .isLessThanOrEqualTo(Queues.SMALL_BUFFER_SIZE); + + subscriber.requestMore(1); + i++; + } + + subscriber.assertNoErrors(); + subscriber.assertComplete(); + assertThat(subscriber.valueCount()).isEqualTo(createdDocuments.size()); + } + + @Test(groups = { "long" }, timeOut = 3 * TIMEOUT) + public void query() throws Exception { + FeedOptions options = new FeedOptions(); + options.maxItemCount(1); + options.enableCrossPartitionQuery(true); + Flux> queryObservable = createdCollection.queryItems("SELECT * from r", options); + + RxDocumentClientUnderTest rxClient = (RxDocumentClientUnderTest)CosmosBridgeInternal.getAsyncDocumentClient(client); + rxClient.httpRequests.clear(); + + TestSubscriber> subscriber = new TestSubscriber>(1); + queryObservable.publishOn(Schedulers.elastic(), 1).subscribe(subscriber); + int sleepTimeInMillis = 10000; + + int i = 0; + // use a test subscriber and request for more result and sleep in between + while(subscriber.completions() == 0 && subscriber.getEvents().get(1).isEmpty()) { + TimeUnit.MILLISECONDS.sleep(sleepTimeInMillis); + sleepTimeInMillis /= 2; + + if (sleepTimeInMillis > 1000) { + // validate that only one item is returned to subscriber in each iteration + assertThat(subscriber.valueCount() - i).isEqualTo(1); + } + // validate that the difference between the number of requests to backend + // and the number of returned results is always less than a fixed threshold + assertThat(rxClient.httpRequests.size() - subscriber.valueCount()) + .isLessThanOrEqualTo(Queues.SMALL_BUFFER_SIZE); + + subscriber.requestMore(1); + i++; + } + + subscriber.assertNoErrors(); + subscriber.assertComplete(); + + assertThat(subscriber.valueCount()).isEqualTo(createdDocuments.size()); + } + + @BeforeClass(groups = { "long" }, timeOut = 2 * SETUP_TIMEOUT) + public void beforeClass() throws Exception { + + CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); + client = new ClientUnderTestBuilder(clientBuilder()).build(); + createdDatabase = getSharedCosmosDatabase(client); + + createdCollection = createCollection(createdDatabase, getSinglePartitionCollectionDefinition(), options, 1000); + + RxDocumentClientUnderTest rxClient = (RxDocumentClientUnderTest)CosmosBridgeInternal.getAsyncDocumentClient(client); + + // increase throughput to max for a single partition collection to avoid throttling + // for bulk insert and later queries. 
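+        // the update is done at the Offer (throughput) resource level: query the offer by the collection's resourceId, raise it to 6000 RU/s, then replace it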
+ Offer offer = rxClient.queryOffers( + String.format("SELECT * FROM r WHERE r.offerResourceId = '%s'", + createdCollection.read().block().properties().resourceId()) + , null).take(1).map(FeedResponse::results).single().block().get(0); + offer.setThroughput(6000); + offer = rxClient.replaceOffer(offer).single().block().getResource(); + assertThat(offer.getThroughput()).isEqualTo(6000); + + ArrayList docDefList = new ArrayList<>(); + for(int i = 0; i < 1000; i++) { + docDefList.add(getDocumentDefinition(i)); + } + + createdDocuments = bulkInsertBlocking(createdCollection, docDefList); + + waitIfNeededForReplicasToCatchUp(clientBuilder()); + warmUp(); + } + + private void warmUp() { + // ensure collection is cached + FeedOptions options = new FeedOptions(); + options.enableCrossPartitionQuery(true); + createdCollection.queryItems("SELECT * from r", options).blockFirst(); + } + + // TODO: DANOBLE: Investigate DIRECT TCP performance issue + // NOTE: This method requires multiple SHUTDOWN_TIMEOUT intervals + // SEE: https://msdata.visualstudio.com/CosmosDB/_workitems/edit/367028https://msdata.visualstudio.com/CosmosDB/_workitems/edit/367028 + + @AfterClass(groups = { "long" }, timeOut = 2 * SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeDeleteCollection(createdCollection); + safeClose(client); + } + + private static CosmosItemProperties getDocumentDefinition(int cnt) { + String uuid = UUID.randomUUID().toString(); + CosmosItemProperties doc = new CosmosItemProperties(String.format("{ " + + "\"id\": \"%s\", " + + "\"prop\" : %d, " + + "\"mypk\": \"%s\", " + + "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]" + + "}" + , uuid, cnt, uuid)); + return doc; + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ChangeFeedProcessorTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ChangeFeedProcessorTest.java new file mode 100644 index 0000000000000..136a4789f6ee5 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ChangeFeedProcessorTest.java @@ -0,0 +1,297 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.ChangeFeedProcessor; +import com.azure.data.cosmos.ChangeFeedProcessorOptions; +import com.azure.data.cosmos.CosmosClient; +import com.azure.data.cosmos.CosmosClientBuilder; +import com.azure.data.cosmos.CosmosContainer; +import com.azure.data.cosmos.CosmosContainerProperties; +import com.azure.data.cosmos.CosmosContainerRequestOptions; +import com.azure.data.cosmos.CosmosDatabase; +import com.azure.data.cosmos.CosmosItemProperties; +import com.azure.data.cosmos.SerializationFormattingPolicy; +import org.apache.commons.lang3.RandomStringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testng.annotations.AfterClass; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.scheduler.Schedulers; + +import java.time.Duration; +import java.time.OffsetDateTime; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.ConcurrentHashMap; + +import static org.assertj.core.api.Assertions.assertThat; + +public class ChangeFeedProcessorTest extends TestSuiteBase { + private final static Logger log = LoggerFactory.getLogger(ChangeFeedProcessorTest.class); + + private CosmosDatabase createdDatabase; + private CosmosContainer createdFeedCollection; + private CosmosContainer createdLeaseCollection; + private List createdDocuments; + private static Map receivedDocuments; +// private final String databaseId = "testdb1"; +// private final String hostName = "TestHost1"; + private final String hostName = RandomStringUtils.randomAlphabetic(6); + private final int FEED_COUNT = 10; + private final int CHANGE_FEED_PROCESSOR_TIMEOUT = 5000; + + private CosmosClient client; + + private ChangeFeedProcessor changeFeedProcessor; + + @Factory(dataProvider = "clientBuilders") + public ChangeFeedProcessorTest(CosmosClientBuilder clientBuilder) { + super(clientBuilder); + } + + @Test(groups = { "emulator" }, timeOut = TIMEOUT) + public void readFeedDocumentsStartFromBeginning() { + setupReadFeedDocuments(); + + changeFeedProcessor = ChangeFeedProcessor.Builder() + .hostName(hostName) + .handleChanges(docs -> { + ChangeFeedProcessorTest.log.info("START processing from thread {}", Thread.currentThread().getId()); + for (CosmosItemProperties item : docs) { + processItem(item); + } + ChangeFeedProcessorTest.log.info("END processing from thread {}", Thread.currentThread().getId()); + }) + .feedContainer(createdFeedCollection) + .leaseContainer(createdLeaseCollection) + .options(new ChangeFeedProcessorOptions() + .leaseRenewInterval(Duration.ofSeconds(20)) + .leaseAcquireInterval(Duration.ofSeconds(10)) + .leaseExpirationInterval(Duration.ofSeconds(30)) + .feedPollDelay(Duration.ofSeconds(2)) + .leasePrefix("TEST") + .maxItemCount(10) + .startFromBeginning(true) + .maxScaleCount(0) // unlimited + .discardExistingLeases(true) + ) + .build(); + + try { + changeFeedProcessor.start().subscribeOn(Schedulers.elastic()) + .timeout(Duration.ofMillis(CHANGE_FEED_PROCESSOR_TIMEOUT)) + .subscribe(); + } catch (Exception ex) { + log.error("Change feed processor did not start in the expected time", ex); + } + + // Wait for the feed processor to receive and process the documents. 
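+        // CHANGE_FEED_PROCESSOR_TIMEOUT is 5000 ms, so this fixed sleep gives the processor roughly ten seconds to drain the feed before the assertions below run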
+ try { + Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT); + } catch (InterruptedException e) { + e.printStackTrace(); + } + + changeFeedProcessor.stop().subscribeOn(Schedulers.elastic()).timeout(Duration.ofMillis(CHANGE_FEED_PROCESSOR_TIMEOUT)).subscribe(); + + for (CosmosItemProperties item : createdDocuments) { + assertThat(receivedDocuments.containsKey(item.id())).as("Document with id: " + item.id()).isTrue(); + } + + // Wait for the feed processor to shutdown. + try { + Thread.sleep(CHANGE_FEED_PROCESSOR_TIMEOUT); + } catch (InterruptedException e) { + e.printStackTrace(); + } + receivedDocuments.clear(); + } + + @Test(groups = { "emulator" }, timeOut = TIMEOUT) + public void readFeedDocumentsStartFromCustomDate() { + ChangeFeedProcessor changeFeedProcessor = ChangeFeedProcessor.Builder() + .hostName(hostName) + .handleChanges(docs -> { + ChangeFeedProcessorTest.log.info("START processing from thread {}", Thread.currentThread().getId()); + for (CosmosItemProperties item : docs) { + processItem(item); + } + ChangeFeedProcessorTest.log.info("END processing from thread {}", Thread.currentThread().getId()); + }) + .feedContainer(createdFeedCollection) + .leaseContainer(createdLeaseCollection) + .options(new ChangeFeedProcessorOptions() + .leaseRenewInterval(Duration.ofSeconds(20)) + .leaseAcquireInterval(Duration.ofSeconds(10)) + .leaseExpirationInterval(Duration.ofSeconds(30)) + .feedPollDelay(Duration.ofSeconds(1)) + .leasePrefix("TEST") + .maxItemCount(10) + .startTime(OffsetDateTime.now().minusDays(1)) + .minScaleCount(1) + .maxScaleCount(3) + .discardExistingLeases(true) + ) + .build(); + + try { + changeFeedProcessor.start().subscribeOn(Schedulers.elastic()) + .timeout(Duration.ofMillis(CHANGE_FEED_PROCESSOR_TIMEOUT)) + .subscribe(); + } catch (Exception ex) { + log.error("Change feed processor did not start in the expected time", ex); + } + + setupReadFeedDocuments(); + + // Wait for the feed processor to receive and process the documents. + long remainingWork = FEED_TIMEOUT; + while (remainingWork > 0 && receivedDocuments.size() < FEED_COUNT) { + remainingWork -= CHANGE_FEED_PROCESSOR_TIMEOUT; + try { + Thread.sleep(CHANGE_FEED_PROCESSOR_TIMEOUT); + } catch (InterruptedException e) { + e.printStackTrace(); + } + } + + assertThat(remainingWork >= 0).as("Failed to receive all the feed documents").isTrue(); + + changeFeedProcessor.stop().subscribeOn(Schedulers.elastic()).timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT)).subscribe(); + + for (CosmosItemProperties item : createdDocuments) { + assertThat(receivedDocuments.containsKey(item.id())).as("Document with id: " + item.id()).isTrue(); + } + + // Wait for the feed processor to shutdown. 
+ try { + Thread.sleep(CHANGE_FEED_PROCESSOR_TIMEOUT); + } catch (InterruptedException e) { + e.printStackTrace(); + } + receivedDocuments.clear(); + } + + @BeforeMethod(groups = { "emulator" }, timeOut = 2 * SETUP_TIMEOUT, alwaysRun = true) + public void beforeMethod() { + createdFeedCollection = createFeedCollection(); + createdLeaseCollection = createLeaseCollection(); + } + + @BeforeClass(groups = { "emulator" }, timeOut = SETUP_TIMEOUT, alwaysRun = true) + public void beforeClass() { + client = clientBuilder().build(); + +// try { +// client.getDatabase(databaseId).read() +// .map(cosmosDatabaseResponse -> cosmosDatabaseResponse.database()) +// .flatMap(database -> database.delete()) +// .onErrorResume(throwable -> { +// if (throwable instanceof com.azure.data.cosmos.CosmosClientException) { +// com.azure.data.cosmos.CosmosClientException clientException = (com.azure.data.cosmos.CosmosClientException) throwable; +// if (clientException.statusCode() == 404) { +// return Mono.empty(); +// } +// } +// return Mono.error(throwable); +// }).block(); +// Thread.sleep(500); +// } catch (Exception e){ +// log.warn("Database delete", e); +// } +// createdDatabase = createDatabase(client, databaseId); + createdDatabase = getSharedCosmosDatabase(client); + } + + @AfterMethod(groups = { "emulator" }, timeOut = 3 * SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterMethod() { + safeDeleteCollection(createdFeedCollection); + safeDeleteCollection(createdLeaseCollection); + + // Allow some time for the collections and the database to be deleted before exiting. + try { + Thread.sleep(500); + } catch (Exception e){ } + } + + @AfterClass(groups = { "emulator" }, timeOut = 2 * SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { +// try { +// client.readAllDatabases() +// .flatMap(cosmosDatabaseSettingsFeedResponse -> reactor.core.publisher.Flux.fromIterable(cosmosDatabaseSettingsFeedResponse.results())) +// .flatMap(cosmosDatabaseSettings -> { +// CosmosDatabase cosmosDatabase = client.getDatabase(cosmosDatabaseSettings.id()); +// return cosmosDatabase.delete(); +// }).blockLast(); +// Thread.sleep(500); +// } catch (Exception e){ } + + safeClose(client); + } + + private void setupReadFeedDocuments() { + receivedDocuments = new ConcurrentHashMap<>(); + List docDefList = new ArrayList<>(); + + for(int i = 0; i < FEED_COUNT; i++) { + docDefList.add(getDocumentDefinition()); + } + + createdDocuments = bulkInsertBlocking(createdFeedCollection, docDefList); + waitIfNeededForReplicasToCatchUp(clientBuilder()); + } + + private CosmosItemProperties getDocumentDefinition() { + String uuid = UUID.randomUUID().toString(); + CosmosItemProperties doc = new CosmosItemProperties(String.format("{ " + + "\"id\": \"%s\", " + + "\"mypk\": \"%s\", " + + "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]" + + "}" + , uuid, uuid)); + return doc; + } + + private CosmosContainer createFeedCollection() { + CosmosContainerRequestOptions optionsFeedCollection = new CosmosContainerRequestOptions(); + return createCollection(createdDatabase, getCollectionDefinition(), optionsFeedCollection, 10100); + } + + private CosmosContainer createLeaseCollection() { + CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); + CosmosContainerProperties collectionDefinition = new CosmosContainerProperties(UUID.randomUUID().toString(), "/id"); + return createCollection(createdDatabase, collectionDefinition, options, 400); + } + + private static synchronized void processItem(CosmosItemProperties item) { 
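+        // synchronized as a precaution in case handleChanges is invoked concurrently; each document is recorded by id so the tests can assert that every created item was observed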
+ ChangeFeedProcessorTest.log.info("RECEIVED {}", item.toJson(SerializationFormattingPolicy.INDENTED)); + receivedDocuments.put(item.id(), item); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ChangeFeedTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ChangeFeedTest.java new file mode 100644 index 0000000000000..60c81ce3ac11e --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ChangeFeedTest.java @@ -0,0 +1,313 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.internal.AsyncDocumentClient; +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.ChangeFeedOptions; +import com.azure.data.cosmos.internal.Database; +import com.azure.data.cosmos.internal.Document; +import com.azure.data.cosmos.internal.DocumentCollection; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.PartitionKey; +import com.azure.data.cosmos.PartitionKeyDefinition; +import com.azure.data.cosmos.internal.*; +import com.azure.data.cosmos.Resource; +import com.azure.data.cosmos.internal.TestSuiteBase; +import com.google.common.collect.ArrayListMultimap; +import com.google.common.collect.Multimap; +import org.testng.SkipException; +import org.testng.annotations.AfterClass; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; + +import java.lang.reflect.Method; +import java.time.OffsetDateTime; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.UUID; + +import static com.azure.data.cosmos.CommonsBridgeInternal.partitionKeyRangeIdInternal; +import static org.assertj.core.api.Assertions.assertThat; + +//TODO: change to use external TestSuiteBase +public class ChangeFeedTest extends TestSuiteBase { + + private static final int SETUP_TIMEOUT = 40000; + private static final int TIMEOUT = 30000; + private static final String PartitionKeyFieldName = "mypk"; + private Database createdDatabase; + private DocumentCollection createdCollection; + private Multimap partitionKeyToDocuments = ArrayListMultimap.create(); + + private AsyncDocumentClient client; + + public String getCollectionLink() { + return TestUtils.getCollectionNameLink(createdDatabase.id(), 
createdCollection.id()); + } + + static protected DocumentCollection getCollectionDefinition() { + PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition(); + ArrayList paths = new ArrayList(); + paths.add("/" + PartitionKeyFieldName); + partitionKeyDef.paths(paths); + + DocumentCollection collectionDefinition = new DocumentCollection(); + collectionDefinition.id(UUID.randomUUID().toString()); + collectionDefinition.setPartitionKey(partitionKeyDef); + + return collectionDefinition; + } + + public ChangeFeedTest() { + super(createGatewayRxDocumentClient()); + subscriberValidationTimeout = TIMEOUT; + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void changeFeed_fromBeginning() throws Exception { + String partitionKey = partitionKeyToDocuments.keySet().iterator().next(); + Collection expectedDocuments = partitionKeyToDocuments.get(partitionKey); + + ChangeFeedOptions changeFeedOption = new ChangeFeedOptions(); + changeFeedOption.maxItemCount(3); + changeFeedOption.partitionKey(new PartitionKey(partitionKey)); + changeFeedOption.startFromBeginning(true); + + List> changeFeedResultList = client.queryDocumentChangeFeed(getCollectionLink(), changeFeedOption) + .collectList().block(); + + int count = 0; + for (int i = 0; i < changeFeedResultList.size(); i++) { + FeedResponse changeFeedPage = changeFeedResultList.get(i); + assertThat(changeFeedPage.continuationToken()).as("Response continuation should not be null").isNotNull(); + + count += changeFeedPage.results().size(); + assertThat(changeFeedPage.results().size()) + .as("change feed should contain all the previously created documents") + .isLessThanOrEqualTo(changeFeedOption.maxItemCount()); + } + assertThat(count).as("the number of changes").isEqualTo(expectedDocuments.size()); + } + + @Test(groups = { "simple" }, timeOut = 5 * TIMEOUT) + public void changesFromPartitionKeyRangeId_FromBeginning() throws Exception { + List partitionKeyRangeIds = client.readPartitionKeyRanges(getCollectionLink(), null) + .flatMap(p -> Flux.fromIterable(p.results()), 1) + .map(Resource::id) + .collectList() + .block(); + + assertThat(partitionKeyRangeIds.size()).isGreaterThan(1); + + String pkRangeId = partitionKeyRangeIds.get(0); + + ChangeFeedOptions changeFeedOption = new ChangeFeedOptions(); + changeFeedOption.maxItemCount(3); + partitionKeyRangeIdInternal(changeFeedOption, pkRangeId); + changeFeedOption.startFromBeginning(true); + List> changeFeedResultList = client.queryDocumentChangeFeed(getCollectionLink(), changeFeedOption) + .collectList().block(); + + int count = 0; + for(int i = 0; i < changeFeedResultList.size(); i++) { + FeedResponse changeFeedPage = changeFeedResultList.get(i); + assertThat(changeFeedPage.continuationToken()).as("Response continuation should not be null").isNotNull(); + + count += changeFeedPage.results().size(); + assertThat(changeFeedPage.results().size()) + .as("change feed should contain all the previously created documents") + .isLessThanOrEqualTo(changeFeedOption.maxItemCount()); + + assertThat(changeFeedPage.continuationToken()).as("Response continuation should not be null").isNotNull(); + assertThat(changeFeedPage.continuationToken()).as("Response continuation should not be empty").isNotEmpty(); + } + assertThat(changeFeedResultList.size()).as("has at least one page").isGreaterThanOrEqualTo(1); + assertThat(count).as("the number of changes").isGreaterThan(0); + assertThat(count).as("the number of changes").isLessThan(partitionKeyToDocuments.size()); + } + + @Test(groups = { "simple" }, timeOut 
= TIMEOUT) + public void changeFeed_fromNow() throws Exception { + // READ change feed from current. + ChangeFeedOptions changeFeedOption = new ChangeFeedOptions(); + String partitionKey = partitionKeyToDocuments.keySet().iterator().next(); + changeFeedOption.partitionKey(new PartitionKey(partitionKey)); + + List> changeFeedResultsList = client.queryDocumentChangeFeed(getCollectionLink(), changeFeedOption) + .collectList() + .block(); + + FeedResponseListValidator validator = new FeedResponseListValidator.Builder().totalSize(0).build(); + validator.validate(changeFeedResultsList); + assertThat(changeFeedResultsList.get(changeFeedResultsList.size() -1 ). + continuationToken()).as("Response continuation should not be null").isNotNull(); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void changeFeed_fromStartDate() throws Exception { + + //setStartDateTime is not currently supported in multimaster mode. So skipping the test + if(BridgeInternal.isEnableMultipleWriteLocations(client.getDatabaseAccount().single().block())){ + throw new SkipException("StartTime/IfModifiedSince is not currently supported when EnableMultipleWriteLocations is set"); + } + + // READ change feed from current. + ChangeFeedOptions changeFeedOption = new ChangeFeedOptions(); + String partitionKey = partitionKeyToDocuments.keySet().iterator().next(); + + changeFeedOption.partitionKey(new PartitionKey(partitionKey)); + OffsetDateTime befTime = OffsetDateTime.now(); + // Waiting for at-least a second to ensure that new document is created after we took the time stamp + waitAtleastASecond(befTime); + + OffsetDateTime dateTimeBeforeCreatingDoc = OffsetDateTime.now(); + changeFeedOption.startDateTime(dateTimeBeforeCreatingDoc); + + // Waiting for at-least a second to ensure that new document is created after we took the time stamp + waitAtleastASecond(dateTimeBeforeCreatingDoc); + client.createDocument(getCollectionLink(), getDocumentDefinition(partitionKey), null, true).single().block(); + + List> changeFeedResultList = client.queryDocumentChangeFeed(getCollectionLink(), + changeFeedOption).collectList().block(); + + int count = 0; + for(int i = 0; i < changeFeedResultList.size(); i++) { + FeedResponse changeFeedPage = changeFeedResultList.get(i); + count += changeFeedPage.results().size(); + assertThat(changeFeedPage.continuationToken()).as("Response continuation should not be null").isNotNull(); + } + assertThat(count).as("Change feed should have one newly created document").isEqualTo(1); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void changesFromPartitionKey_AfterInsertingNewDocuments() throws Exception { + ChangeFeedOptions changeFeedOption = new ChangeFeedOptions(); + changeFeedOption.maxItemCount(3); + String partitionKey = partitionKeyToDocuments.keySet().iterator().next(); + changeFeedOption.partitionKey(new PartitionKey(partitionKey)); + + List> changeFeedResultsList = client.queryDocumentChangeFeed(getCollectionLink(), changeFeedOption) + .collectList().block(); + + assertThat(changeFeedResultsList).as("only one page").hasSize(1); + assertThat(changeFeedResultsList.get(0).results()).as("no recent changes").isEmpty(); + + String changeFeedContinuation = changeFeedResultsList.get(changeFeedResultsList.size()-1).continuationToken(); + assertThat(changeFeedContinuation).as("continuation token is not null").isNotNull(); + assertThat(changeFeedContinuation).as("continuation token is not empty").isNotEmpty(); + + // create some documents + client.createDocument(getCollectionLink(), 
getDocumentDefinition(partitionKey), null, true).single().block(); + client.createDocument(getCollectionLink(), getDocumentDefinition(partitionKey), null, true).single().block(); + + // READ change feed from continuation + changeFeedOption.requestContinuation(changeFeedContinuation); + + + FeedResponse changeFeedResults2 = client.queryDocumentChangeFeed(getCollectionLink(), changeFeedOption) + .blockFirst(); + + assertThat(changeFeedResults2.results()).as("change feed should contain newly inserted docs.").hasSize(2); + assertThat(changeFeedResults2.continuationToken()).as("Response continuation should not be null").isNotNull(); + } + + public void createDocument(AsyncDocumentClient client, String partitionKey) { + Document docDefinition = getDocumentDefinition(partitionKey); + + Document createdDocument = client + .createDocument(getCollectionLink(), docDefinition, null, false).single().block().getResource(); + partitionKeyToDocuments.put(partitionKey, createdDocument); + } + + public List bulkInsert(AsyncDocumentClient client, List docs) { + ArrayList>> result = new ArrayList>>(); + for (int i = 0; i < docs.size(); i++) { + result.add(client.createDocument("dbs/" + createdDatabase.id() + "/colls/" + createdCollection.id(), docs.get(i), null, false)); + } + + return Flux.merge(Flux.fromIterable(result), 100).map(ResourceResponse::getResource).collectList().block(); + } + + @AfterMethod(groups = { "simple" }, timeOut = SETUP_TIMEOUT) + public void removeCollection() { + if (createdCollection != null) { + deleteCollection(client, getCollectionLink()); + } + } + + @BeforeMethod(groups = { "simple" }, timeOut = SETUP_TIMEOUT) + public void populateDocuments(Method method) { + + partitionKeyToDocuments.clear(); + + RequestOptions options = new RequestOptions(); + options.setOfferThroughput(10100); + createdCollection = createCollection(client, createdDatabase.id(), getCollectionDefinition(), options); + + List docs = new ArrayList<>(); + + for (int i = 0; i < 200; i++) { + String partitionKey = UUID.randomUUID().toString(); + for(int j = 0; j < 7; j++) { + docs.add(getDocumentDefinition(partitionKey)); + } + } + + List insertedDocs = bulkInsert(client, docs); + for(Document doc: insertedDocs) { + partitionKeyToDocuments.put(doc.getString(PartitionKeyFieldName), doc); + } + } + + @BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT) + public void beforeClass() throws Exception { + // set up the client + client = clientBuilder().build(); + createdDatabase = SHARED_DATABASE; + } + + @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeClose(client); + } + + private static Document getDocumentDefinition(String partitionKey) { + String uuid = UUID.randomUUID().toString(); + Document doc = new Document(); + doc.id(uuid); + BridgeInternal.setProperty(doc, "mypk", partitionKey); + BridgeInternal.setProperty(doc, "prop", uuid); + return doc; + } + + private static void waitAtleastASecond(OffsetDateTime befTime) throws InterruptedException { + while (befTime.plusSeconds(1).isAfter(OffsetDateTime.now())) { + Thread.sleep(100); + } + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/CollectionCrudTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/CollectionCrudTest.java new file mode 100644 index 0000000000000..63de9f5e16f36 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/CollectionCrudTest.java @@ -0,0 +1,334 @@ +/* + * The MIT License (MIT) + * 
Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.CompositePath; +import com.azure.data.cosmos.CompositePathSortOrder; +import com.azure.data.cosmos.CosmosClient; +import com.azure.data.cosmos.CosmosClientBuilder; +import com.azure.data.cosmos.CosmosContainer; +import com.azure.data.cosmos.CosmosContainerProperties; +import com.azure.data.cosmos.CosmosContainerRequestOptions; +import com.azure.data.cosmos.CosmosContainerResponse; +import com.azure.data.cosmos.CosmosDatabase; +import com.azure.data.cosmos.CosmosDatabaseForTest; +import com.azure.data.cosmos.CosmosItem; +import com.azure.data.cosmos.CosmosItemProperties; +import com.azure.data.cosmos.CosmosItemRequestOptions; +import com.azure.data.cosmos.CosmosItemResponse; +import com.azure.data.cosmos.CosmosResponseValidator; +import com.azure.data.cosmos.internal.Database; +import com.azure.data.cosmos.IndexingMode; +import com.azure.data.cosmos.IndexingPolicy; +import com.azure.data.cosmos.PartitionKey; +import com.azure.data.cosmos.PartitionKeyDefinition; +import com.azure.data.cosmos.internal.FailureValidator; +import com.azure.data.cosmos.internal.RetryAnalyzer; +import com.azure.data.cosmos.SpatialSpec; +import com.azure.data.cosmos.SpatialType; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Mono; + +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; + +import static org.assertj.core.api.Assertions.assertThat; + +public class CollectionCrudTest extends TestSuiteBase { + + private static final int TIMEOUT = 50000; + private static final int SETUP_TIMEOUT = 20000; + private static final int SHUTDOWN_TIMEOUT = 20000; + private final String databaseId = CosmosDatabaseForTest.generateId(); + + private CosmosClient client; + private CosmosDatabase database; + + @Factory(dataProvider = "clientBuildersWithDirect") + public CollectionCrudTest(CosmosClientBuilder clientBuilder) { + super(clientBuilder); + this.subscriberValidationTimeout = TIMEOUT; + } + + @DataProvider(name = "collectionCrudArgProvider") + public Object[][] collectionCrudArgProvider() { + return new Object[][] { + // collection name, is name base + {UUID.randomUUID().toString()} , + 
+ // with special characters in the name. + {"+ -_,:.|~" + UUID.randomUUID().toString() + " +-_,:.|~"} , + }; + } + + private CosmosContainerProperties getCollectionDefinition(String collectionName) { + PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition(); + ArrayList paths = new ArrayList(); + paths.add("/mypk"); + partitionKeyDef.paths(paths); + + CosmosContainerProperties collectionDefinition = new CosmosContainerProperties( + collectionName, + partitionKeyDef); + + return collectionDefinition; + } + + @Test(groups = { "emulator" }, timeOut = TIMEOUT, dataProvider = "collectionCrudArgProvider") + public void createCollection(String collectionName) throws InterruptedException { + CosmosContainerProperties collectionDefinition = getCollectionDefinition(collectionName); + + Mono createObservable = database + .createContainer(collectionDefinition); + + CosmosResponseValidator validator = new CosmosResponseValidator.Builder() + .withId(collectionDefinition.id()).build(); + + validateSuccess(createObservable, validator); + safeDeleteAllCollections(database); + } + + @Test(groups = { "emulator" }, timeOut = TIMEOUT) + public void createCollectionWithCompositeIndexAndSpatialSpec() throws InterruptedException { + PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition(); + ArrayList paths = new ArrayList(); + paths.add("/mypk"); + partitionKeyDef.paths(paths); + + CosmosContainerProperties collection = new CosmosContainerProperties( + UUID.randomUUID().toString(), + partitionKeyDef); + + IndexingPolicy indexingPolicy = new IndexingPolicy(); + CompositePath compositePath1 = new CompositePath(); + compositePath1.path("/path1"); + compositePath1.order(CompositePathSortOrder.ASCENDING); + CompositePath compositePath2 = new CompositePath(); + compositePath2.path("/path2"); + compositePath2.order(CompositePathSortOrder.DESCENDING); + CompositePath compositePath3 = new CompositePath(); + compositePath3.path("/path3"); + CompositePath compositePath4 = new CompositePath(); + compositePath4.path("/path4"); + compositePath4.order(CompositePathSortOrder.ASCENDING); + CompositePath compositePath5 = new CompositePath(); + compositePath5.path("/path5"); + compositePath5.order(CompositePathSortOrder.DESCENDING); + CompositePath compositePath6 = new CompositePath(); + compositePath6.path("/path6"); + + ArrayList compositeIndex1 = new ArrayList(); + compositeIndex1.add(compositePath1); + compositeIndex1.add(compositePath2); + compositeIndex1.add(compositePath3); + + ArrayList compositeIndex2 = new ArrayList(); + compositeIndex2.add(compositePath4); + compositeIndex2.add(compositePath5); + compositeIndex2.add(compositePath6); + + List> compositeIndexes = new ArrayList<>(); + compositeIndexes.add(compositeIndex1); + compositeIndexes.add(compositeIndex2); + indexingPolicy.compositeIndexes(compositeIndexes); + + SpatialType[] spatialTypes = new SpatialType[] { + SpatialType.POINT, + SpatialType.LINE_STRING, + SpatialType.POLYGON, + SpatialType.MULTI_POLYGON + }; + List spatialIndexes = new ArrayList(); + for (int index = 0; index < 2; index++) { + List collectionOfSpatialTypes = new ArrayList(); + + SpatialSpec spec = new SpatialSpec(); + spec.path("/path" + index + "/*"); + + for (int i = index; i < index + 3; i++) { + collectionOfSpatialTypes.add(spatialTypes[i]); + } + spec.spatialTypes(collectionOfSpatialTypes); + spatialIndexes.add(spec); + } + + indexingPolicy.spatialIndexes(spatialIndexes); + + collection.indexingPolicy(indexingPolicy); + + Mono createObservable = database + 
.createContainer(collection, new CosmosContainerRequestOptions()); + + CosmosResponseValidator validator = new CosmosResponseValidator.Builder() + .withId(collection.id()) + .withCompositeIndexes(compositeIndexes) + .withSpatialIndexes(spatialIndexes) + .build(); + + validateSuccess(createObservable, validator); + safeDeleteAllCollections(database); + } + + @Test(groups = { "emulator" }, timeOut = TIMEOUT, dataProvider = "collectionCrudArgProvider") + public void readCollection(String collectionName) throws InterruptedException { + CosmosContainerProperties collectionDefinition = getCollectionDefinition(collectionName); + + Mono createObservable = database.createContainer(collectionDefinition); + CosmosContainer collection = createObservable.block().container(); + + Mono readObservable = collection.read(); + + CosmosResponseValidator validator = new CosmosResponseValidator.Builder() + .withId(collection.id()).build(); + validateSuccess(readObservable, validator); + safeDeleteAllCollections(database); + } + + @Test(groups = { "emulator" }, timeOut = TIMEOUT, dataProvider = "collectionCrudArgProvider") + public void readCollection_DoesntExist(String collectionName) throws Exception { + + Mono readObservable = database + .getContainer("I don't exist").read(); + + FailureValidator validator = new FailureValidator.Builder().resourceNotFound().build(); + validateFailure(readObservable, validator); + } + + @Test(groups = { "emulator" }, timeOut = TIMEOUT, dataProvider = "collectionCrudArgProvider") + public void deleteCollection(String collectionName) throws InterruptedException { + CosmosContainerProperties collectionDefinition = getCollectionDefinition(collectionName); + + Mono createObservable = database.createContainer(collectionDefinition); + CosmosContainer collection = createObservable.block().container(); + + Mono deleteObservable = collection.delete(); + + CosmosResponseValidator validator = new CosmosResponseValidator.Builder() + .nullResource().build(); + validateSuccess(deleteObservable, validator); + } + + @Test(groups = { "emulator" }, timeOut = TIMEOUT, dataProvider = "collectionCrudArgProvider") + public void replaceCollection(String collectionName) throws InterruptedException { + // create a collection + CosmosContainerProperties collectionDefinition = getCollectionDefinition(collectionName); + Mono createObservable = database.createContainer(collectionDefinition); + CosmosContainer collection = createObservable.block().container(); + CosmosContainerProperties collectionSettings = collection.read().block().properties(); + // sanity check + assertThat(collectionSettings.indexingPolicy().indexingMode()).isEqualTo(IndexingMode.CONSISTENT); + + // replace indexing mode + IndexingPolicy indexingMode = new IndexingPolicy(); + indexingMode.indexingMode(IndexingMode.LAZY); + collectionSettings.indexingPolicy(indexingMode); + Mono readObservable = collection.replace(collectionSettings, new CosmosContainerRequestOptions()); + + // validate + CosmosResponseValidator validator = new CosmosResponseValidator.Builder() + .indexingMode(IndexingMode.LAZY).build(); + validateSuccess(readObservable, validator); + safeDeleteAllCollections(database); + } + + @Test(groups = { "emulator" }, timeOut = 10 * TIMEOUT, retryAnalyzer = RetryAnalyzer.class) + public void sessionTokenConsistencyCollectionDeleteCreateSameName() { + CosmosClient client1 = clientBuilder().build(); + CosmosClient client2 = clientBuilder().build(); + + String dbId = CosmosDatabaseForTest.generateId(); + String collectionId = 
"coll"; + CosmosDatabase db = null; + try { + Database databaseDefinition = new Database(); + databaseDefinition.id(dbId); + db = createDatabase(client1, dbId); + + PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition(); + ArrayList paths = new ArrayList(); + paths.add("/mypk"); + partitionKeyDef.paths(paths); + + CosmosContainerProperties collectionDefinition = new CosmosContainerProperties(collectionId, partitionKeyDef); + CosmosContainer collection = createCollection(db, collectionDefinition, new CosmosContainerRequestOptions()); + + CosmosItemProperties document = new CosmosItemProperties(); + document.id("doc"); + BridgeInternal.setProperty(document, "name", "New Document"); + BridgeInternal.setProperty(document, "mypk", "mypkValue"); + CosmosItem item = createDocument(collection, document); + CosmosItemRequestOptions options = new CosmosItemRequestOptions(); + options.partitionKey(new PartitionKey("mypkValue")); + CosmosItemResponse readDocumentResponse = item.read(options).block(); + logger.info("Client 1 READ Document Client Side Request Statistics {}", readDocumentResponse.cosmosResponseDiagnosticsString()); + logger.info("Client 1 READ Document Latency {}", readDocumentResponse.requestLatency()); + + BridgeInternal.setProperty(document, "name", "New Updated Document"); + CosmosItemResponse upsertDocumentResponse = collection.upsertItem(document).block(); + logger.info("Client 1 Upsert Document Client Side Request Statistics {}", upsertDocumentResponse.cosmosResponseDiagnosticsString()); + logger.info("Client 1 Upsert Document Latency {}", upsertDocumentResponse.requestLatency()); + + // DELETE the existing collection + deleteCollection(client2, dbId, collectionId); + // Recreate the collection with the same name but with different client + CosmosContainer collection2 = createCollection(client2, dbId, collectionDefinition); + + CosmosItemProperties newDocument = new CosmosItemProperties(); + newDocument.id("doc"); + BridgeInternal.setProperty(newDocument, "name", "New Created Document"); + BridgeInternal.setProperty(newDocument, "mypk", "mypk"); + createDocument(collection2, newDocument); + + readDocumentResponse = client1.getDatabase(dbId).getContainer(collectionId).getItem(newDocument.id(), newDocument.get("mypk")).read().block(); + logger.info("Client 2 READ Document Client Side Request Statistics {}", readDocumentResponse.cosmosResponseDiagnosticsString()); + logger.info("Client 2 READ Document Latency {}", readDocumentResponse.requestLatency()); + + CosmosItemProperties readDocument = readDocumentResponse.properties(); + + assertThat(readDocument.id().equals(newDocument.id())).isTrue(); + assertThat(readDocument.get("name").equals(newDocument.get("name"))).isTrue(); + } finally { + safeDeleteDatabase(db); + safeClose(client1); + safeClose(client2); + } + } + + @BeforeClass(groups = { "emulator" }, timeOut = SETUP_TIMEOUT) + public void beforeClass() { + client = clientBuilder().build(); + database = createDatabase(client, databaseId); + } + + @AfterClass(groups = { "emulator" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeDeleteDatabase(database); + safeClose(client); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/CollectionQueryTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/CollectionQueryTest.java new file mode 100644 index 0000000000000..3e4a71a71c85e --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/CollectionQueryTest.java @@ -0,0 +1,153 @@ +/* + * 
The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.CosmosClient; +import com.azure.data.cosmos.CosmosClientBuilder; +import com.azure.data.cosmos.CosmosContainer; +import com.azure.data.cosmos.CosmosContainerProperties; +import com.azure.data.cosmos.CosmosDatabase; +import com.azure.data.cosmos.CosmosDatabaseForTest; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.PartitionKeyDefinition; +import com.azure.data.cosmos.internal.FeedResponseListValidator; +import com.azure.data.cosmos.internal.FeedResponseValidator; +import org.apache.commons.lang3.StringUtils; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; + +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.stream.Collectors; + +import static org.assertj.core.api.Assertions.assertThat; + +public class CollectionQueryTest extends TestSuiteBase { + private final static int TIMEOUT = 30000; + private final String databaseId = CosmosDatabaseForTest.generateId(); + private List createdCollections = new ArrayList<>(); + private CosmosClient client; + private CosmosDatabase createdDatabase; + + @Factory(dataProvider = "clientBuilders") + public CollectionQueryTest(CosmosClientBuilder clientBuilder) { + super(clientBuilder); + this.subscriberValidationTimeout = TIMEOUT; + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void queryCollectionsWithFilter() throws Exception { + + String filterCollectionId = createdCollections.get(0).id(); + String query = String.format("SELECT * from c where c.id = '%s'", filterCollectionId); + + FeedOptions options = new FeedOptions(); + options.maxItemCount(2); + Flux> queryObservable = createdDatabase.queryContainers(query, options); + + List expectedCollections = createdCollections.stream() + .filter(c -> StringUtils.equals(filterCollectionId, c.id()) ).collect(Collectors.toList()); + + assertThat(expectedCollections).isNotEmpty(); + + int expectedPageSize = (expectedCollections.size() + options.maxItemCount() - 1) / options.maxItemCount(); + + FeedResponseListValidator validator = new FeedResponseListValidator.Builder() + .totalSize(expectedCollections.size()) + 
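+                // note: the expected values are resourceIds fetched via read(), not the user-assigned collection ids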
.exactlyContainsInAnyOrder(expectedCollections.stream().map(d -> d.read().block().properties().resourceId()).collect(Collectors.toList())) + .numberOfPages(expectedPageSize) + .pageSatisfy(0, new FeedResponseValidator.Builder() + .requestChargeGreaterThanOrEqualTo(1.0).build()) + .build(); + + validateQuerySuccess(queryObservable, validator, 10000); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void queryAllCollections() throws Exception { + + String query = "SELECT * from c"; + + FeedOptions options = new FeedOptions(); + options.maxItemCount(2); + Flux> queryObservable = createdDatabase.queryContainers(query, options); + + List expectedCollections = createdCollections; + + assertThat(expectedCollections).isNotEmpty(); + + int expectedPageSize = (expectedCollections.size() + options.maxItemCount() - 1) / options.maxItemCount(); + + FeedResponseListValidator validator = new FeedResponseListValidator.Builder() + .totalSize(expectedCollections.size()) + .exactlyContainsInAnyOrder(expectedCollections.stream().map(d -> d.read().block().properties().resourceId()).collect(Collectors.toList())) + .numberOfPages(expectedPageSize) + .pageSatisfy(0, new FeedResponseValidator.Builder() + .requestChargeGreaterThanOrEqualTo(1.0).build()) + .build(); + + validateQuerySuccess(queryObservable, validator, 10000); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void queryCollections_NoResults() throws Exception { + + String query = "SELECT * from root r where r.id = '2'"; + FeedOptions options = new FeedOptions(); + options.enableCrossPartitionQuery(true); + Flux> queryObservable = createdDatabase.queryContainers(query, options); + + FeedResponseListValidator validator = new FeedResponseListValidator.Builder() + .containsExactly(new ArrayList<>()) + .numberOfPages(1) + .pageSatisfy(0, new FeedResponseValidator.Builder() + .requestChargeGreaterThanOrEqualTo(1.0).build()) + .build(); + validateQuerySuccess(queryObservable, validator); + } + + @BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT) + public void beforeClass() throws Exception { + client = clientBuilder().build(); + createdDatabase = createDatabase(client, databaseId); + + PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition(); + ArrayList paths = new ArrayList(); + paths.add("/mypk"); + partitionKeyDef.paths(paths); + + CosmosContainerProperties collection = new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDef); + createdCollections.add(createCollection(client, databaseId, collection)); + } + + @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeDeleteDatabase(createdDatabase); + safeClose(client); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/CosmosConflictTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/CosmosConflictTest.java new file mode 100644 index 0000000000000..62d6d330a21b4 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/CosmosConflictTest.java @@ -0,0 +1,98 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software 
is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.CosmosClient; +import com.azure.data.cosmos.CosmosClientBuilder; +import com.azure.data.cosmos.CosmosConflictProperties; +import com.azure.data.cosmos.CosmosContainer; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.internal.HttpConstants; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; + +import java.util.Iterator; + +import static org.assertj.core.api.Assertions.assertThat; + +public class CosmosConflictTest extends TestSuiteBase { + + private CosmosContainer createdCollection; + + private CosmosClient client; + + @Factory(dataProvider = "clientBuilders") + public CosmosConflictTest(CosmosClientBuilder clientBuilder) { + super(clientBuilder); + } + + @Test(groups = {"simple"}, timeOut = TIMEOUT) + public void readConflicts_toBlocking_toIterator() { + + int requestPageSize = 3; + FeedOptions options = new FeedOptions(); + options.maxItemCount(requestPageSize); + + Flux> conflictReadFeedFlux = createdCollection.readAllConflicts(options); + + Iterator> it = conflictReadFeedFlux.toIterable().iterator(); + + int expectedNumberOfConflicts = 0; + + int numberOfResults = 0; + while (it.hasNext()) { + FeedResponse page = it.next(); + String pageSizeAsString = page.responseHeaders().get(HttpConstants.HttpHeaders.ITEM_COUNT); + assertThat(pageSizeAsString).isNotNull(); + // assertThat("header item count must be present", pageSizeAsString, notNullValue()); + int pageSize = Integer.valueOf(pageSizeAsString); + // Assert that Result size must match header item count + assertThat(page.results().size()).isEqualTo(pageSize); + numberOfResults += pageSize; + } + assertThat(numberOfResults).isEqualTo(expectedNumberOfConflicts); + } + + + @BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT) + public void beforeClass() { + client = clientBuilder().build(); + createdCollection = getSharedMultiPartitionCosmosContainer(client); + } + + @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeClose(client); + } + + @BeforeMethod(groups = { "simple" }, timeOut = SETUP_TIMEOUT) + public void beforeMethod() { + safeClose(client); + client = clientBuilder().build(); + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/DatabaseCrudTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/DatabaseCrudTest.java new file mode 100644 index 0000000000000..a651197fbf2b3 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/DatabaseCrudTest.java @@ -0,0 +1,145 @@ +/* 
+ * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.CosmosClient; +import com.azure.data.cosmos.CosmosClientBuilder; +import com.azure.data.cosmos.CosmosDatabase; +import com.azure.data.cosmos.CosmosDatabaseForTest; +import com.azure.data.cosmos.CosmosDatabaseProperties; +import com.azure.data.cosmos.CosmosDatabaseRequestOptions; +import com.azure.data.cosmos.CosmosDatabaseResponse; +import com.azure.data.cosmos.CosmosResponseValidator; +import com.azure.data.cosmos.internal.FailureValidator; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Mono; + +import java.util.ArrayList; +import java.util.List; + +public class DatabaseCrudTest extends TestSuiteBase { + private final String preExistingDatabaseId = CosmosDatabaseForTest.generateId(); + private final List databases = new ArrayList<>(); + private CosmosClient client; + private CosmosDatabase createdDatabase; + + @Factory(dataProvider = "clientBuilders") + public DatabaseCrudTest(CosmosClientBuilder clientBuilder) { + super(clientBuilder); + } + + @Test(groups = { "emulator" }, timeOut = TIMEOUT) + public void createDatabase() throws Exception { + CosmosDatabaseProperties databaseDefinition = new CosmosDatabaseProperties(CosmosDatabaseForTest.generateId()); + databases.add(databaseDefinition.id()); + + // create the database + Mono createObservable = client.createDatabase(databaseDefinition, new CosmosDatabaseRequestOptions()); + + // validate + CosmosResponseValidator validator = new CosmosResponseValidator.Builder() + .withId(databaseDefinition.id()).build(); + validateSuccess(createObservable, validator); + } + + @Test(groups = { "emulator" }, timeOut = TIMEOUT) + public void createDatabase_AlreadyExists() throws Exception { + CosmosDatabaseProperties databaseDefinition = new CosmosDatabaseProperties(CosmosDatabaseForTest.generateId()); + databases.add(databaseDefinition.id()); + + client.createDatabase(databaseDefinition, new CosmosDatabaseRequestOptions()).block(); + + // attempt to create the database + Mono createObservable = client.createDatabase(databaseDefinition, new CosmosDatabaseRequestOptions()); + + // validate + FailureValidator validator = new FailureValidator.Builder().resourceAlreadyExists().build(); + 
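+        // The second create of the same database id is expected to be rejected by the service
+        // (an HTTP 409 Conflict); resourceAlreadyExists() asserts that failure below.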
validateFailure(createObservable, validator); + } + + @Test(groups = { "emulator" }, timeOut = TIMEOUT) + public void readDatabase() throws Exception { + // read database + Mono readObservable = client.getDatabase(preExistingDatabaseId).read(); + + // validate + CosmosResponseValidator validator = new CosmosResponseValidator.Builder() + .withId(preExistingDatabaseId).build(); + validateSuccess(readObservable, validator); + } + + @Test(groups = { "emulator" }, timeOut = TIMEOUT) + public void readDatabase_DoesntExist() throws Exception { + // read database + Mono readObservable = client.getDatabase("I don't exist").read(); + + // validate + FailureValidator validator = new FailureValidator.Builder().resourceNotFound().build(); + validateFailure(readObservable, validator); + } + + + @Test(groups = { "emulator" }, timeOut = TIMEOUT) + public void deleteDatabase() throws Exception { + // create the database + CosmosDatabaseProperties databaseDefinition = new CosmosDatabaseProperties(CosmosDatabaseForTest.generateId()); + databases.add(databaseDefinition.id()); + CosmosDatabase database = client.createDatabase(databaseDefinition, new CosmosDatabaseRequestOptions()).block().database(); + + // delete the database + Mono deleteObservable = database.delete(); + + // validate + CosmosResponseValidator validator = new CosmosResponseValidator.Builder() + .nullResource().build(); + validateSuccess(deleteObservable, validator); + } + + @Test(groups = { "emulator" }, timeOut = TIMEOUT) + public void deleteDatabase_DoesntExist() throws Exception { + // delete the database + Mono deleteObservable = client.getDatabase("I don't exist").delete(); + + // validate + FailureValidator validator = new FailureValidator.Builder().resourceNotFound().build(); + validateFailure(deleteObservable, validator); + } + + @BeforeClass(groups = { "emulator" }, timeOut = SETUP_TIMEOUT) + public void beforeClass() { + client = clientBuilder().build(); + createdDatabase = createDatabase(client, preExistingDatabaseId); + } + + @AfterClass(groups = { "emulator" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeDeleteDatabase(createdDatabase); + for(String dbId: databases) { + safeDeleteDatabase(client.getDatabase(dbId)); + } + safeClose(client); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/DatabaseQueryTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/DatabaseQueryTest.java new file mode 100644 index 0000000000000..7e5d94953edf4 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/DatabaseQueryTest.java @@ -0,0 +1,146 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.CosmosClient; +import com.azure.data.cosmos.CosmosClientBuilder; +import com.azure.data.cosmos.CosmosDatabase; +import com.azure.data.cosmos.CosmosDatabaseForTest; +import com.azure.data.cosmos.CosmosDatabaseProperties; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.internal.FeedResponseListValidator; +import com.azure.data.cosmos.internal.FeedResponseValidator; +import org.apache.commons.lang3.StringUtils; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; + +import java.util.ArrayList; +import java.util.List; +import java.util.stream.Collectors; + +import static org.assertj.core.api.Assertions.assertThat; + +public class DatabaseQueryTest extends TestSuiteBase { + + public final String databaseId1 = CosmosDatabaseForTest.generateId(); + public final String databaseId2 = CosmosDatabaseForTest.generateId(); + + private List createdDatabases = new ArrayList<>(); + + private CosmosClient client; + + @Factory(dataProvider = "clientBuilders") + public DatabaseQueryTest(CosmosClientBuilder clientBuilder) { + super(clientBuilder); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void queryDatabaseWithFilter() throws Exception { + String query = String.format("SELECT * from c where c.id = '%s'", databaseId1); + + FeedOptions options = new FeedOptions(); + options.maxItemCount(2); + Flux> queryObservable = client.queryDatabases(query, options); + + List expectedDatabases = createdDatabases.stream() + .filter(d -> StringUtils.equals(databaseId1, d.id()) ).map(d -> d.read().block().properties()).collect(Collectors.toList()); + + assertThat(expectedDatabases).isNotEmpty(); + + int expectedPageSize = (expectedDatabases.size() + options.maxItemCount() - 1) / options.maxItemCount(); + + FeedResponseListValidator validator = new FeedResponseListValidator.Builder() + .totalSize(expectedDatabases.size()) + .exactlyContainsInAnyOrder(expectedDatabases.stream().map(d -> d.resourceId()).collect(Collectors.toList())) + .numberOfPages(expectedPageSize) + .pageSatisfy(0, new FeedResponseValidator.Builder() + .requestChargeGreaterThanOrEqualTo(1.0).build()) + .build(); + + validateQuerySuccess(queryObservable, validator, 10000); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void queryAllDatabase() throws Exception { + + String query = String.format("SELECT * from c where c.id in ('%s', '%s')", + databaseId1, + databaseId2); + + FeedOptions options = new FeedOptions(); + options.maxItemCount(2); + Flux> queryObservable = client.queryDatabases(query, options); + + List expectedDatabases = createdDatabases.stream().map(d -> d.read().block().properties()).collect(Collectors.toList()); + + assertThat(expectedDatabases).isNotEmpty(); + + int expectedPageSize = (expectedDatabases.size() + options.maxItemCount() - 1) / options.maxItemCount(); + + FeedResponseListValidator validator = new FeedResponseListValidator.Builder() + .totalSize(expectedDatabases.size()) + .exactlyContainsInAnyOrder(expectedDatabases.stream().map(d -> 
d.resourceId()).collect(Collectors.toList())) + .numberOfPages(expectedPageSize) + .pageSatisfy(0, new FeedResponseValidator.Builder() + .requestChargeGreaterThanOrEqualTo(1.0).build()) + .build(); + + validateQuerySuccess(queryObservable, validator, 10000); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void queryDatabases_NoResults() throws Exception { + + String query = "SELECT * from root r where r.id = '2'"; + FeedOptions options = new FeedOptions(); + options.enableCrossPartitionQuery(true); + Flux> queryObservable = client.queryDatabases(query, options); + + FeedResponseListValidator validator = new FeedResponseListValidator.Builder() + .containsExactly(new ArrayList<>()) + .numberOfPages(1) + .pageSatisfy(0, new FeedResponseValidator.Builder() + .requestChargeGreaterThanOrEqualTo(1.0).build()) + .build(); + validateQuerySuccess(queryObservable, validator); + } + + @BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT) + public void beforeClass() throws Exception { + client = clientBuilder().build(); + createdDatabases.add(createDatabase(client, databaseId1)); + createdDatabases.add(createDatabase(client, databaseId2)); + } + + @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeDeleteDatabase(createdDatabases.get(0)); + safeDeleteDatabase(createdDatabases.get(1)); + + safeClose(client); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/DocumentClientResourceLeakTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/DocumentClientResourceLeakTest.java new file mode 100644 index 0000000000000..5ff9d51ee28cc --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/DocumentClientResourceLeakTest.java @@ -0,0 +1,104 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.CosmosClient; +import com.azure.data.cosmos.CosmosClientBuilder; +import com.azure.data.cosmos.CosmosContainer; +import com.azure.data.cosmos.CosmosDatabase; +import com.azure.data.cosmos.CosmosItemProperties; +import com.google.common.base.Strings; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; + +import java.util.UUID; +import java.util.concurrent.TimeUnit; + +import static org.apache.commons.io.FileUtils.ONE_MB; +import static org.assertj.core.api.Assertions.assertThat; + +public class DocumentClientResourceLeakTest extends TestSuiteBase { + + private static final int TIMEOUT = 2400000; + private static final int MAX_NUMBER = 1000; + + private CosmosDatabase createdDatabase; + private CosmosContainer createdCollection; + + @Factory(dataProvider = "simpleClientBuildersWithDirect") + public DocumentClientResourceLeakTest(CosmosClientBuilder clientBuilder) { + super(clientBuilder); + } + + @Test(enabled = false, groups = {"emulator"}, timeOut = TIMEOUT) + public void resourceLeak() throws Exception { + + System.gc(); + TimeUnit.SECONDS.sleep(10); + long usedMemoryInBytesBefore = (Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory()); + + for (int i = 0; i < MAX_NUMBER; i++) { + logger.info("CLIENT {}", i); + CosmosClient client = this.clientBuilder().build(); + try { + logger.info("creating document"); + createDocument(client.getDatabase(createdDatabase.id()).getContainer(createdCollection.id()), + getDocumentDefinition()); + } finally { + logger.info("closing client"); + client.close(); + } + } + + System.gc(); + TimeUnit.SECONDS.sleep(10); + + long usedMemoryInBytesAfter = (Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory()); + + logger.info("Memory delta: {} - {} = {} MB", + usedMemoryInBytesAfter / (double)ONE_MB, + usedMemoryInBytesBefore / (double)ONE_MB, + (usedMemoryInBytesAfter - usedMemoryInBytesBefore) / (double)ONE_MB); + + assertThat(usedMemoryInBytesAfter - usedMemoryInBytesBefore).isLessThan(125 * ONE_MB); + } + + @BeforeClass(groups = {"emulator"}, timeOut = SETUP_TIMEOUT) + public void beforeClass() { + CosmosClient client = this.clientBuilder().build(); + try { + createdDatabase = getSharedCosmosDatabase(client); + createdCollection = getSharedMultiPartitionCosmosContainer(client); + } finally { + client.close(); + } + } + + private CosmosItemProperties getDocumentDefinition() { + String uuid = UUID.randomUUID().toString(); + return new CosmosItemProperties(Strings.lenientFormat( + "{\"id\":\"%s\",\"mypk\":\"%s\",\"sgmts\":[[6519456,1471916863],[2498434,1455671440]]}", uuid, uuid + )); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/DocumentCrudTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/DocumentCrudTest.java new file mode 100644 index 0000000000000..6ae3c8c730f65 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/DocumentCrudTest.java @@ -0,0 +1,365 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + 
* furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.CosmosClient; +import com.azure.data.cosmos.CosmosClientBuilder; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.CosmosContainer; +import com.azure.data.cosmos.CosmosItem; +import com.azure.data.cosmos.CosmosItemProperties; +import com.azure.data.cosmos.CosmosItemRequestOptions; +import com.azure.data.cosmos.CosmosItemResponse; +import com.azure.data.cosmos.CosmosResponseValidator; +import com.azure.data.cosmos.PartitionKey; +import com.azure.data.cosmos.internal.FailureValidator; +import org.apache.commons.lang3.StringUtils; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Mono; + +import java.time.Duration; +import java.time.OffsetDateTime; +import java.util.UUID; +import java.util.concurrent.TimeoutException; + +import static org.apache.commons.io.FileUtils.ONE_MB; +import static org.assertj.core.api.Assertions.assertThat; + +public class DocumentCrudTest extends TestSuiteBase { + + private CosmosClient client; + private CosmosContainer container; + + @Factory(dataProvider = "clientBuildersWithDirect") + public DocumentCrudTest(CosmosClientBuilder clientBuilder) { + super(clientBuilder); + } + + @DataProvider(name = "documentCrudArgProvider") + public Object[][] documentCrudArgProvider() { + return new Object[][] { + // collection name, is name base + { UUID.randomUUID().toString() }, + // with special characters in the name. 
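+            // (an id with spaces and punctuation should round-trip unchanged through create/read/delete)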
+ { "+ -_,:.|~" + UUID.randomUUID().toString() + " +-_,:.|~" }, + }; + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "documentCrudArgProvider") + public void createDocument(String documentId) throws InterruptedException { + + CosmosItemProperties properties = getDocumentDefinition(documentId); + Mono createObservable = container.createItem(properties, new CosmosItemRequestOptions()); + + CosmosResponseValidator validator = new CosmosResponseValidator.Builder() + .withId(properties.id()) + .build(); + + validateSuccess(createObservable, validator); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "documentCrudArgProvider") + public void createLargeDocument(String documentId) throws InterruptedException { + CosmosItemProperties docDefinition = getDocumentDefinition(documentId); + + //Keep size as ~ 1.5MB to account for size of other props + int size = (int) (ONE_MB * 1.5); + BridgeInternal.setProperty(docDefinition, "largeString", StringUtils.repeat("x", size)); + + Mono createObservable = container.createItem(docDefinition, new CosmosItemRequestOptions()); + + CosmosResponseValidator validator = new CosmosResponseValidator.Builder() + .withId(docDefinition.id()) + .build(); + + validateSuccess(createObservable, validator); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "documentCrudArgProvider") + public void createDocumentWithVeryLargePartitionKey(String documentId) throws InterruptedException { + CosmosItemProperties docDefinition = getDocumentDefinition(documentId); + StringBuilder sb = new StringBuilder(); + for(int i = 0; i < 100; i++) { + sb.append(i).append("x"); + } + BridgeInternal.setProperty(docDefinition, "mypk", sb.toString()); + + Mono createObservable = container.createItem(docDefinition, new CosmosItemRequestOptions()); + + CosmosResponseValidator validator = new CosmosResponseValidator.Builder() + .withId(docDefinition.id()) + .withProperty("mypk", sb.toString()) + .build(); + validateSuccess(createObservable, validator); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "documentCrudArgProvider") + public void readDocumentWithVeryLargePartitionKey(String documentId) throws InterruptedException { + CosmosItemProperties docDefinition = getDocumentDefinition(documentId); + StringBuilder sb = new StringBuilder(); + for(int i = 0; i < 100; i++) { + sb.append(i).append("x"); + } + BridgeInternal.setProperty(docDefinition, "mypk", sb.toString()); + + CosmosItem createdDocument = TestSuiteBase.createDocument(container, docDefinition); + + waitIfNeededForReplicasToCatchUp(clientBuilder()); + + CosmosItemRequestOptions options = new CosmosItemRequestOptions(); + options.partitionKey(new PartitionKey(sb.toString())); + Mono readObservable = createdDocument.read(options); + + CosmosResponseValidator validator = new CosmosResponseValidator.Builder() + .withId(docDefinition.id()) + .withProperty("mypk", sb.toString()) + .build(); + validateSuccess(readObservable, validator); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "documentCrudArgProvider") + public void createDocument_AlreadyExists(String documentId) throws InterruptedException { + CosmosItemProperties docDefinition = getDocumentDefinition(documentId); + container.createItem(docDefinition, new CosmosItemRequestOptions()).block(); + Mono createObservable = container.createItem(docDefinition, new CosmosItemRequestOptions()); + FailureValidator validator = new FailureValidator.Builder().resourceAlreadyExists().build(); 
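+        // createItem (unlike upsertItem) must not overwrite an existing item with the same id and
+        // partition key, so the second create is expected to fail.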
+ validateFailure(createObservable, validator); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "documentCrudArgProvider") + public void createDocumentTimeout(String documentId) throws InterruptedException { + CosmosItemProperties docDefinition = getDocumentDefinition(documentId); + Mono createObservable = container.createItem(docDefinition, new CosmosItemRequestOptions()).timeout(Duration.ofMillis(1)); + FailureValidator validator = new FailureValidator.Builder().instanceOf(TimeoutException.class).build(); + validateFailure(createObservable, validator); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "documentCrudArgProvider") + public void readDocument(String documentId) throws InterruptedException { + + CosmosItemProperties docDefinition = getDocumentDefinition(documentId); + CosmosItem document = container.createItem(docDefinition, new CosmosItemRequestOptions()).block().item(); + + waitIfNeededForReplicasToCatchUp(clientBuilder()); + + CosmosItemRequestOptions options = new CosmosItemRequestOptions(); + options.partitionKey(new PartitionKey(docDefinition.get("mypk"))); + Mono readObservable = document.read(options); + + CosmosResponseValidator validator = new CosmosResponseValidator.Builder() + .withId(document.id()) + .build(); + + validateSuccess(readObservable, validator); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "documentCrudArgProvider") + public void timestamp(String documentId) throws Exception { + OffsetDateTime before = OffsetDateTime.now(); + CosmosItemProperties docDefinition = getDocumentDefinition(documentId); + Thread.sleep(1000); + CosmosItem document = container.createItem(docDefinition, new CosmosItemRequestOptions()).block().item(); + + waitIfNeededForReplicasToCatchUp(clientBuilder()); + + CosmosItemRequestOptions options = new CosmosItemRequestOptions(); + options.partitionKey(new PartitionKey(docDefinition.get("mypk"))); + CosmosItemProperties readDocument = document.read(options).block().properties(); + Thread.sleep(1000); + OffsetDateTime after = OffsetDateTime.now(); + + assertThat(readDocument.timestamp()).isAfterOrEqualTo(before); + assertThat(readDocument.timestamp()).isBeforeOrEqualTo(after); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "documentCrudArgProvider") + public void readDocument_DoesntExist(String documentId) throws InterruptedException { + CosmosItemProperties docDefinition = getDocumentDefinition(documentId); + + CosmosItem document = container.createItem(docDefinition, new CosmosItemRequestOptions()).block().item(); + + CosmosItemRequestOptions options = new CosmosItemRequestOptions(); + options.partitionKey(new PartitionKey(docDefinition.get("mypk"))); + document.delete(options).block(); + + waitIfNeededForReplicasToCatchUp(clientBuilder()); + + options.partitionKey(new PartitionKey("looloo")); + Mono readObservable = document.read(options); + + FailureValidator validator = new FailureValidator.Builder().instanceOf(CosmosClientException.class) + .statusCode(404).build(); + validateFailure(readObservable, validator); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "documentCrudArgProvider") + public void deleteDocument(String documentId) throws InterruptedException { + CosmosItemProperties docDefinition = getDocumentDefinition(documentId); + + CosmosItem document = container.createItem(docDefinition, new CosmosItemRequestOptions()).block().item(); + + CosmosItemRequestOptions options = new CosmosItemRequestOptions(); + 
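+        // Point reads and deletes need the item's partition key; it is taken from the document definition below.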
options.partitionKey(new PartitionKey(docDefinition.get("mypk"))); + Mono deleteObservable = document.delete(options); + + + CosmosResponseValidator validator = new CosmosResponseValidator.Builder() + .nullResource().build(); + validateSuccess(deleteObservable, validator); + + // attempt to read document which was deleted + waitIfNeededForReplicasToCatchUp(clientBuilder()); + + Mono readObservable = document.read(options); + FailureValidator notFoundValidator = new FailureValidator.Builder().resourceNotFound().build(); + validateFailure(readObservable, notFoundValidator); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "documentCrudArgProvider") + public void deleteDocument_undefinedPK(String documentId) throws InterruptedException { + CosmosItemProperties docDefinition = new CosmosItemProperties(); + docDefinition.id(documentId); + + CosmosItem document = container.createItem(docDefinition, new CosmosItemRequestOptions()).block().item(); + + CosmosItemRequestOptions options = new CosmosItemRequestOptions(); + options.partitionKey(PartitionKey.None); + Mono deleteObservable = document.delete(options); + + CosmosResponseValidator validator = new CosmosResponseValidator.Builder() + .nullResource().build(); + validateSuccess(deleteObservable, validator); + + // attempt to read document which was deleted + waitIfNeededForReplicasToCatchUp(clientBuilder()); + + Mono readObservable = document.read(options); + FailureValidator notFoundValidator = new FailureValidator.Builder().resourceNotFound().build(); + validateFailure(readObservable, notFoundValidator); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "documentCrudArgProvider") + public void deleteDocument_DoesntExist(String documentId) throws InterruptedException { + CosmosItemProperties docDefinition = getDocumentDefinition(documentId); + + CosmosItem document = container.createItem(docDefinition, new CosmosItemRequestOptions()).block().item(); + + CosmosItemRequestOptions options = new CosmosItemRequestOptions(); + options.partitionKey(new PartitionKey(docDefinition.get("mypk"))); + document.delete(options).block(); + + // delete again + Mono deleteObservable = document.delete(options); + + FailureValidator validator = new FailureValidator.Builder().resourceNotFound().build(); + validateFailure(deleteObservable, validator); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "documentCrudArgProvider") + public void replaceDocument(String documentId) throws InterruptedException { + // create a document + CosmosItemProperties docDefinition = getDocumentDefinition(documentId); + + CosmosItem document = container.createItem(docDefinition, new CosmosItemRequestOptions()).block().item(); + + String newPropValue = UUID.randomUUID().toString(); + BridgeInternal.setProperty(docDefinition, "newProp", newPropValue); + + CosmosItemRequestOptions options = new CosmosItemRequestOptions(); + options.partitionKey(new PartitionKey(docDefinition.get("mypk"))); + // replace document + Mono replaceObservable = document.replace(docDefinition, options); + + // validate + CosmosResponseValidator validator = new CosmosResponseValidator.Builder() + .withProperty("newProp", newPropValue).build(); + validateSuccess(replaceObservable, validator); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "documentCrudArgProvider") + public void upsertDocument_CreateDocument(String documentId) throws Throwable { + // create a document + CosmosItemProperties docDefinition = 
getDocumentDefinition(documentId); + + + // replace document + Mono upsertObservable = container.upsertItem(docDefinition, new CosmosItemRequestOptions()); + + // validate + CosmosResponseValidator validator = new CosmosResponseValidator.Builder() + .withId(docDefinition.id()).build(); + + validateSuccess(upsertObservable, validator); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "documentCrudArgProvider") + public void upsertDocument_ReplaceDocument(String documentId) throws Throwable { + + CosmosItemProperties properties = getDocumentDefinition(documentId); + properties = container.createItem(properties, new CosmosItemRequestOptions()).block().properties(); + + String newPropValue = UUID.randomUUID().toString(); + BridgeInternal.setProperty(properties, "newProp", newPropValue); + + // Replace document + + Mono readObservable = container.upsertItem(properties, new CosmosItemRequestOptions()); + System.out.println(properties); + + // Validate result + + CosmosResponseValidator validator = new CosmosResponseValidator.Builder() + .withProperty("newProp", newPropValue).build(); + + validateSuccess(readObservable, validator); + } + + @BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT) + public void beforeClass() { + assertThat(this.client).isNull(); + this.client = this.clientBuilder().build(); + this.container = getSharedMultiPartitionCosmosContainer(this.client); + } + + @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + assertThat(this.client).isNotNull(); + this.client.close(); + } + + private CosmosItemProperties getDocumentDefinition(String documentId) { + final String uuid = UUID.randomUUID().toString(); + final CosmosItemProperties properties = new CosmosItemProperties(String.format("{ " + + "\"id\": \"%s\", " + + "\"mypk\": \"%s\", " + + "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]" + + "}" + , documentId, uuid)); + return properties; + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/LogLevelTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/LogLevelTest.java new file mode 100644 index 0000000000000..290526847e061 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/LogLevelTest.java @@ -0,0 +1,295 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.CosmosClient; +import com.azure.data.cosmos.CosmosContainer; +import com.azure.data.cosmos.CosmosItemProperties; +import com.azure.data.cosmos.CosmosItemRequestOptions; +import com.azure.data.cosmos.CosmosItemResponse; +import com.azure.data.cosmos.CosmosResponseValidator; +import org.apache.log4j.Level; +import org.apache.log4j.LogManager; +import org.apache.log4j.Logger; +import org.apache.log4j.PatternLayout; +import org.apache.log4j.PropertyConfigurator; +import org.apache.log4j.WriterAppender; +import org.testng.annotations.AfterClass; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; +import reactor.core.publisher.Mono; + +import java.io.StringWriter; +import java.lang.reflect.Method; +import java.util.UUID; + +import static org.assertj.core.api.Assertions.assertThat; + +public class LogLevelTest extends TestSuiteBase { + public final static String COSMOS_DB_LOGGING_CATEGORY = "com.azure.data.cosmos"; + public final static String NETWORK_LOGGING_CATEGORY = "com.azure.data.cosmos.netty-network"; + public final static String LOG_PATTERN_1 = "HTTP/1.1 200 Ok."; + public final static String LOG_PATTERN_2 = "| 0 1 2 3 4 5 6 7 8 9 a b c d e f |"; + public final static String LOG_PATTERN_3 = "USER_EVENT: SslHandshakeCompletionEvent(SUCCESS)"; + public final static String LOG_PATTERN_4 = "CONNECT: "; + + private static CosmosContainer createdCollection; + private static CosmosClient client; + + public LogLevelTest() { + super(createGatewayRxDocumentClient()); + } + + @BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT) + public void beforeClass() { + client = clientBuilder().build(); + createdCollection = getSharedMultiPartitionCosmosContainer(client); + } + + /** + * This test will try to create document with netty wire DEBUG logging and + * validate it. + * + * @throws Exception + */ + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void createDocumentWithDebugLevel() throws Exception { + LogManager.getLogger(NETWORK_LOGGING_CATEGORY).setLevel(Level.DEBUG); + StringWriter consoleWriter = new StringWriter(); + WriterAppender appender = new WriterAppender(new PatternLayout(), consoleWriter); + LogManager.getLogger(NETWORK_LOGGING_CATEGORY).addAppender(appender); + + CosmosClient client = clientBuilder().build(); + try { + CosmosItemProperties docDefinition = getDocumentDefinition(); + Mono createObservable = createdCollection.createItem(docDefinition, + new CosmosItemRequestOptions()); + CosmosResponseValidator validator = new CosmosResponseValidator.Builder() + .withId(docDefinition.id()).build(); + validateSuccess(createObservable, validator); + + assertThat(consoleWriter.toString()).isEmpty(); + + } finally { + safeClose(client); + } + } + + /** + * This test will try to create document with netty wire WARN logging and + * validate it. 
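+     * At WARN the netty wire logger should emit nothing, so the captured appender output is expected to stay empty.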
+ * + * @throws Exception + */ + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void createDocumentWithWarningLevel() throws Exception { + LogManager.getRootLogger().setLevel(Level.INFO); + LogManager.getLogger(NETWORK_LOGGING_CATEGORY).setLevel(Level.WARN); + StringWriter consoleWriter = new StringWriter(); + WriterAppender appender = new WriterAppender(new PatternLayout(), consoleWriter); + Logger.getLogger(NETWORK_LOGGING_CATEGORY).addAppender(appender); + + CosmosClient client = clientBuilder().build(); + try { + CosmosItemProperties docDefinition = getDocumentDefinition(); + Mono createObservable = createdCollection.createItem(docDefinition, + new CosmosItemRequestOptions()); + CosmosResponseValidator validator = new CosmosResponseValidator.Builder() + .withId(docDefinition.id()).build(); + validateSuccess(createObservable, validator); + + assertThat(consoleWriter.toString()).isEmpty(); + } finally { + safeClose(client); + } + } + + /** + * This test will try to create document with netty wire TRACE logging and + * validate it. + * + * @throws Exception + */ + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void createDocumentWithTraceLevel() throws Exception { + LogManager.getRootLogger().setLevel(Level.INFO); + LogManager.getLogger(NETWORK_LOGGING_CATEGORY).setLevel(Level.TRACE); + StringWriter consoleWriter = new StringWriter(); + WriterAppender appender = new WriterAppender(new PatternLayout(), consoleWriter); + Logger.getLogger(NETWORK_LOGGING_CATEGORY).addAppender(appender); + + CosmosClient client = clientBuilder().build(); + try { + CosmosItemProperties docDefinition = getDocumentDefinition(); + Mono createObservable = createdCollection.createItem(docDefinition, + new CosmosItemRequestOptions()); + CosmosResponseValidator validator = new CosmosResponseValidator.Builder() + .withId(docDefinition.id()).build(); + validateSuccess(createObservable, validator); + + assertThat(consoleWriter.toString()).contains(LOG_PATTERN_1); + assertThat(consoleWriter.toString()).contains(LOG_PATTERN_2); + assertThat(consoleWriter.toString()).contains(LOG_PATTERN_3); + assertThat(consoleWriter.toString()).contains(LOG_PATTERN_4); + + } finally { + safeClose(client); + } + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void createDocumentWithTraceLevelAtRoot() throws Exception { + LogManager.getRootLogger().setLevel(Level.INFO); + LogManager.getLogger(COSMOS_DB_LOGGING_CATEGORY).setLevel(Level.TRACE); + StringWriter consoleWriter = new StringWriter(); + WriterAppender appender = new WriterAppender(new PatternLayout(), consoleWriter); + Logger.getLogger(NETWORK_LOGGING_CATEGORY).addAppender(appender); + + CosmosClient client = clientBuilder().build(); + try { + CosmosItemProperties docDefinition = getDocumentDefinition(); + Mono createObservable = createdCollection.createItem(docDefinition, + new CosmosItemRequestOptions()); + CosmosResponseValidator validator = new CosmosResponseValidator.Builder() + .withId(docDefinition.id()).build(); + validateSuccess(createObservable, validator); + + assertThat(consoleWriter.toString()).contains(LOG_PATTERN_1); + assertThat(consoleWriter.toString()).contains(LOG_PATTERN_2); + assertThat(consoleWriter.toString()).contains(LOG_PATTERN_3); + assertThat(consoleWriter.toString()).contains(LOG_PATTERN_4); + } finally { + safeClose(client); + } + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void createDocumentWithDebugLevelAtRoot() throws Exception { + LogManager.getRootLogger().setLevel(Level.INFO); + 
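+        // Only the SDK category is raised to DEBUG below; that alone should not produce netty
+        // wire-level output, so the appender attached to the network category stays empty.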
LogManager.getLogger(COSMOS_DB_LOGGING_CATEGORY).setLevel(Level.DEBUG); + StringWriter consoleWriter = new StringWriter(); + WriterAppender appender = new WriterAppender(new PatternLayout(), consoleWriter); + Logger.getLogger(NETWORK_LOGGING_CATEGORY).addAppender(appender); + + CosmosClient client = clientBuilder().build(); + try { + CosmosItemProperties docDefinition = getDocumentDefinition(); + Mono createObservable = createdCollection.createItem(docDefinition, + new CosmosItemRequestOptions()); + CosmosResponseValidator validator = new CosmosResponseValidator.Builder() + .withId(docDefinition.id()).build(); + validateSuccess(createObservable, validator); + + assertThat(consoleWriter.toString()).isEmpty(); + } finally { + safeClose(client); + } + } + + /** + * This test will try to create document with netty wire ERROR logging and + * validate it. + * + * @throws Exception + */ + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void createDocumentWithErrorClient() throws Exception { + LogManager.getRootLogger().setLevel(Level.INFO); + LogManager.getLogger(NETWORK_LOGGING_CATEGORY).setLevel(Level.ERROR); + StringWriter consoleWriter = new StringWriter(); + WriterAppender appender = new WriterAppender(new PatternLayout(), consoleWriter); + Logger.getLogger(NETWORK_LOGGING_CATEGORY).addAppender(appender); + + CosmosClient client = clientBuilder().build(); + try { + CosmosItemProperties docDefinition = getDocumentDefinition(); + Mono createObservable = createdCollection.createItem(docDefinition, + new CosmosItemRequestOptions()); + CosmosResponseValidator validator = new CosmosResponseValidator.Builder() + .withId(docDefinition.id()).build(); + validateSuccess(createObservable, validator); + + assertThat(consoleWriter.toString()).isEmpty(); + } finally { + safeClose(client); + } + } + + /** + * This test will try to create document with netty wire INFO logging and + * validate it. 
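+     * Wire-level payloads only show up at TRACE, so INFO is expected to leave the appender output empty.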
+ * + * @throws Exception + */ + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void createDocumentWithInfoLevel() throws Exception { + LogManager.getRootLogger().setLevel(Level.INFO); + LogManager.getLogger(NETWORK_LOGGING_CATEGORY).setLevel(Level.INFO); + StringWriter consoleWriter = new StringWriter(); + WriterAppender appender = new WriterAppender(new PatternLayout(), consoleWriter); + Logger.getLogger(NETWORK_LOGGING_CATEGORY).addAppender(appender); + + CosmosClient client = clientBuilder().build(); + try { + CosmosItemProperties docDefinition = getDocumentDefinition(); + Mono createObservable = createdCollection.createItem(docDefinition, + new CosmosItemRequestOptions()); + CosmosResponseValidator validator = new CosmosResponseValidator.Builder() + .withId(docDefinition.id()).build(); + validateSuccess(createObservable, validator); + + assertThat(consoleWriter.toString()).isEmpty(); + } finally { + safeClose(client); + } + } + + private CosmosItemProperties getDocumentDefinition() { + String uuid = UUID.randomUUID().toString(); + CosmosItemProperties doc = new CosmosItemProperties( + String.format("{ " + "\"id\": \"%s\", " + "\"mypk\": \"%s\", " + + "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]" + "}", uuid, uuid)); + return doc; + } + + @BeforeMethod(groups = { "simple" }) + public void beforeMethod(Method method) { + LogManager.resetConfiguration(); + PropertyConfigurator.configure(this.getClass().getClassLoader().getResource("log4j.properties")); + } + + @AfterMethod(groups = { "simple" }) + public void afterMethod() { + LogManager.resetConfiguration(); + PropertyConfigurator.configure(this.getClass().getClassLoader().getResource("log4j.properties")); + } + + @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT) + public void afterClass() { + LogManager.resetConfiguration(); + PropertyConfigurator.configure(this.getClass().getClassLoader().getResource("log4j.properties")); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/MultiMasterConflictResolutionTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/MultiMasterConflictResolutionTest.java new file mode 100644 index 0000000000000..cf3879466ee7c --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/MultiMasterConflictResolutionTest.java @@ -0,0 +1,216 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.BridgeUtils; +import com.azure.data.cosmos.ConflictResolutionMode; +import com.azure.data.cosmos.ConflictResolutionPolicy; +import com.azure.data.cosmos.CosmosClient; +import com.azure.data.cosmos.CosmosClientBuilder; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.CosmosContainer; +import com.azure.data.cosmos.CosmosContainerProperties; +import com.azure.data.cosmos.CosmosContainerRequestOptions; +import com.azure.data.cosmos.CosmosContainerResponse; +import com.azure.data.cosmos.CosmosDatabase; +import com.azure.data.cosmos.CosmosDatabaseForTest; +import com.azure.data.cosmos.PartitionKeyDefinition; +import com.azure.data.cosmos.internal.FailureValidator; +import com.azure.data.cosmos.internal.TestUtils; +import com.azure.data.cosmos.internal.Utils; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Mono; + +import java.util.ArrayList; +import java.util.UUID; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; + +// assumes multi master is enabled in endpoint +public class MultiMasterConflictResolutionTest extends TestSuiteBase { + private static final int TIMEOUT = 40000; + + private final String databaseId = CosmosDatabaseForTest.generateId(); + + private PartitionKeyDefinition partitionKeyDef; + private CosmosClient client; + private CosmosDatabase database; + + @Factory(dataProvider = "clientBuilders") + public MultiMasterConflictResolutionTest(CosmosClientBuilder clientBuilder) { + super(clientBuilder); + } + + @Test(groups = "multi-master", timeOut = 10 * TIMEOUT) + public void conflictResolutionPolicyCRUD() { + + // default last writer wins, path _ts + CosmosContainerProperties collectionSettings = new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDef); + CosmosContainer collection = database.createContainer(collectionSettings, new CosmosContainerRequestOptions()).block().container(); + collectionSettings = collection.read().block().properties(); + + assertThat(collectionSettings.conflictResolutionPolicy().mode()).isEqualTo(ConflictResolutionMode.LAST_WRITER_WINS); + + // LWW without path specified, should default to _ts + collectionSettings.conflictResolutionPolicy(ConflictResolutionPolicy.createLastWriterWinsPolicy()); + collectionSettings = collection.replace(collectionSettings, null).block().properties(); + + assertThat(collectionSettings.conflictResolutionPolicy().mode()).isEqualTo(ConflictResolutionMode.LAST_WRITER_WINS); + assertThat(collectionSettings.conflictResolutionPolicy().conflictResolutionPath()).isEqualTo("/_ts"); + + // Tests the following scenarios + // 1. LWW with valid path + // 2. LWW with null path, should default to _ts + // 3. 
LWW with empty path, should default to _ts + testConflictResolutionPolicyRequiringPath(ConflictResolutionMode.LAST_WRITER_WINS, + new String[] { "/a", null, "" }, new String[] { "/a", "/_ts", "/_ts" }); + + // LWW invalid path + collectionSettings.conflictResolutionPolicy(ConflictResolutionPolicy.createLastWriterWinsPolicy("/a/b")); + + try { + collectionSettings = collection.replace(collectionSettings, null).block().properties(); + fail("Expected exception on invalid path."); + } catch (Exception e) { + + // when (e.StatusCode == HttpStatusCode.BadRequest) + CosmosClientException dce = Utils.as(e.getCause(), CosmosClientException.class); + if (dce != null && dce.statusCode() == 400) { + assertThat(dce.getMessage()).contains("Invalid path '\\/a\\/b' for last writer wins conflict resolution"); + } else { + throw e; + } + } + + // LWW invalid path + + collectionSettings.conflictResolutionPolicy(ConflictResolutionPolicy.createLastWriterWinsPolicy("someText")); + + try { + collectionSettings = collection.replace(collectionSettings, null).block().properties(); + fail("Expected exception on invalid path."); + } catch (Exception e) { + // when (e.StatusCode == HttpStatusCode.BadRequest) + CosmosClientException dce = Utils.as(e.getCause(), CosmosClientException.class); + if (dce != null && dce.statusCode() == 400) { + assertThat(dce.getMessage()).contains("Invalid path 'someText' for last writer wins conflict resolution"); + } else { + throw e; + } + } + + // Tests the following scenarios + // 1. CUSTOM with valid sprocLink + // 2. CUSTOM with null sprocLink, should default to empty string + // 3. CUSTOM with empty sprocLink, should default to empty string + testConflictResolutionPolicyRequiringPath(ConflictResolutionMode.CUSTOM, + new String[] { "randomSprocName", null, "" }, new String[] { "randomSprocName", "", "" }); + } + + private void testConflictResolutionPolicyRequiringPath(ConflictResolutionMode conflictResolutionMode, + String[] paths, String[] expectedPaths) { + for (int i = 0; i < paths.length; i++) { + CosmosContainerProperties collectionSettings = new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDef); + + if (conflictResolutionMode == ConflictResolutionMode.LAST_WRITER_WINS) { + collectionSettings.conflictResolutionPolicy(ConflictResolutionPolicy.createLastWriterWinsPolicy(paths[i])); + } else { + collectionSettings.conflictResolutionPolicy(ConflictResolutionPolicy.createCustomPolicy(paths[i])); + } + collectionSettings = database.createContainer(collectionSettings, new CosmosContainerRequestOptions()).block().properties(); + assertThat(collectionSettings.conflictResolutionPolicy().mode()).isEqualTo(conflictResolutionMode); + + if (conflictResolutionMode == ConflictResolutionMode.LAST_WRITER_WINS) { + assertThat(collectionSettings.conflictResolutionPolicy().conflictResolutionPath()).isEqualTo(expectedPaths[i]); + } else { + assertThat(collectionSettings.conflictResolutionPolicy().conflictResolutionProcedure()).isEqualTo(expectedPaths[i]); + } + } + } + + @Test(groups = "multi-master", timeOut = TIMEOUT) + public void invalidConflictResolutionPolicy_LastWriterWinsWithStoredProc() throws Exception { + CosmosContainerProperties collection = new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDef); + + // LWW without path specified, should default to _ts + ConflictResolutionPolicy policy = BridgeUtils.createConflictResolutionPolicy(); + BridgeUtils.setMode(policy, ConflictResolutionMode.LAST_WRITER_WINS); + 
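+        // Deliberately pair a stored procedure with a last-writer-wins policy; the service rejects
+        // this combination, and the validator below expects a 400 with the matching message.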
BridgeUtils.setStoredProc(policy,"randomSprocName"); + collection.conflictResolutionPolicy(policy); + + Mono createObservable = database.createContainer( + collection, + new CosmosContainerRequestOptions()); + + FailureValidator validator = new FailureValidator.Builder() + .instanceOf(CosmosClientException.class) + .statusCode(400) + .errorMessageContains("LastWriterWins conflict resolution mode should not have conflict resolution procedure set.") + .build(); + validateFailure(createObservable, validator); + } + + @Test(groups = "multi-master", timeOut = TIMEOUT) + public void invalidConflictResolutionPolicy_CustomWithPath() throws Exception { + CosmosContainerProperties collection = new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDef); + + // LWW without path specified, should default to _ts + ConflictResolutionPolicy policy = BridgeUtils.createConflictResolutionPolicy(); + BridgeUtils.setMode(policy, ConflictResolutionMode.CUSTOM); + BridgeUtils.setPath(policy,"/mypath"); + collection.conflictResolutionPolicy(policy); + + Mono createObservable = database.createContainer( + collection, + new CosmosContainerRequestOptions()); + + FailureValidator validator = new FailureValidator.Builder() + .instanceOf(CosmosClientException.class) + .statusCode(400) + .errorMessageContains("Custom conflict resolution mode should not have conflict resolution path set.") + .build(); + validateFailure(createObservable, validator); + } + + @BeforeClass(groups = {"multi-master"}, timeOut = SETUP_TIMEOUT) + public void beforeClass() { + // set up the client + + client = clientBuilder().build(); + database = createDatabase(client, databaseId); + partitionKeyDef = new PartitionKeyDefinition(); + ArrayList paths = new ArrayList(); + paths.add("/mypk"); + partitionKeyDef.paths(paths); + } + + @AfterClass(groups = {"multi-master"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeDeleteDatabase(database); + safeClose(client); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/MultiOrderByQueryTests.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/MultiOrderByQueryTests.java new file mode 100644 index 0000000000000..7aecad17ddea4 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/MultiOrderByQueryTests.java @@ -0,0 +1,344 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.CompositePath; +import com.azure.data.cosmos.CompositePathSortOrder; +import com.azure.data.cosmos.CosmosClient; +import com.azure.data.cosmos.CosmosClientBuilder; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.CosmosContainer; +import com.azure.data.cosmos.CosmosContainerProperties; +import com.azure.data.cosmos.CosmosItemProperties; +import com.azure.data.cosmos.CosmosItemRequestOptions; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.internal.FailureValidator; +import com.azure.data.cosmos.internal.FeedResponseListValidator; +import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.commons.collections4.ComparatorUtils; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.Iterator; +import java.util.List; +import java.util.Random; +import java.util.UUID; + +public class MultiOrderByQueryTests extends TestSuiteBase { + + private static final int TIMEOUT = 35000; + private static final String NUMBER_FIELD = "numberField"; + private static final String STRING_FIELD = "stringField"; + private static final String NUMBER_FIELD_2 = "numberField2"; + private static final String STRING_FIELD_2 = "stringField2"; + private static final String BOOL_FIELD = "boolField"; + private static final String NULL_FIELD = "nullField"; + private static final String OBJECT_FIELD = "objectField"; + private static final String ARRAY_FIELD = "arrayField"; + private static final String SHORT_STRING_FIELD = "shortStringField"; + private static final String MEDIUM_STRING_FIELD = "mediumStringField"; + private static final String LONG_STRING_FIELD = "longStringField"; + private static final String PARTITION_KEY = "pk"; + private List documents = new ArrayList(); + private CosmosContainer documentCollection; + private CosmosClient client; + + class CustomComparator implements Comparator { + String path; + CompositePathSortOrder order; + boolean isNumericPath = false; + boolean isStringPath = false; + boolean isBooleanPath = false; + boolean isNullPath = false; + + public CustomComparator(String path, CompositePathSortOrder order) { + this.path = path; + this.order = order; + if (this.path.contains("number")) { + isNumericPath = true; + } else if (this.path.toLowerCase().contains("string")) { + isStringPath = true; + } else if (this.path.contains("bool")) { + isBooleanPath = true; + } else if (this.path.contains("null")) { + isNullPath = true; + } + } + + @Override + public int compare(CosmosItemProperties doc1, CosmosItemProperties doc2) { + boolean isAsc = order == CompositePathSortOrder.ASCENDING; + if (isNumericPath) { + if (doc1.getInt(path) < doc2.getInt(path)) + return isAsc ? -1 : 1; + else if (doc1.getInt(path) > doc2.getInt(path)) + return isAsc ? 1 : -1; + else + return 0; + } else if (isStringPath) { + if (!isAsc) { + CosmosItemProperties temp = doc1; + doc1 = doc2; + doc2 = temp; + } + return doc1.getString(path).compareTo(doc2.getString(path)); + } else if (isBooleanPath) { + if (doc1.getBoolean(path) == false && doc2.getBoolean(path) == true) + return isAsc ? 
-1 : 1; + else if (doc1.getBoolean(path) == true && doc2.getBoolean(path) == false) + return isAsc ? 1 : -1; + else + return 0; + } else if (isNullPath) { + // all nulls are equal + return 0; + } else { + throw new IllegalStateException("data type not handled by comparator!"); + } + } + } + + @Factory(dataProvider = "clientBuilders") + public MultiOrderByQueryTests(CosmosClientBuilder clientBuilder) { + super(clientBuilder); + } + + @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeClose(client); + } + + @BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT) + public void beforeClass() throws Exception { + client = clientBuilder().build(); + documentCollection = getSharedMultiPartitionCosmosContainerWithCompositeAndSpatialIndexes(client); + truncateCollection(documentCollection); + + int numberOfDocuments = 4; + + Random random = new Random(); + for (int i = 0; i < numberOfDocuments; ++i) { + CosmosItemProperties multiOrderByDocument = generateMultiOrderByDocument(); + String multiOrderByDocumentString = multiOrderByDocument.toJson(); + int numberOfDuplicates = 5; + + for (int j = 0; j < numberOfDuplicates; j++) { + // Add the document itself for exact duplicates + CosmosItemProperties initialDocument = new CosmosItemProperties(multiOrderByDocumentString); + initialDocument.id(UUID.randomUUID().toString()); + this.documents.add(initialDocument); + + // Permute all the fields so that there are duplicates with tie breaks + CosmosItemProperties numberClone = new CosmosItemProperties(multiOrderByDocumentString); + BridgeInternal.setProperty(numberClone, NUMBER_FIELD, random.nextInt(5)); + numberClone.id(UUID.randomUUID().toString()); + this.documents.add(numberClone); + + CosmosItemProperties stringClone = new CosmosItemProperties(multiOrderByDocumentString); + BridgeInternal.setProperty(stringClone, STRING_FIELD, Integer.toString(random.nextInt(5))); + stringClone.id(UUID.randomUUID().toString()); + this.documents.add(stringClone); + + CosmosItemProperties boolClone = new CosmosItemProperties(multiOrderByDocumentString); + BridgeInternal.setProperty(boolClone, BOOL_FIELD, random.nextInt(2) % 2 == 0); + boolClone.id(UUID.randomUUID().toString()); + this.documents.add(boolClone); + + // Also fuzz what partition it goes to + CosmosItemProperties partitionClone = new CosmosItemProperties(multiOrderByDocumentString); + BridgeInternal.setProperty(partitionClone, PARTITION_KEY, random.nextInt(5)); + partitionClone.id(UUID.randomUUID().toString()); + this.documents.add(partitionClone); + } + } + + voidBulkInsertBlocking(documentCollection, documents); + + waitIfNeededForReplicasToCatchUp(clientBuilder()); + } + + private CosmosItemProperties generateMultiOrderByDocument() { + Random random = new Random(); + CosmosItemProperties document = new CosmosItemProperties(); + document.id(UUID.randomUUID().toString()); + BridgeInternal.setProperty(document, NUMBER_FIELD, random.nextInt(5)); + BridgeInternal.setProperty(document, NUMBER_FIELD_2, random.nextInt(5)); + BridgeInternal.setProperty(document, BOOL_FIELD, (random.nextInt() % 2) == 0); + BridgeInternal.setProperty(document, STRING_FIELD, Integer.toString(random.nextInt(5))); + BridgeInternal.setProperty(document, STRING_FIELD_2, Integer.toString(random.nextInt(5))); + BridgeInternal.setProperty(document, NULL_FIELD, null); + BridgeInternal.setProperty(document, OBJECT_FIELD, ""); + BridgeInternal.setProperty(document, ARRAY_FIELD, (new ObjectMapper()).createArrayNode()); + 
BridgeInternal.setProperty(document, SHORT_STRING_FIELD, "a" + random.nextInt(100)); + BridgeInternal.setProperty(document, MEDIUM_STRING_FIELD, "a" + random.nextInt(128) + 100); + BridgeInternal.setProperty(document, LONG_STRING_FIELD, "a" + random.nextInt(255) + 128); + BridgeInternal.setProperty(document, PARTITION_KEY, random.nextInt(5)); + return document; + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void queryDocumentsWithMultiOrder() throws CosmosClientException, InterruptedException { + FeedOptions feedOptions = new FeedOptions(); + feedOptions.enableCrossPartitionQuery(true); + + boolean[] booleanValues = new boolean[] {true, false}; + CosmosContainerProperties containerSettings = documentCollection.read().block().properties(); + Iterator> compositeIndexesIterator = containerSettings.indexingPolicy().compositeIndexes().iterator(); + while (compositeIndexesIterator.hasNext()) { + List compositeIndex = compositeIndexesIterator.next(); + // for every order + for (boolean invert : booleanValues) { + // for normal and inverted order + for (boolean hasTop : booleanValues) { + // with and without top + for (boolean hasFilter : booleanValues) { + // with and without filter + // Generate a multi order by from that index + List orderByItems = new ArrayList(); + List selectItems = new ArrayList(); + boolean isDesc; + Iterator compositeIndexiterator = compositeIndex.iterator(); + while (compositeIndexiterator.hasNext()) { + CompositePath compositePath = compositeIndexiterator.next(); + isDesc = compositePath.order() == CompositePathSortOrder.DESCENDING ? true : false; + if (invert) { + isDesc = !isDesc; + } + + String isDescString = isDesc ? "DESC" : "ASC"; + String compositePathName = compositePath.path().replaceAll("/", ""); + String orderByItemsString = "root." + compositePathName + " " + isDescString; + String selectItemsString = "root." + compositePathName; + orderByItems.add(orderByItemsString); + selectItems.add(selectItemsString); + } + + int topCount = 10; + StringBuilder selectItemStringBuilder = new StringBuilder(); + for (String selectItem: selectItems) { + selectItemStringBuilder.append(selectItem); + selectItemStringBuilder.append(","); + } + selectItemStringBuilder.deleteCharAt(selectItemStringBuilder.length() - 1); + StringBuilder orderByItemStringBuilder = new StringBuilder(); + for (String orderByItem : orderByItems) { + orderByItemStringBuilder.append(orderByItem); + orderByItemStringBuilder.append(","); + } + orderByItemStringBuilder.deleteCharAt(orderByItemStringBuilder.length() - 1); + + String topString = hasTop ? "TOP " + topCount : ""; + String whereString = hasFilter ? "WHERE root." + NUMBER_FIELD + " % 2 = 0" : ""; + String query = "SELECT " + topString + " [" + selectItemStringBuilder.toString() + "] " + + "FROM root " + whereString + " " + + "ORDER BY " + orderByItemStringBuilder.toString(); + + List expectedOrderedList = top(sort(filter(this.documents, hasFilter), compositeIndex, invert), hasTop, topCount) ; + + Flux> queryObservable = documentCollection.queryItems(query, feedOptions); + + FeedResponseListValidator validator = new FeedResponseListValidator + .Builder() + .withOrderedResults(expectedOrderedList, compositeIndex) + .build(); + + validateQuerySuccess(queryObservable, validator); + } + } + } + } + + // CREATE document with numberField not set. + // This query would then be invalid. 
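+        // The remaining steps insert a document that is missing NUMBER_FIELD and then run a multi ORDER BY
+        // query over that field; the FailureValidator below expects the query to fail with
+        // UnsupportedOperationException.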
+ CosmosItemProperties documentWithEmptyField = generateMultiOrderByDocument(); + BridgeInternal.remove(documentWithEmptyField, NUMBER_FIELD); + documentCollection.createItem(documentWithEmptyField, new CosmosItemRequestOptions()).block(); + String query = "SELECT [root." + NUMBER_FIELD + ",root." + STRING_FIELD + "] FROM root ORDER BY root." + NUMBER_FIELD + " ASC ,root." + STRING_FIELD + " DESC"; + Flux> queryObservable = documentCollection.queryItems(query, feedOptions); + + FailureValidator validator = new FailureValidator.Builder() + .instanceOf(UnsupportedOperationException.class) + .build(); + + validateQueryFailure(queryObservable, validator); + } + + private List top(List arrayList, boolean hasTop, int topCount) { + List result = new ArrayList(); + int counter = 0; + if (hasTop) { + while (counter < topCount && counter < arrayList.size()) { + result.add(arrayList.get(counter)); + counter++; + } + } else { + result.addAll(arrayList); + } + return result; + } + + private List sort(List arrayList, List compositeIndex, + boolean invert) { + Collection> comparators = new ArrayList>(); + Iterator compositeIndexIterator = compositeIndex.iterator(); + while (compositeIndexIterator.hasNext()) { + CompositePath compositePath = compositeIndexIterator.next(); + CompositePathSortOrder order = compositePath.order(); + if (invert) { + if (order == CompositePathSortOrder.DESCENDING) { + order = CompositePathSortOrder.ASCENDING; + } else { + order = CompositePathSortOrder.DESCENDING; + } + } + String path = compositePath.path().replace("/", ""); + comparators.add(new CustomComparator(path, order)); + } + Collections.sort(arrayList, ComparatorUtils.chainedComparator(comparators)); + return arrayList; + } + + private List filter(List cosmosItemSettings, boolean hasFilter) { + List result = new ArrayList(); + if (hasFilter) { + for (CosmosItemProperties document : cosmosItemSettings) { + if (document.getInt(NUMBER_FIELD) % 2 == 0) { + result.add(document); + } + } + } else { + result.addAll(cosmosItemSettings); + } + return result; + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/OfferQueryTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/OfferQueryTest.java new file mode 100644 index 0000000000000..131a83dfeba3c --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/OfferQueryTest.java @@ -0,0 +1,169 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.internal.AsyncDocumentClient; +import com.azure.data.cosmos.internal.AsyncDocumentClient.Builder; +import com.azure.data.cosmos.internal.Database; +import com.azure.data.cosmos.internal.*; +import com.azure.data.cosmos.internal.DocumentCollection; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.PartitionKeyDefinition; +import com.azure.data.cosmos.internal.TestSuiteBase; +import org.assertj.core.util.Strings; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; + +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.stream.Collectors; + +import static org.assertj.core.api.Assertions.assertThat; + +//TODO: change to use external TestSuiteBase +public class OfferQueryTest extends TestSuiteBase { + + public final static int SETUP_TIMEOUT = 40000; + public final String databaseId = DatabaseForTest.generateId(); + + private List createdCollections = new ArrayList<>(); + + private AsyncDocumentClient client; + + private String getDatabaseLink() { + return TestUtils.getDatabaseNameLink(databaseId); + } + + @Factory(dataProvider = "clientBuilders") + public OfferQueryTest(Builder clientBuilder) { + super(clientBuilder); + } + + @Test(groups = { "emulator" }, timeOut = TIMEOUT) + public void queryOffersWithFilter() throws Exception { + String collectionResourceId = createdCollections.get(0).resourceId(); + String query = String.format("SELECT * from c where c.offerResourceId = '%s'", collectionResourceId); + + FeedOptions options = new FeedOptions(); + options.maxItemCount(2); + Flux> queryObservable = client.queryOffers(query, null); + + List allOffers = client.readOffers(null).flatMap(f -> Flux.fromIterable(f.results())).collectList().single().block(); + List expectedOffers = allOffers.stream().filter(o -> collectionResourceId.equals(o.getString("offerResourceId"))).collect(Collectors.toList()); + + assertThat(expectedOffers).isNotEmpty(); + + int expectedPageSize = (expectedOffers.size() + options.maxItemCount() - 1) / options.maxItemCount(); + + FeedResponseListValidator validator = new FeedResponseListValidator.Builder() + .totalSize(expectedOffers.size()) + .exactlyContainsInAnyOrder(expectedOffers.stream().map(d -> d.resourceId()).collect(Collectors.toList())) + .numberOfPages(expectedPageSize) + .pageSatisfy(0, new FeedResponseValidator.Builder() + .requestChargeGreaterThanOrEqualTo(1.0).build()) + .build(); + + validateQuerySuccess(queryObservable, validator, 10000); + } + + @Test(groups = { "emulator" }, timeOut = TIMEOUT * 100) + public void queryOffersFilterMorePages() throws Exception { + + List collectionResourceIds = createdCollections.stream().map(c -> c.resourceId()).collect(Collectors.toList()); + String query = String.format("SELECT * from c where c.offerResourceId in (%s)", + Strings.join(collectionResourceIds.stream().map(s -> "'" + s + "'").collect(Collectors.toList())).with(",")); + + FeedOptions options = new FeedOptions(); + options.maxItemCount(1); + Flux> queryObservable = 
client.queryOffers(query, options); + + List expectedOffers = client.readOffers(null).flatMap(f -> Flux.fromIterable(f.results())) + .collectList() + .single().block() + .stream().filter(o -> collectionResourceIds.contains(o.getOfferResourceId())) + .collect(Collectors.toList()); + + assertThat(expectedOffers).hasSize(createdCollections.size()); + + int expectedPageSize = (expectedOffers.size() + options.maxItemCount() - 1) / options.maxItemCount(); + + FeedResponseListValidator validator = new FeedResponseListValidator.Builder() + .totalSize(expectedOffers.size()) + .exactlyContainsInAnyOrder(expectedOffers.stream().map(d -> d.resourceId()).collect(Collectors.toList())) + .numberOfPages(expectedPageSize) + .pageSatisfy(0, new FeedResponseValidator.Builder() + .requestChargeGreaterThanOrEqualTo(1.0).build()) + .build(); + + validateQuerySuccess(queryObservable, validator, 10000); + } + + @Test(groups = { "emulator" }, timeOut = TIMEOUT) + public void queryCollections_NoResults() throws Exception { + + String query = "SELECT * from root r where r.id = '2'"; + FeedOptions options = new FeedOptions(); + Flux> queryObservable = client.queryCollections(getDatabaseLink(), query, options); + + FeedResponseListValidator validator = new FeedResponseListValidator.Builder() + .containsExactly(new ArrayList<>()) + .numberOfPages(1) + .pageSatisfy(0, new FeedResponseValidator.Builder() + .requestChargeGreaterThanOrEqualTo(1.0).build()) + .build(); + validateQuerySuccess(queryObservable, validator); + } + + @BeforeClass(groups = { "emulator" }, timeOut = SETUP_TIMEOUT) + public void beforeClass() throws Exception { + client = clientBuilder().build(); + + Database d1 = new Database(); + d1.id(databaseId); + createDatabase(client, d1); + + for(int i = 0; i < 3; i++) { + DocumentCollection collection = new DocumentCollection(); + collection.id(UUID.randomUUID().toString()); + + PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition(); + ArrayList paths = new ArrayList(); + paths.add("/mypk"); + partitionKeyDef.paths(paths); + collection.setPartitionKey(partitionKeyDef); + + createdCollections.add(createCollection(client, databaseId, collection)); + } + } + + @AfterClass(groups = { "emulator" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeDeleteDatabase(client, databaseId); + safeClose(client); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/OfferReadReplaceTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/OfferReadReplaceTest.java new file mode 100644 index 0000000000000..f6fe31df0045b --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/OfferReadReplaceTest.java @@ -0,0 +1,112 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.internal.AsyncDocumentClient; +import com.azure.data.cosmos.internal.Database; +import com.azure.data.cosmos.internal.*; +import com.azure.data.cosmos.internal.DocumentCollection; +import com.azure.data.cosmos.internal.TestSuiteBase; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; + +import java.util.List; + +//TODO: change to use external TestSuiteBase +public class OfferReadReplaceTest extends TestSuiteBase { + + public final String databaseId = DatabaseForTest.generateId(); + + private Database createdDatabase; + private DocumentCollection createdCollection; + + private AsyncDocumentClient client; + + @Factory(dataProvider = "clientBuilders") + public OfferReadReplaceTest(AsyncDocumentClient.Builder clientBuilder) { + super(clientBuilder); + } + + @Test(groups = { "emulator" }, timeOut = TIMEOUT) + public void readAndReplaceOffer() { + + client.readOffers(null).subscribe((offersFeed) -> { + try { + int i; + List offers = offersFeed.results(); + for (i = 0; i < offers.size(); i++) { + if (offers.get(i).getOfferResourceId().equals(createdCollection.resourceId())) { + break; + } + } + + Offer rOffer = client.readOffer(offers.get(i).selfLink()).single().block().getResource(); + int oldThroughput = rOffer.getThroughput(); + + Flux> readObservable = client.readOffer(offers.get(i).selfLink()); + + // validate offer read + ResourceResponseValidator validatorForRead = new ResourceResponseValidator.Builder() + .withOfferThroughput(oldThroughput) + .notNullEtag() + .build(); + + validateSuccess(readObservable, validatorForRead); + + // update offer + int newThroughput = oldThroughput + 100; + offers.get(i).setThroughput(newThroughput); + Flux> replaceObservable = client.replaceOffer(offers.get(i)); + + // validate offer replace + ResourceResponseValidator validatorForReplace = new ResourceResponseValidator.Builder() + .withOfferThroughput(newThroughput) + .notNullEtag() + .build(); + + validateSuccess(replaceObservable, validatorForReplace); + + } catch (Exception e) { + e.printStackTrace(); + } + + }); + } + + @BeforeClass(groups = { "emulator" }, timeOut = SETUP_TIMEOUT) + public void beforeClass() { + client = clientBuilder().build(); + createdDatabase = createDatabase(client, databaseId); + createdCollection = createCollection(client, createdDatabase.id(), + getCollectionDefinition()); + } + + @AfterClass(groups = { "emulator" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeDeleteDatabase(client, createdDatabase); + safeClose(client); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/OrderbyDocumentQueryTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/OrderbyDocumentQueryTest.java new file mode 100644 index 0000000000000..4dae14a588277 --- /dev/null +++ 
b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/OrderbyDocumentQueryTest.java @@ -0,0 +1,571 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.CosmosBridgeInternal; +import com.azure.data.cosmos.CosmosClient; +import com.azure.data.cosmos.CosmosClientBuilder; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.CosmosContainer; +import com.azure.data.cosmos.CosmosDatabase; +import com.azure.data.cosmos.CosmosItemProperties; +import com.azure.data.cosmos.CosmosItemRequestOptions; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.PartitionKey; +import com.azure.data.cosmos.Resource; +import com.azure.data.cosmos.internal.*; +import com.azure.data.cosmos.internal.Utils.ValueHolder; +import com.azure.data.cosmos.internal.query.CompositeContinuationToken; +import com.azure.data.cosmos.internal.query.OrderByContinuationToken; +import com.azure.data.cosmos.internal.query.QueryItem; +import com.azure.data.cosmos.internal.routing.Range; +import com.fasterxml.jackson.core.JsonProcessingException; +import io.reactivex.subscribers.TestSubscriber; +import org.apache.commons.lang3.StringUtils; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import java.util.function.Function; +import java.util.stream.Collectors; + +import static org.assertj.core.api.Assertions.assertThat; + +public class OrderbyDocumentQueryTest extends TestSuiteBase { + private final double minQueryRequestChargePerPartition = 2.0; + + private CosmosClient client; + private CosmosContainer createdCollection; + private CosmosDatabase createdDatabase; + private List createdDocuments = new ArrayList<>(); + + private int numberOfPartitions; + + @Factory(dataProvider = "clientBuildersWithDirect") + public OrderbyDocumentQueryTest(CosmosClientBuilder clientBuilder) { + super(clientBuilder); + } + + 
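+    // Most of the tests below run cross-partition ORDER BY queries against the shared multi-partition
+    // container populated in beforeClass() and validate ordering, page counts, and request charges
+    // through FeedResponseListValidator.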
@Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "queryMetricsArgProvider") + public void queryDocumentsValidateContent(boolean qmEnabled) throws Exception { + CosmosItemProperties expectedDocument = createdDocuments.get(0); + + String query = String.format("SELECT * from root r where r.propStr = '%s'" + + " ORDER BY r.propInt" + , expectedDocument.getString("propStr")); + + FeedOptions options = new FeedOptions(); + options.enableCrossPartitionQuery(true); + options.populateQueryMetrics(qmEnabled); + + Flux> queryObservable = createdCollection.queryItems(query, options); + + List expectedResourceIds = new ArrayList<>(); + expectedResourceIds.add(expectedDocument.resourceId()); + + Map> resourceIDToValidator = new HashMap<>(); + + resourceIDToValidator.put(expectedDocument.resourceId(), + new ResourceValidator.Builder().areEqual(expectedDocument).build()); + + FeedResponseListValidator validator = new FeedResponseListValidator.Builder() + .numberOfPages(1) + .containsExactly(expectedResourceIds) + .validateAllResources(resourceIDToValidator) + .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) + .allPagesSatisfy(new FeedResponseValidator.Builder().hasRequestChargeHeader().build()) + .hasValidQueryMetrics(qmEnabled) + .build(); + + validateQuerySuccess(queryObservable, validator); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void queryDocuments_NoResults() throws Exception { + String query = "SELECT * from root r where r.id = '2' ORDER BY r.propInt"; + FeedOptions options = new FeedOptions(); + options.enableCrossPartitionQuery(true); + Flux> queryObservable = createdCollection.queryItems(query, options); + + FeedResponseListValidator validator = new FeedResponseListValidator.Builder() + .containsExactly(new ArrayList<>()) + .numberOfPages(1) + .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) + .allPagesSatisfy(new FeedResponseValidator.Builder() + .hasRequestChargeHeader().build()) + .build(); + + validateQuerySuccess(queryObservable, validator); + } + + @DataProvider(name = "sortOrder") + public Object[][] sortOrder() { + return new Object[][] { { "ASC" }, {"DESC"} }; + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "sortOrder") + public void queryOrderBy(String sortOrder) throws Exception { + String query = String.format("SELECT * FROM r ORDER BY r.propInt %s", sortOrder); + FeedOptions options = new FeedOptions(); + options.enableCrossPartitionQuery(true); + int pageSize = 3; + options.maxItemCount(pageSize); + Flux> queryObservable = createdCollection.queryItems(query, options); + Comparator validatorComparator = Comparator.nullsFirst(Comparator.naturalOrder()); + + List expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt", d -> d.getInt("propInt"), validatorComparator); + if ("DESC".equals(sortOrder)) { + Collections.reverse(expectedResourceIds); + } + + int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize); + + FeedResponseListValidator validator = new FeedResponseListValidator.Builder() + .containsExactly(expectedResourceIds) + .numberOfPages(expectedPageSize) + .allPagesSatisfy(new FeedResponseValidator.Builder() + .hasRequestChargeHeader().build()) + .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) + .build(); + + validateQuerySuccess(queryObservable, validator); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void queryOrderByInt() throws Exception { + String 
query = "SELECT * FROM r ORDER BY r.propInt"; + FeedOptions options = new FeedOptions(); + options.enableCrossPartitionQuery(true); + int pageSize = 3; + options.maxItemCount(pageSize); + Flux> queryObservable = createdCollection.queryItems(query, options); + + Comparator validatorComparator = Comparator.nullsFirst(Comparator.naturalOrder()); + List expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt", d -> d.getInt("propInt"), validatorComparator); + int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize); + + FeedResponseListValidator validator = new FeedResponseListValidator.Builder() + .containsExactly(expectedResourceIds) + .numberOfPages(expectedPageSize) + .allPagesSatisfy(new FeedResponseValidator.Builder() + .hasRequestChargeHeader().build()) + .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) + .build(); + + validateQuerySuccess(queryObservable, validator); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void queryOrderByString() throws Exception { + String query = "SELECT * FROM r ORDER BY r.propStr"; + FeedOptions options = new FeedOptions(); + options.enableCrossPartitionQuery(true); + int pageSize = 3; + options.maxItemCount(pageSize); + Flux> queryObservable = createdCollection.queryItems(query, options); + + Comparator validatorComparator = Comparator.nullsFirst(Comparator.naturalOrder()); + List expectedResourceIds = sortDocumentsAndCollectResourceIds("propStr", d -> d.getString("propStr"), validatorComparator); + int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize); + + FeedResponseListValidator validator = new FeedResponseListValidator.Builder() + .containsExactly(expectedResourceIds) + .numberOfPages(expectedPageSize) + .allPagesSatisfy(new FeedResponseValidator.Builder() + .hasRequestChargeHeader().build()) + .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) + .build(); + + validateQuerySuccess(queryObservable, validator); + } + + @DataProvider(name = "topValue") + public Object[][] topValueParameter() { + return new Object[][] { { 0 }, { 1 }, { 5 }, { createdDocuments.size() - 1 }, { createdDocuments.size() }, + { createdDocuments.size() + 1 }, { 2 * createdDocuments.size() } }; + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "topValue") + public void queryOrderWithTop(int topValue) throws Exception { + String query = String.format("SELECT TOP %d * FROM r ORDER BY r.propInt", topValue); + FeedOptions options = new FeedOptions(); + options.enableCrossPartitionQuery(true); + int pageSize = 3; + options.maxItemCount(pageSize); + Flux> queryObservable = createdCollection.queryItems(query, options); + + Comparator validatorComparator = Comparator.nullsFirst(Comparator.naturalOrder()); + + List expectedResourceIds = + sortDocumentsAndCollectResourceIds("propInt", d -> d.getInt("propInt"), validatorComparator) + .stream().limit(topValue).collect(Collectors.toList()); + + int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize); + + FeedResponseListValidator validator = new FeedResponseListValidator.Builder() + .containsExactly(expectedResourceIds) + .numberOfPages(expectedPageSize) + .allPagesSatisfy(new FeedResponseValidator.Builder() + .hasRequestChargeHeader().build()) + .totalRequestChargeIsAtLeast(numberOfPartitions * (topValue > 0 ? 
minQueryRequestChargePerPartition : 1)) + .build(); + + validateQuerySuccess(queryObservable, validator); + } + + private List sortDocumentsAndCollectResourceIds(String propName, Function extractProp, Comparator comparer) { + return createdDocuments.stream() + .filter(d -> d.getMap().containsKey(propName)) // removes undefined + .sorted((d1, d2) -> comparer.compare(extractProp.apply(d1), extractProp.apply(d2))) + .map(Resource::resourceId).collect(Collectors.toList()); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void crossPartitionQueryNotEnabled() throws Exception { + String query = "SELECT * FROM r ORDER BY r.propInt"; + FeedOptions options = new FeedOptions(); + Flux> queryObservable = createdCollection.queryItems(query, options); + + FailureValidator validator = new FailureValidator.Builder() + .instanceOf(CosmosClientException.class) + .statusCode(400) + .build(); + validateQueryFailure(queryObservable, validator); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void queryScopedToSinglePartition_StartWithContinuationToken() throws Exception { + String query = "SELECT * FROM r ORDER BY r.propScopedPartitionInt ASC"; + FeedOptions options = new FeedOptions(); + options.partitionKey(new PartitionKey("duplicateParitionKeyValue")); + options.maxItemCount(3); + Flux> queryObservable = createdCollection.queryItems(query, options); + + TestSubscriber> subscriber = new TestSubscriber<>(); + queryObservable.take(1).subscribe(subscriber); + + subscriber.awaitTerminalEvent(); + subscriber.assertComplete(); + subscriber.assertNoErrors(); + assertThat(subscriber.valueCount()).isEqualTo(1); + FeedResponse page = (FeedResponse) subscriber.getEvents().get(0).get(0); + assertThat(page.results()).hasSize(3); + + assertThat(page.continuationToken()).isNotEmpty(); + + + options.requestContinuation(page.continuationToken()); + queryObservable = createdCollection.queryItems(query, options); + + List expectedDocs = createdDocuments.stream() + .filter(d -> (StringUtils.equals("duplicateParitionKeyValue", d.getString("mypk")))) + .filter(d -> (d.getInt("propScopedPartitionInt") > 2)).collect(Collectors.toList()); + int expectedPageSize = (expectedDocs.size() + options.maxItemCount() - 1) / options.maxItemCount(); + + assertThat(expectedDocs).hasSize(10 - 3); + + FeedResponseListValidator validator = null; + + validator = new FeedResponseListValidator.Builder() + .containsExactly(expectedDocs.stream() + .sorted((e1, e2) -> Integer.compare(e1.getInt("propScopedPartitionInt"), e2.getInt("propScopedPartitionInt"))) + .map(d -> d.resourceId()).collect(Collectors.toList())) + .numberOfPages(expectedPageSize) + .allPagesSatisfy(new FeedResponseValidator.Builder() + .requestChargeGreaterThanOrEqualTo(1.0).build()) + .build(); + + validateQuerySuccess(queryObservable, validator); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void orderByContinuationTokenRoundTrip() throws Exception { + { + // Positive + OrderByContinuationToken orderByContinuationToken = new OrderByContinuationToken( + new CompositeContinuationToken( + "asdf", + new Range("A", "D", false, true)), + new QueryItem[] {new QueryItem("{\"item\" : 42}")}, + "rid", + false); + String serialized = orderByContinuationToken.toString(); + ValueHolder outOrderByContinuationToken = new ValueHolder(); + + assertThat(OrderByContinuationToken.tryParse(serialized, outOrderByContinuationToken)).isTrue(); + OrderByContinuationToken deserialized = outOrderByContinuationToken.v; + CompositeContinuationToken 
compositeContinuationToken = deserialized.getCompositeContinuationToken(); + String token = compositeContinuationToken.getToken(); + Range range = compositeContinuationToken.getRange(); + assertThat(token).isEqualTo("asdf"); + assertThat(range.getMin()).isEqualTo("A"); + assertThat(range.getMax()).isEqualTo("D"); + assertThat(range.isMinInclusive()).isEqualTo(false); + assertThat(range.isMaxInclusive()).isEqualTo(true); + + QueryItem[] orderByItems = deserialized.getOrderByItems(); + assertThat(orderByItems).isNotNull(); + assertThat(orderByItems.length).isEqualTo(1); + assertThat(orderByItems[0].getItem()).isEqualTo(42); + + String rid = deserialized.getRid(); + assertThat(rid).isEqualTo("rid"); + + boolean inclusive = deserialized.getInclusive(); + assertThat(inclusive).isEqualTo(false); + } + + { + // Negative + ValueHolder outOrderByContinuationToken = new ValueHolder(); + assertThat(OrderByContinuationToken.tryParse("{\"property\" : \"Not a valid Order By Token\"}", outOrderByContinuationToken)).isFalse(); + } + } + @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder", + retryAnalyzer = RetryAnalyzer.class) + public void queryDocumentsWithOrderByContinuationTokensInteger(String sortOrder) throws Exception { + // Get Actual + String query = String.format("SELECT * FROM c ORDER BY c.propInt %s", sortOrder); + + // Get Expected + Comparator order = sortOrder.equals("ASC")?Comparator.naturalOrder():Comparator.reverseOrder(); + Comparator validatorComparator = Comparator.nullsFirst(order); + + List expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt", d -> d.getInt("propInt"), validatorComparator); + this.queryWithContinuationTokensAndPageSizes(query, new int[] { 1, 5, 10, 100}, expectedResourceIds); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder") + public void queryDocumentsWithOrderByContinuationTokensString(String sortOrder) throws Exception { + // Get Actual + String query = String.format("SELECT * FROM c ORDER BY c.id %s", sortOrder); + + // Get Expected + Comparator order = sortOrder.equals("ASC")?Comparator.naturalOrder():Comparator.reverseOrder(); + Comparator validatorComparator = Comparator.nullsFirst(order); + + List expectedResourceIds = sortDocumentsAndCollectResourceIds("id", d -> d.getString("id"), validatorComparator); + this.queryWithContinuationTokensAndPageSizes(query, new int[] { 1, 5, 10, 100 }, expectedResourceIds); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder") + public void queryDocumentsWithInvalidOrderByContinuationTokensString(String sortOrder) throws Exception { + // Get Actual + String query = String.format("SELECT * FROM c ORDER BY c.id %s", sortOrder); + + // Get Expected + Comparator validatorComparator; + if(sortOrder.equals("ASC")) { + validatorComparator = Comparator.nullsFirst(Comparator.naturalOrder()); + }else{ + validatorComparator = Comparator.nullsFirst(Comparator.reverseOrder()); + } + List expectedResourceIds = sortDocumentsAndCollectResourceIds("id", d -> d.getString("id"), validatorComparator); + this.assertInvalidContinuationToken(query, new int[] { 1, 5, 10, 100 }, expectedResourceIds); + } + + public CosmosItemProperties createDocument(CosmosContainer cosmosContainer, Map keyValueProps) + throws CosmosClientException { + CosmosItemProperties docDefinition = getDocumentDefinition(keyValueProps); + return cosmosContainer.createItem(docDefinition).block().properties(); + } + + public List bulkInsert(CosmosContainer 
cosmosContainer, List> keyValuePropsList) { + + ArrayList result = new ArrayList(); + + for(Map keyValueProps: keyValuePropsList) { + CosmosItemProperties docDefinition = getDocumentDefinition(keyValueProps); + result.add(docDefinition); + } + + return bulkInsertBlocking(cosmosContainer, result); + } + + @BeforeMethod(groups = { "simple" }) + public void beforeMethod() throws Exception { + // add a cool off time + TimeUnit.SECONDS.sleep(10); + } + + @BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT) + public void beforeClass() throws Exception { + client = clientBuilder().build(); + createdDatabase = getSharedCosmosDatabase(client); + createdCollection = getSharedMultiPartitionCosmosContainer(client); + truncateCollection(createdCollection); + + List> keyValuePropsList = new ArrayList<>(); + Map props; + + for(int i = 0; i < 30; i++) { + props = new HashMap<>(); + props.put("propInt", i); + props.put("propStr", String.valueOf(i)); + keyValuePropsList.add(props); + } + + //undefined values + props = new HashMap<>(); + keyValuePropsList.add(props); + + createdDocuments = bulkInsert(createdCollection, keyValuePropsList); + + for(int i = 0; i < 10; i++) { + Map p = new HashMap<>(); + p.put("propScopedPartitionInt", i); + CosmosItemProperties doc = getDocumentDefinition("duplicateParitionKeyValue", UUID.randomUUID().toString(), p); + CosmosItemRequestOptions options = new CosmosItemRequestOptions(); + options.partitionKey(new PartitionKey(doc.get("mypk"))); + createdDocuments.add(createDocument(createdCollection, doc).read(options).block().properties()); + + } + + numberOfPartitions = CosmosBridgeInternal.getAsyncDocumentClient(client) + .readPartitionKeyRanges("dbs/" + createdDatabase.id() + "/colls/" + createdCollection.id(), null) + .flatMap(p -> Flux.fromIterable(p.results())).collectList().single().block().size(); + + waitIfNeededForReplicasToCatchUp(clientBuilder()); + } + + @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeClose(client); + } + + private void assertInvalidContinuationToken(String query, int[] pageSize, List expectedIds) { + String requestContinuation = null; + do { + FeedOptions options = new FeedOptions(); + options.maxItemCount(1); + options.enableCrossPartitionQuery(true); + options.maxDegreeOfParallelism(2); + OrderByContinuationToken orderByContinuationToken = new OrderByContinuationToken( + new CompositeContinuationToken( + "asdf", + new Range("A", "D", false, true)), + new QueryItem[] {new QueryItem("{\"item\" : 42}")}, + "rid", + false); + options.requestContinuation(orderByContinuationToken.toString()); + Flux> queryObservable = createdCollection.queryItems(query, + options); + + //Observable> firstPageObservable = queryObservable.first(); + TestSubscriber> testSubscriber = new TestSubscriber<>(); + queryObservable.subscribe(testSubscriber); + testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS); + testSubscriber.assertError(CosmosClientException.class); + } while (requestContinuation != null); + } + + private void queryWithContinuationTokensAndPageSizes(String query, int[] pageSizes, List expectedIds) { + for (int pageSize : pageSizes) { + List receivedDocuments = this.queryWithContinuationTokens(query, pageSize); + List actualIds = new ArrayList(); + for (CosmosItemProperties document : receivedDocuments) { + actualIds.add(document.resourceId()); + } + + assertThat(actualIds).containsExactlyElementsOf(expectedIds); + } + } + + private List queryWithContinuationTokens(String 
query, int pageSize) { + String requestContinuation = null; + List continuationTokens = new ArrayList(); + List receivedDocuments = new ArrayList(); + do { + FeedOptions options = new FeedOptions(); + options.maxItemCount(pageSize); + options.enableCrossPartitionQuery(true); + options.maxDegreeOfParallelism(2); + options.requestContinuation(requestContinuation); + Flux> queryObservable = createdCollection.queryItems(query, + options); + + //Observable> firstPageObservable = queryObservable.first(); + TestSubscriber> testSubscriber = new TestSubscriber<>(); + queryObservable.subscribe(testSubscriber); + testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS); + testSubscriber.assertNoErrors(); + testSubscriber.assertComplete(); + + FeedResponse firstPage = (FeedResponse) testSubscriber.getEvents().get(0).get(0); + requestContinuation = firstPage.continuationToken(); + receivedDocuments.addAll(firstPage.results()); + continuationTokens.add(requestContinuation); + } while (requestContinuation != null); + + return receivedDocuments; + } + + private static CosmosItemProperties getDocumentDefinition(String partitionKey, String id, Map keyValuePair) { + StringBuilder sb = new StringBuilder(); + sb.append("{\n"); + + for(String key: keyValuePair.keySet()) { + Object val = keyValuePair.get(key); + sb.append(" "); + sb.append("\"").append(key).append("\"").append(" :" ); + if (val == null) { + sb.append("null"); + } else { + sb.append(toJson(val)); + } + sb.append(",\n"); + } + + sb.append(String.format(" \"id\": \"%s\",\n", id)); + sb.append(String.format(" \"mypk\": \"%s\"\n", partitionKey)); + sb.append("}"); + + return new CosmosItemProperties(sb.toString()); + } + + private static CosmosItemProperties getDocumentDefinition(Map keyValuePair) { + String uuid = UUID.randomUUID().toString(); + return getDocumentDefinition(uuid, uuid, keyValuePair); + } + + private static String toJson(Object object){ + try { + return Utils.getSimpleObjectMapper().writeValueAsString(object); + } catch (JsonProcessingException e) { + throw new IllegalStateException(e); + } + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ParallelDocumentQueryTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ParallelDocumentQueryTest.java new file mode 100644 index 0000000000000..3bfcaa3679bc8 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ParallelDocumentQueryTest.java @@ -0,0 +1,383 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.CosmosBridgeInternal; +import com.azure.data.cosmos.CosmosClient; +import com.azure.data.cosmos.CosmosClientBuilder; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.CosmosContainer; +import com.azure.data.cosmos.CosmosDatabase; +import com.azure.data.cosmos.CosmosItemProperties; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.internal.*; +import com.azure.data.cosmos.Resource; +import com.azure.data.cosmos.internal.Utils.ValueHolder; +import com.azure.data.cosmos.internal.query.CompositeContinuationToken; +import com.azure.data.cosmos.internal.routing.Range; +import io.reactivex.subscribers.TestSubscriber; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +import static com.azure.data.cosmos.CommonsBridgeInternal.partitionKeyRangeIdInternal; +import static org.assertj.core.api.Assertions.assertThat; + +public class ParallelDocumentQueryTest extends TestSuiteBase { + private CosmosDatabase createdDatabase; + private CosmosContainer createdCollection; + private List createdDocuments; + + private CosmosClient client; + + public String getCollectionLink() { + return TestUtils.getCollectionNameLink(createdDatabase.id(), createdCollection.id()); + } + + @Factory(dataProvider = "clientBuildersWithDirect") + public ParallelDocumentQueryTest(CosmosClientBuilder clientBuilder) { + super(clientBuilder); + } + + @DataProvider(name = "queryMetricsArgProvider") + public Object[][] queryMetricsArgProvider() { + return new Object[][]{ + {true}, + {false}, + }; + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "queryMetricsArgProvider") + public void queryDocuments(boolean qmEnabled) { + String query = "SELECT * from c where c.prop = 99"; + FeedOptions options = new FeedOptions(); + options.maxItemCount(5); + options.enableCrossPartitionQuery(true); + options.populateQueryMetrics(qmEnabled); + options.maxDegreeOfParallelism(2); + Flux> queryObservable = createdCollection.queryItems(query, options); + + List expectedDocs = createdDocuments.stream().filter(d -> 99 == d.getInt("prop") ).collect(Collectors.toList()); + assertThat(expectedDocs).isNotEmpty(); + + FeedResponseListValidator validator = new FeedResponseListValidator.Builder() + .totalSize(expectedDocs.size()) + .exactlyContainsInAnyOrder(expectedDocs.stream().map(d -> d.resourceId()).collect(Collectors.toList())) + .allPagesSatisfy(new FeedResponseValidator.Builder() + .requestChargeGreaterThanOrEqualTo(1.0).build()) + .hasValidQueryMetrics(qmEnabled) + .build(); + + validateQuerySuccess(queryObservable, validator, TIMEOUT); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void queryMetricEquality() throws Exception { + String query = "SELECT * from c 
where c.prop = 99";
+        FeedOptions options = new FeedOptions();
+        options.maxItemCount(5);
+        options.enableCrossPartitionQuery(true);
+        options.populateQueryMetrics(true);
+        options.maxDegreeOfParallelism(0);
+
+        Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options);
+        List<FeedResponse<CosmosItemProperties>> resultList1 = queryObservable.collectList().block();
+
+        options.maxDegreeOfParallelism(4);
+        Flux<FeedResponse<CosmosItemProperties>> threadedQueryObs = createdCollection.queryItems(query, options);
+        List<FeedResponse<CosmosItemProperties>> resultList2 = threadedQueryObs.collectList().block();
+
+        assertThat(resultList1.size()).isEqualTo(resultList2.size());
+        for (int i = 0; i < resultList1.size(); i++) {
+            compareQueryMetrics(BridgeInternal.queryMetricsFromFeedResponse(resultList1.get(i)),
+                    BridgeInternal.queryMetricsFromFeedResponse(resultList2.get(i)));
+        }
+    }
+
+    private void compareQueryMetrics(Map<String, QueryMetrics> qm1, Map<String, QueryMetrics> qm2) {
+        assertThat(qm1.keySet().size()).isEqualTo(qm2.keySet().size());
+        QueryMetrics queryMetrics1 = BridgeInternal.createQueryMetricsFromCollection(qm1.values());
+        QueryMetrics queryMetrics2 = BridgeInternal.createQueryMetricsFromCollection(qm2.values());
+        assertThat(queryMetrics1.getRetrievedDocumentSize()).isEqualTo(queryMetrics2.getRetrievedDocumentSize());
+        assertThat(queryMetrics1.getRetrievedDocumentCount()).isEqualTo(queryMetrics2.getRetrievedDocumentCount());
+        assertThat(queryMetrics1.getIndexHitDocumentCount()).isEqualTo(queryMetrics2.getIndexHitDocumentCount());
+        assertThat(queryMetrics1.getOutputDocumentCount()).isEqualTo(queryMetrics2.getOutputDocumentCount());
+        assertThat(queryMetrics1.getOutputDocumentSize()).isEqualTo(queryMetrics2.getOutputDocumentSize());
+        assertThat(BridgeInternal.getClientSideMetrics(queryMetrics1).getRequestCharge())
+                .isEqualTo(BridgeInternal.getClientSideMetrics(queryMetrics2).getRequestCharge());
+    }
+
+    @Test(groups = { "simple" }, timeOut = TIMEOUT)
+    public void queryDocuments_NoResults() {
+        String query = "SELECT * from root r where r.id = '2'";
+        FeedOptions options = new FeedOptions();
+        options.enableCrossPartitionQuery(true);
+        Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options);
+
+        FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>()
+                .containsExactly(new ArrayList<>())
+                .numberOfPagesIsGreaterThanOrEqualTo(1)
+                .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>()
+                        .pageSizeIsLessThanOrEqualTo(0)
+                        .requestChargeGreaterThanOrEqualTo(1.0).build())
+                .build();
+        validateQuerySuccess(queryObservable, validator);
+    }
+
+    @Test(groups = { "simple" }, timeOut = 2 * TIMEOUT)
+    public void queryDocumentsWithPageSize() {
+        String query = "SELECT * from root";
+        FeedOptions options = new FeedOptions();
+        int pageSize = 3;
+        options.maxItemCount(pageSize);
+        options.maxDegreeOfParallelism(-1);
+        options.enableCrossPartitionQuery(true);
+        Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options);
+
+        List<CosmosItemProperties> expectedDocs = createdDocuments;
+        assertThat(expectedDocs).isNotEmpty();
+
+        FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator
+                .Builder<CosmosItemProperties>()
+                .exactlyContainsInAnyOrder(expectedDocs
+                        .stream()
+                        .map(d -> d.resourceId())
+                        .collect(Collectors.toList()))
+                .numberOfPagesIsGreaterThanOrEqualTo((expectedDocs.size() + 1) / 3)
+                .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>()
+                        .requestChargeGreaterThanOrEqualTo(1.0)
+                        .pageSizeIsLessThanOrEqualTo(pageSize)
+                        .build())
+                .build();
+        validateQuerySuccess(queryObservable, validator, 2 * subscriberValidationTimeout);
+    }
+
+    @Test(groups = { "simple" }, timeOut = TIMEOUT)
+    public void
invalidQuerySyntax() { + String query = "I am an invalid query"; + FeedOptions options = new FeedOptions(); + options.enableCrossPartitionQuery(true); + Flux> queryObservable = createdCollection.queryItems(query, options); + + FailureValidator validator = new FailureValidator.Builder() + .instanceOf(CosmosClientException.class) + .statusCode(400) + .notNullActivityId() + .build(); + validateQueryFailure(queryObservable, validator); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void crossPartitionQueryNotEnabled() { + String query = "SELECT * from root"; + FeedOptions options = new FeedOptions(); + Flux> queryObservable = createdCollection.queryItems(query, options); + + FailureValidator validator = new FailureValidator.Builder() + .instanceOf(CosmosClientException.class) + .statusCode(400) + .build(); + validateQueryFailure(queryObservable, validator); + } + + @Test(groups = { "simple" }, timeOut = 2 * TIMEOUT) + public void partitionKeyRangeId() { + int sum = 0; + + for (String partitionKeyRangeId : + CosmosBridgeInternal.getAsyncDocumentClient(client).readPartitionKeyRanges(getCollectionLink(), null) + .flatMap(p -> Flux.fromIterable(p.results())) + .map(Resource::id).collectList().single().block()) { + String query = "SELECT * from root"; + FeedOptions options = new FeedOptions(); + partitionKeyRangeIdInternal(options, partitionKeyRangeId); + int queryResultCount = createdCollection.queryItems(query, options) + .flatMap(p -> Flux.fromIterable(p.results())) + .collectList().block().size(); + + sum += queryResultCount; + } + + assertThat(sum).isEqualTo(createdDocuments.size()); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void compositeContinuationTokenRoundTrip() throws Exception { + { + // Positive + CompositeContinuationToken compositeContinuationToken = new CompositeContinuationToken("asdf", + new Range("A", "D", false, true)); + String serialized = compositeContinuationToken.toString(); + ValueHolder outCompositeContinuationToken = new ValueHolder(); + boolean succeeed = CompositeContinuationToken.tryParse(serialized, outCompositeContinuationToken); + assertThat(succeeed).isTrue(); + CompositeContinuationToken deserialized = outCompositeContinuationToken.v; + String token = deserialized.getToken(); + Range range = deserialized.getRange(); + assertThat(token).isEqualTo("asdf"); + assertThat(range.getMin()).isEqualTo("A"); + assertThat(range.getMax()).isEqualTo("D"); + assertThat(range.isMinInclusive()).isEqualTo(false); + assertThat(range.isMaxInclusive()).isEqualTo(true); + } + + { + // Negative + ValueHolder outCompositeContinuationToken = new ValueHolder(); + boolean succeeed = CompositeContinuationToken.tryParse("{\"property\" : \"not a valid composite continuation token\"}", outCompositeContinuationToken); + assertThat(succeeed).isFalse(); + } + + { + // Negative - GATEWAY composite continuation token + ValueHolder outCompositeContinuationToken = new ValueHolder(); + boolean succeeed = CompositeContinuationToken.tryParse("{\"token\":\"-RID:tZFQAImzNLQLAAAAAAAAAA==#RT:1#TRC:10\",\"range\":{\"min\":\"\",\"max\":\"FF\"}}", outCompositeContinuationToken); + assertThat(succeeed).isFalse(); + } + } + + // TODO: This test has been timing out on build, related work item - https://msdata.visualstudio.com/CosmosDB/_workitems/edit/402438/ + @Test(groups = { "non-emulator" }, timeOut = TIMEOUT * 10) + public void queryDocumentsWithCompositeContinuationTokens() throws Exception { + String query = "SELECT * FROM c"; + + // Get Expected + List expectedDocs 
= new ArrayList<>(createdDocuments); + assertThat(expectedDocs).isNotEmpty(); + + this.queryWithContinuationTokensAndPageSizes(query, new int[] {1, 10, 100}, expectedDocs); + } + + @BeforeClass(groups = { "simple", "non-emulator" }, timeOut = 2 * SETUP_TIMEOUT) + public void beforeClass() { + client = clientBuilder().build(); + createdDatabase = getSharedCosmosDatabase(client); + createdCollection = getSharedMultiPartitionCosmosContainer(client); + truncateCollection(createdCollection); + List docDefList = new ArrayList<>(); + for(int i = 0; i < 13; i++) { + docDefList.add(getDocumentDefinition(i)); + } + + for(int i = 0; i < 21; i++) { + docDefList.add(getDocumentDefinition(99)); + } + + createdDocuments = bulkInsertBlocking(createdCollection, docDefList); + + waitIfNeededForReplicasToCatchUp(clientBuilder()); + } + + @AfterClass(groups = { "simple", "non-emulator" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeClose(client); + } + + private static CosmosItemProperties getDocumentDefinition(int cnt) { + String uuid = UUID.randomUUID().toString(); + CosmosItemProperties doc = new CosmosItemProperties(String.format("{ " + + "\"id\": \"%s\", " + + "\"prop\" : %d, " + + "\"mypk\": \"%s\", " + + "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]" + + "}" + , uuid, cnt, uuid)); + return doc; + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT, enabled = false) + public void invalidQuerySytax() throws Exception { + + String query = "I am an invalid query"; + FeedOptions options = new FeedOptions(); + options.enableCrossPartitionQuery(true); + Flux> queryObservable = createdCollection.queryItems(query, options); + + FailureValidator validator = new FailureValidator.Builder().instanceOf(CosmosClientException.class) + .statusCode(400).notNullActivityId().build(); + validateQueryFailure(queryObservable, validator); + } + + public CosmosItemProperties createDocument(CosmosContainer cosmosContainer, int cnt) throws CosmosClientException { + + CosmosItemProperties docDefinition = getDocumentDefinition(cnt); + + return cosmosContainer.createItem(docDefinition).block().properties(); + } + + private void queryWithContinuationTokensAndPageSizes(String query, int[] pageSizes, List expectedDocs) { + for (int pageSize : pageSizes) { + List receivedDocuments = this.queryWithContinuationTokens(query, pageSize); + List actualIds = new ArrayList(); + for (CosmosItemProperties document : receivedDocuments) { + actualIds.add(document.resourceId()); + } + + List expectedIds = new ArrayList(); + for (CosmosItemProperties document : expectedDocs) { + expectedIds.add(document.resourceId()); + } + + assertThat(actualIds).containsOnlyElementsOf(expectedIds); + } + } + + private List queryWithContinuationTokens(String query, int pageSize) { + String requestContinuation = null; + List continuationTokens = new ArrayList(); + List receivedDocuments = new ArrayList(); + do { + FeedOptions options = new FeedOptions(); + options.maxItemCount(pageSize); + options.enableCrossPartitionQuery(true); + options.maxDegreeOfParallelism(2); + options.requestContinuation(requestContinuation); + Flux> queryObservable = createdCollection.queryItems(query, options); + + TestSubscriber> testSubscriber = new TestSubscriber<>(); + queryObservable.subscribe(testSubscriber); + testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS); + testSubscriber.assertNoErrors(); + testSubscriber.assertComplete(); + + FeedResponse firstPage = (FeedResponse) testSubscriber.getEvents().get(0).get(0); + 
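+            // Record this page's continuation token and results, then keep paging until the service
+            // returns a null continuation.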
requestContinuation = firstPage.continuationToken(); + receivedDocuments.addAll(firstPage.results()); + continuationTokens.add(requestContinuation); + } while (requestContinuation != null); + + return receivedDocuments; + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ParsingEnvTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ParsingEnvTest.java new file mode 100644 index 0000000000000..91828d54395d4 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ParsingEnvTest.java @@ -0,0 +1,76 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.internal.directconnectivity.Protocol; +import org.testng.annotations.Test; + +import static org.assertj.core.api.Assertions.assertThat; + +public class ParsingEnvTest { + + @Test(groups = "unit") + public void parseDesiredConsistencies() { + assertThat(TestSuiteBase.parseDesiredConsistencies("[ \"BoundedStaleness\" ]")).containsExactly(ConsistencyLevel.BOUNDED_STALENESS); + assertThat(TestSuiteBase.parseDesiredConsistencies("[ \"Session\" , \"Strong\" ]")).containsExactly( + ConsistencyLevel.SESSION, ConsistencyLevel.STRONG); + } + + @Test(groups = "unit") + public void parseDesiredConsistencies_null() { + assertThat(TestSuiteBase.parseDesiredConsistencies(null)).isNull(); + } + + @Test(groups = "unit") + public void lowerConsistencies() { + assertThat(TestSuiteBase.allEqualOrLowerConsistencies(ConsistencyLevel.SESSION)) + .containsExactly(ConsistencyLevel.SESSION, ConsistencyLevel.CONSISTENT_PREFIX, ConsistencyLevel.EVENTUAL); + } + + @Test(groups = "unit") + public void parseAccountConsistency() { + assertThat(TestSuiteBase.parseConsistency("Strong")).isEqualTo(ConsistencyLevel.STRONG); + assertThat(TestSuiteBase.parseConsistency("Session")).isEqualTo(ConsistencyLevel.SESSION); + assertThat(TestSuiteBase.parseConsistency("BoundedStaleness")).isEqualTo(ConsistencyLevel.BOUNDED_STALENESS); + assertThat(TestSuiteBase.parseConsistency("ConsistentPrefix")).isEqualTo(ConsistencyLevel.CONSISTENT_PREFIX); + assertThat(TestSuiteBase.parseConsistency("Eventual")).isEqualTo(ConsistencyLevel.EVENTUAL); + } + + @Test(groups = "unit") + public void parsePreferredLocation() { + assertThat(TestSuiteBase.parsePreferredLocation("[ \"central us\" , \"central us2\" ]")) + 
.containsExactly("central us", "central us2"); + } + + @Test(groups = "unit") + public void parsePreferredLocation_null() { + assertThat(TestSuiteBase.parsePreferredLocation(null)).isNull(); + } + + @Test(groups = "unit") + public void protocols() { + assertThat(TestSuiteBase.parseProtocols("[ \"Tcp\" , \"Https\" ]")).containsExactly(Protocol.TCP, Protocol.HTTPS); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/PermissionCrudTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/PermissionCrudTest.java new file mode 100644 index 0000000000000..141de2536d86d --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/PermissionCrudTest.java @@ -0,0 +1,238 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.CosmosClient; +import com.azure.data.cosmos.CosmosClientBuilder; +import com.azure.data.cosmos.CosmosDatabase; +import com.azure.data.cosmos.CosmosDatabaseForTest; +import com.azure.data.cosmos.CosmosPermission; +import com.azure.data.cosmos.CosmosPermissionResponse; +import com.azure.data.cosmos.CosmosPermissionProperties; +import com.azure.data.cosmos.CosmosResponseValidator; +import com.azure.data.cosmos.CosmosUser; +import com.azure.data.cosmos.CosmosUserProperties; +import com.azure.data.cosmos.PermissionMode; +import com.azure.data.cosmos.internal.FailureValidator; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Mono; + +import java.util.UUID; + +//TODO: change to use external TestSuiteBase +public class PermissionCrudTest extends TestSuiteBase { + + private CosmosDatabase createdDatabase; + private CosmosUser createdUser; + private final String databaseId = CosmosDatabaseForTest.generateId(); + + private CosmosClient client; + + @Factory(dataProvider = "clientBuilders") + public PermissionCrudTest(CosmosClientBuilder clientBuilder) { + super(clientBuilder); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void createPermission() throws Exception { + + createdUser = safeCreateUser(client, createdDatabase.id(), getUserDefinition()); + //create permission + CosmosPermissionProperties permissionSettings = new CosmosPermissionProperties() + .id(UUID.randomUUID().toString()) + .permissionMode(PermissionMode.READ) + .resourceLink("dbs/AQAAAA==/colls/AQAAAJ0fgTc="); + + Mono createObservable = createdUser.createPermission(permissionSettings, null); + + // validate permission creation + CosmosResponseValidator validator = new CosmosResponseValidator.Builder() + .withId(permissionSettings.id()) + .withPermissionMode(PermissionMode.READ) + .withPermissionResourceLink("dbs/AQAAAA==/colls/AQAAAJ0fgTc=") + .notNullEtag() + .build(); + validateSuccess(createObservable, validator); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void readPermission() throws Exception { + createdUser = safeCreateUser(client, createdDatabase.id(), getUserDefinition()); + + // create permission + CosmosPermissionProperties permissionSettings = new CosmosPermissionProperties() + .id(UUID.randomUUID().toString()) + .permissionMode(PermissionMode.READ) + .resourceLink("dbs/AQAAAA==/colls/AQAAAJ0fgTc="); + CosmosPermissionResponse readBackPermission = createdUser.createPermission(permissionSettings, null) + .block(); + + // read Permission + Mono readObservable = readBackPermission.permission().read(null); + + // validate permission read + CosmosResponseValidator validator = new CosmosResponseValidator.Builder() + .withId(permissionSettings.id()) + .withPermissionMode(PermissionMode.READ) + .withPermissionResourceLink("dbs/AQAAAA==/colls/AQAAAJ0fgTc=") + .notNullEtag() + .build(); + validateSuccess(readObservable, validator); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void deletePermission() throws Exception { + + createdUser = safeCreateUser(client, createdDatabase.id(), getUserDefinition()); + + // create permission + CosmosPermissionProperties permissionSettings = new CosmosPermissionProperties() + .id(UUID.randomUUID().toString()) + .permissionMode(PermissionMode.READ) + .resourceLink("dbs/AQAAAA==/colls/AQAAAJ0fgTc="); + CosmosPermissionResponse 
readBackPermission = createdUser.createPermission(permissionSettings, null) + .block(); + // delete + Mono deleteObservable = readBackPermission.permission() + .delete(null); + + // validate delete permission + CosmosResponseValidator validator = new CosmosResponseValidator.Builder() + .nullResource() + .build(); + validateSuccess(deleteObservable, validator); + + waitIfNeededForReplicasToCatchUp(clientBuilder()); + + // attempt to read the permission which was deleted + Mono readObservable = readBackPermission.permission() + .read( null); + FailureValidator notFoundValidator = new FailureValidator.Builder().resourceNotFound().build(); + validateFailure(readObservable, notFoundValidator); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void upsertPermission() throws Exception { + + createdUser = safeCreateUser(client, createdDatabase.id(), getUserDefinition()); + + // create permission + CosmosPermissionProperties permissionSettings = new CosmosPermissionProperties() + .id(UUID.randomUUID().toString()) + .permissionMode(PermissionMode.READ) + .resourceLink("dbs/AQAAAA==/colls/AQAAAJ0fgTc="); + CosmosPermissionResponse readBackPermissionResponse = createdUser.createPermission(permissionSettings, null) + .block(); + CosmosPermissionProperties readBackPermission = readBackPermissionResponse.properties(); + // read Permission + Mono readObservable = readBackPermissionResponse.permission() + .read( null); + + // validate permission creation + CosmosResponseValidator validator = new CosmosResponseValidator.Builder() + .withId(readBackPermission.id()) + .withPermissionMode(PermissionMode.READ) + .withPermissionResourceLink("dbs/AQAAAA==/colls/AQAAAJ0fgTc=") + .notNullEtag() + .build(); + validateSuccess(readObservable, validator); + + //update permission + readBackPermission = readBackPermission.permissionMode(PermissionMode.ALL); + + Mono updateObservable = createdUser.upsertPermission(readBackPermission, null); + + // validate permission update + CosmosResponseValidator validatorForUpdate = new CosmosResponseValidator.Builder() + .withId(readBackPermission.id()) + .withPermissionMode(PermissionMode.ALL) + .withPermissionResourceLink("dbs/AQAAAA==/colls/AQAAAJ0fgTc=") + .notNullEtag() + .build(); + validateSuccess(updateObservable, validatorForUpdate); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void replacePermission() throws Exception { + + createdUser = safeCreateUser(client, createdDatabase.id(), getUserDefinition()); + + String id = UUID.randomUUID().toString(); + // create permission + CosmosPermissionProperties permissionSettings = new CosmosPermissionProperties() + .id(id) + .permissionMode(PermissionMode.READ) + .resourceLink("dbs/AQAAAA==/colls/AQAAAJ0fgTc="); + CosmosPermissionResponse readBackPermissionResponse = createdUser.createPermission(permissionSettings, null) + .block(); + // read Permission + Mono readObservable = readBackPermissionResponse.permission() + .read(null); + + // validate permission creation + CosmosResponseValidator validator = new CosmosResponseValidator.Builder() + .withId(readBackPermissionResponse.permission().id()) + .withPermissionMode(PermissionMode.READ) + .withPermissionResourceLink("dbs/AQAAAA==/colls/AQAAAJ0fgTc=") + .notNullEtag() + .build(); + validateSuccess(readObservable, validator); + + //update permission + CosmosPermissionProperties readBackPermission = readBackPermissionResponse.properties(); + readBackPermission = readBackPermission.permissionMode(PermissionMode.ALL); + + CosmosPermission 
cosmosPermission = createdUser.getPermission(id); + Mono updateObservable = readBackPermissionResponse.permission() + .replace(readBackPermission, null); + + // validate permission replace + CosmosResponseValidator validatorForUpdate = new CosmosResponseValidator.Builder() + .withId(readBackPermission.id()) + .withPermissionMode(PermissionMode.ALL) + .withPermissionResourceLink("dbs/AQAAAA==/colls/AQAAAJ0fgTc=") + .notNullEtag() + .build(); + validateSuccess(updateObservable, validatorForUpdate); + } + + @BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT) + public void beforeClass() { + client = clientBuilder().build(); + createdDatabase = createDatabase(client, databaseId); + } + + @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeClose(client); + } + + private static CosmosUserProperties getUserDefinition() { + return new CosmosUserProperties() + .id(UUID.randomUUID().toString()); + } + +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/PermissionQueryTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/PermissionQueryTest.java new file mode 100644 index 0000000000000..bf24efffd94ab --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/PermissionQueryTest.java @@ -0,0 +1,198 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.internal.AsyncDocumentClient; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.internal.Database; +import com.azure.data.cosmos.internal.*; +import com.azure.data.cosmos.internal.DocumentCollection; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.PermissionMode; +import com.azure.data.cosmos.internal.TestSuiteBase; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; + +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.stream.Collectors; + +import static org.assertj.core.api.Assertions.assertThat; + +//TODO: change to use external TestSuiteBase +public class PermissionQueryTest extends TestSuiteBase { + + public final String databaseId = DatabaseForTest.generateId(); + + private Database createdDatabase; + private User createdUser; + private List createdPermissions = new ArrayList<>(); + + private AsyncDocumentClient client; + + @Factory(dataProvider = "clientBuilders") + public PermissionQueryTest(AsyncDocumentClient.Builder clientBuilder) { + super(clientBuilder); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void queryWithFilter() throws Exception { + + String filterId = createdPermissions.get(0).id(); + String query = String.format("SELECT * from c where c.id = '%s'", filterId); + + FeedOptions options = new FeedOptions(); + options.maxItemCount(5); + Flux> queryObservable = client + .queryPermissions(getUserLink(), query, options); + + List expectedDocs = createdPermissions.stream().filter(sp -> filterId.equals(sp.id()) ).collect(Collectors.toList()); + assertThat(expectedDocs).isNotEmpty(); + + int expectedPageSize = (expectedDocs.size() + options.maxItemCount() - 1) / options.maxItemCount(); + + FeedResponseListValidator validator = new FeedResponseListValidator.Builder() + .totalSize(expectedDocs.size()) + .exactlyContainsInAnyOrder(expectedDocs.stream().map(d -> d.resourceId()).collect(Collectors.toList())) + .numberOfPages(expectedPageSize) + .pageSatisfy(0, new FeedResponseValidator.Builder() + .requestChargeGreaterThanOrEqualTo(1.0).build()) + .build(); + validateQuerySuccess(queryObservable, validator, TIMEOUT); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void query_NoResults() throws Exception { + + String query = "SELECT * from root r where r.id = '2'"; + FeedOptions options = new FeedOptions(); + options.enableCrossPartitionQuery(true); + Flux> queryObservable = client + .queryPermissions(getUserLink(), query, options); + + FeedResponseListValidator validator = new FeedResponseListValidator.Builder() + .containsExactly(new ArrayList<>()) + .numberOfPages(1) + .pageSatisfy(0, new FeedResponseValidator.Builder() + .requestChargeGreaterThanOrEqualTo(1.0).build()) + .build(); + validateQuerySuccess(queryObservable, validator); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void queryAll() throws Exception { + + String query = "SELECT * from root"; + FeedOptions options = new FeedOptions(); + options.maxItemCount(3); + options.enableCrossPartitionQuery(true); + Flux> queryObservable = client + .queryPermissions(getUserLink(), query, options); + + int expectedPageSize = (createdPermissions.size() + options.maxItemCount() - 1) / options.maxItemCount(); + + 
FeedResponseListValidator validator = new FeedResponseListValidator + .Builder() + .exactlyContainsInAnyOrder(createdPermissions + .stream() + .map(d -> d.resourceId()) + .collect(Collectors.toList())) + .numberOfPages(expectedPageSize) + .allPagesSatisfy(new FeedResponseValidator.Builder() + .requestChargeGreaterThanOrEqualTo(1.0).build()) + .build(); + validateQuerySuccess(queryObservable, validator); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void invalidQuerySytax() throws Exception { + String query = "I am an invalid query"; + FeedOptions options = new FeedOptions(); + options.enableCrossPartitionQuery(true); + Flux> queryObservable = client + .queryPermissions(getUserLink(), query, options); + + FailureValidator validator = new FailureValidator.Builder() + .instanceOf(CosmosClientException.class) + .statusCode(400) + .notNullActivityId() + .build(); + validateQueryFailure(queryObservable, validator); + } + + @BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT) + public void beforeClass() { + client = clientBuilder().build(); + Database d = new Database(); + d.id(databaseId); + createdDatabase = createDatabase(client, d); + createdUser = safeCreateUser(client, createdDatabase.id(), getUserDefinition()); + + for(int i = 0; i < 5; i++) { + createdPermissions.add(createPermissions(client, i)); + } + + waitIfNeededForReplicasToCatchUp(clientBuilder()); + } + + @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeDeleteDatabase(client, createdDatabase); + safeClose(client); + } + + private static User getUserDefinition() { + User user = new User(); + user.id(UUID.randomUUID().toString()); + return user; + } + + public Permission createPermissions(AsyncDocumentClient client, int index) { + DocumentCollection collection = new DocumentCollection(); + collection.id(UUID.randomUUID().toString()); + + Permission permission = new Permission(); + permission.id(UUID.randomUUID().toString()); + permission.setPermissionMode(PermissionMode.READ); + permission.setResourceLink("dbs/AQAAAA==/colls/AQAAAJ0fgT" + Integer.toString(index) + "="); + + return client.createPermission(getUserLink(), permission, null).single().block().getResource(); + } + + private String getUserLink() { + return "dbs/" + getDatabaseId() + "/users/" + getUserId(); + } + + private String getDatabaseId() { + return createdDatabase.id(); + } + + private String getUserId() { + return createdUser.id(); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ProxyHostTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ProxyHostTest.java new file mode 100644 index 0000000000000..e7a7315141be9 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ProxyHostTest.java @@ -0,0 +1,184 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.ConnectionPolicy; +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.CosmosClient; +import com.azure.data.cosmos.CosmosContainer; +import com.azure.data.cosmos.CosmosDatabase; +import com.azure.data.cosmos.CosmosItemProperties; +import com.azure.data.cosmos.CosmosItemRequestOptions; +import com.azure.data.cosmos.CosmosItemResponse; +import com.azure.data.cosmos.CosmosResponseValidator; +import com.azure.data.cosmos.internal.TestConfigurations; +import com.azure.data.cosmos.rx.proxy.HttpProxyServer; +import org.apache.log4j.Level; +import org.apache.log4j.LogManager; +import org.apache.log4j.Logger; +import org.apache.log4j.PatternLayout; +import org.apache.log4j.PropertyConfigurator; +import org.apache.log4j.WriterAppender; +import org.testng.annotations.AfterClass; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; +import reactor.core.publisher.Mono; + +import java.io.StringWriter; +import java.lang.reflect.Method; +import java.util.UUID; +import java.util.concurrent.TimeUnit; + +import static org.assertj.core.api.Assertions.assertThat; + +/** + * This class helps to test proxy host scenarios where a user can provide a proxy + * host server during AsyncDocumentClient initialization, so that all of the client's + * requests go through that particular host. + * + */ +public class ProxyHostTest extends TestSuiteBase { + + private static CosmosDatabase createdDatabase; + private static CosmosContainer createdCollection; + + private CosmosClient client; + private final String PROXY_HOST = "localhost"; + private final int PROXY_PORT = 8080; + private HttpProxyServer httpProxyServer; + + public ProxyHostTest() { + super(createGatewayRxDocumentClient()); + } + + @BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT) + public void beforeClass() throws Exception { + client = clientBuilder().build(); + createdDatabase = getSharedCosmosDatabase(client); + createdCollection = getSharedMultiPartitionCosmosContainer(client); + httpProxyServer = new HttpProxyServer(); + httpProxyServer.start(); + // wait for proxy server to be ready + TimeUnit.SECONDS.sleep(1); + } + + /** + * This test tries to create a document via the HTTP proxy server and validates it.
+ * + * @throws Exception + */ + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void createDocumentWithValidHttpProxy() throws Exception { + CosmosClient clientWithRightProxy = null; + try { + ConnectionPolicy connectionPolicy = new ConnectionPolicy(); + connectionPolicy.proxy(PROXY_HOST, PROXY_PORT); + clientWithRightProxy = CosmosClient.builder().endpoint(TestConfigurations.HOST) + .key(TestConfigurations.MASTER_KEY) + .connectionPolicy(connectionPolicy) + .consistencyLevel(ConsistencyLevel.SESSION).build(); + CosmosItemProperties docDefinition = getDocumentDefinition(); + Mono<CosmosItemResponse> createObservable = clientWithRightProxy.getDatabase(createdDatabase.id()).getContainer(createdCollection.id()) + .createItem(docDefinition, new CosmosItemRequestOptions()); + CosmosResponseValidator<CosmosItemResponse> validator = new CosmosResponseValidator.Builder<CosmosItemResponse>() + .withId(docDefinition.id()) + .build(); + validateSuccess(createObservable, validator); + } finally { + safeClose(clientWithRightProxy); + } + } + + /** + * This test tries to create a document via the HTTP proxy server with Netty wire logging enabled and validates it. + * + * @throws Exception + */ + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void createDocumentWithValidHttpProxyWithNettyWireLogging() throws Exception { + LogManager.getRootLogger().setLevel(Level.INFO); + LogManager.getLogger(LogLevelTest.NETWORK_LOGGING_CATEGORY).setLevel(Level.TRACE); + CosmosClient clientWithRightProxy = null; + try { + StringWriter consoleWriter = new StringWriter(); + WriterAppender appender = new WriterAppender(new PatternLayout(), consoleWriter); + Logger.getLogger(LogLevelTest.NETWORK_LOGGING_CATEGORY).addAppender(appender); + + ConnectionPolicy connectionPolicy = new ConnectionPolicy(); + connectionPolicy.proxy(PROXY_HOST, PROXY_PORT); + clientWithRightProxy = CosmosClient.builder().endpoint(TestConfigurations.HOST) + .key(TestConfigurations.MASTER_KEY) + .connectionPolicy(connectionPolicy) + .consistencyLevel(ConsistencyLevel.SESSION).build(); + CosmosItemProperties docDefinition = getDocumentDefinition(); + Mono<CosmosItemResponse> createObservable = clientWithRightProxy.getDatabase(createdDatabase.id()).getContainer(createdCollection.id()) + .createItem(docDefinition, new CosmosItemRequestOptions()); + CosmosResponseValidator<CosmosItemResponse> validator = new CosmosResponseValidator.Builder<CosmosItemResponse>() + .withId(docDefinition.id()) + .build(); + validateSuccess(createObservable, validator); + + assertThat(consoleWriter.toString()).contains(LogLevelTest.LOG_PATTERN_1); + assertThat(consoleWriter.toString()).contains(LogLevelTest.LOG_PATTERN_2); + assertThat(consoleWriter.toString()).contains(LogLevelTest.LOG_PATTERN_3); + } finally { + safeClose(clientWithRightProxy); + } + } + + @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() throws Exception { + safeClose(client); + httpProxyServer.shutDown(); + // wait for proxy server to shut down + TimeUnit.SECONDS.sleep(1); + + LogManager.resetConfiguration(); + PropertyConfigurator.configure(this.getClass().getClassLoader().getResource("log4j.properties")); + } + + @BeforeMethod(groups = { "simple" }) + public void beforeMethod() { + LogManager.resetConfiguration(); + PropertyConfigurator.configure(this.getClass().getClassLoader().getResource("log4j.properties")); + } + + @AfterMethod(groups = { "simple" }) + public void afterMethod(Method method) { + LogManager.resetConfiguration(); + PropertyConfigurator.configure(this.getClass().getClassLoader().getResource("log4j.properties")); + } + + private
CosmosItemProperties getDocumentDefinition() { + String uuid = UUID.randomUUID().toString(); + CosmosItemProperties doc = new CosmosItemProperties(String.format("{ " + + "\"id\": \"%s\", " + + "\"mypk\": \"%s\", " + + "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]" + + "}" + , uuid, uuid)); + return doc; + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ReadFeedCollectionsTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ReadFeedCollectionsTest.java new file mode 100644 index 0000000000000..0cba3b0535bca --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ReadFeedCollectionsTest.java @@ -0,0 +1,112 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.CosmosClient; +import com.azure.data.cosmos.CosmosClientBuilder; +import com.azure.data.cosmos.CosmosContainer; +import com.azure.data.cosmos.CosmosContainerProperties; +import com.azure.data.cosmos.CosmosContainerRequestOptions; +import com.azure.data.cosmos.CosmosDatabase; +import com.azure.data.cosmos.CosmosDatabaseForTest; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.PartitionKeyDefinition; +import com.azure.data.cosmos.internal.FeedResponseListValidator; +import com.azure.data.cosmos.internal.FeedResponseValidator; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; + +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.stream.Collectors; + +public class ReadFeedCollectionsTest extends TestSuiteBase { + + protected static final int FEED_TIMEOUT = 60000; + protected static final int SETUP_TIMEOUT = 60000; + protected static final int SHUTDOWN_TIMEOUT = 20000; + + public final String databaseId = CosmosDatabaseForTest.generateId(); + + private CosmosDatabase createdDatabase; + private List createdCollections = new ArrayList<>(); + + private CosmosClient client; + + @Factory(dataProvider = "clientBuilders") + public ReadFeedCollectionsTest(CosmosClientBuilder clientBuilder) { + super(clientBuilder); + } + + @Test(groups = { "simple" }, timeOut = FEED_TIMEOUT) + public void readCollections() throws Exception { + + FeedOptions options = new FeedOptions(); + options.maxItemCount(2); + + Flux> feedObservable = createdDatabase.readAllContainers(options); + + int expectedPageSize = (createdCollections.size() + options.maxItemCount() - 1) / options.maxItemCount(); + + FeedResponseListValidator validator = new FeedResponseListValidator.Builder() + .totalSize(createdCollections.size()) + .exactlyContainsInAnyOrder(createdCollections.stream().map(d -> d.read().block().properties().resourceId()).collect(Collectors.toList())) + .numberOfPages(expectedPageSize) + .pageSatisfy(0, new FeedResponseValidator.Builder() + .requestChargeGreaterThanOrEqualTo(1.0).build()) + .build(); + + validateQuerySuccess(feedObservable, validator, FEED_TIMEOUT); + + } + + @BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT) + public void beforeClass() { + client = clientBuilder().build(); + createdDatabase = createDatabase(client, databaseId); + + for(int i = 0; i < 3; i++) { + createdCollections.add(createCollections(createdDatabase)); + } + } + + @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeDeleteDatabase(createdDatabase); + safeClose(client); + } + + public CosmosContainer createCollections(CosmosDatabase database) { + PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition(); + ArrayList paths = new ArrayList(); + paths.add("/mypk"); + partitionKeyDef.paths(paths); + CosmosContainerProperties collection = new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDef); + return database.createContainer(collection, new CosmosContainerRequestOptions()).block().container(); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ReadFeedDatabasesTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ReadFeedDatabasesTest.java new file mode 100644 index 
0000000000000..3aebc3b916975 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ReadFeedDatabasesTest.java @@ -0,0 +1,103 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.CosmosClient; +import com.azure.data.cosmos.CosmosClientBuilder; +import com.azure.data.cosmos.CosmosDatabaseProperties; +import com.azure.data.cosmos.CosmosDatabaseRequestOptions; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.internal.FeedResponseListValidator; +import com.azure.data.cosmos.internal.FeedResponseValidator; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; + +import java.net.URISyntaxException; +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.stream.Collectors; + +public class ReadFeedDatabasesTest extends TestSuiteBase { + + private List createdDatabases = new ArrayList<>(); + private List allDatabases = new ArrayList<>(); + + private CosmosClient client; + + @Factory(dataProvider = "clientBuilders") + public ReadFeedDatabasesTest(CosmosClientBuilder clientBuilder) { + super(clientBuilder); + } + + @Test(groups = { "simple" }, timeOut = FEED_TIMEOUT) + public void readDatabases() throws Exception { + + FeedOptions options = new FeedOptions(); + options.maxItemCount(2); + + Flux> feedObservable = client.readAllDatabases(options); + + int expectedPageSize = (allDatabases.size() + options.maxItemCount() - 1) / options.maxItemCount(); + FeedResponseListValidator validator = new FeedResponseListValidator.Builder() + .totalSize(allDatabases.size()) + .exactlyContainsInAnyOrder(allDatabases.stream().map(d -> d.resourceId()).collect(Collectors.toList())) + .numberOfPages(expectedPageSize) + .pageSatisfy(0, new FeedResponseValidator.Builder() + .requestChargeGreaterThanOrEqualTo(1.0).build()) + .build(); + + validateQuerySuccess(feedObservable, validator, FEED_TIMEOUT); + } + + @BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT) + public void beforeClass() throws URISyntaxException { + client = clientBuilder().build(); + allDatabases = client.readAllDatabases(null) + .map(frp -> frp.results()) + .collectList() + .map(list -> list.stream().flatMap(x 
-> x.stream()).collect(Collectors.toList())) + .block(); + for(int i = 0; i < 5; i++) { + createdDatabases.add(createDatabase(client)); + } + allDatabases.addAll(createdDatabases); + } + + public CosmosDatabaseProperties createDatabase(CosmosClient client) { + CosmosDatabaseProperties db = new CosmosDatabaseProperties(UUID.randomUUID().toString()); + return client.createDatabase(db, new CosmosDatabaseRequestOptions()).block().properties(); + } + + @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + for (int i = 0; i < 5; i ++) { + safeDeleteDatabase(client.getDatabase(createdDatabases.get(i).id())); + } + safeClose(client); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ReadFeedDocumentsTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ReadFeedDocumentsTest.java new file mode 100644 index 0000000000000..eee99f3003f1c --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ReadFeedDocumentsTest.java @@ -0,0 +1,138 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.CosmosClient; +import com.azure.data.cosmos.CosmosClientBuilder; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.CosmosContainer; +import com.azure.data.cosmos.CosmosDatabase; +import com.azure.data.cosmos.CosmosItemProperties; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.internal.FailureValidator; +import com.azure.data.cosmos.internal.FeedResponseListValidator; +import com.azure.data.cosmos.internal.FeedResponseValidator; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; + +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.stream.Collectors; + +public class ReadFeedDocumentsTest extends TestSuiteBase { + + private CosmosDatabase createdDatabase; + private CosmosContainer createdCollection; + private List createdDocuments; + + private CosmosClient client; + + @Factory(dataProvider = "clientBuildersWithDirect") + public ReadFeedDocumentsTest(CosmosClientBuilder clientBuilder) { + super(clientBuilder); + } + + @Test(groups = { "simple" }, timeOut = FEED_TIMEOUT) + public void readDocuments() { + FeedOptions options = new FeedOptions(); + options.enableCrossPartitionQuery(true); + options.maxItemCount(2); + + Flux> feedObservable = createdCollection.readAllItems(options); + FeedResponseListValidator validator = new FeedResponseListValidator.Builder() + .totalSize(createdDocuments.size()) + .numberOfPagesIsGreaterThanOrEqualTo(1) + .exactlyContainsInAnyOrder(createdDocuments.stream().map(d -> d.resourceId()).collect(Collectors.toList())) + .allPagesSatisfy(new FeedResponseValidator.Builder() + .requestChargeGreaterThanOrEqualTo(1.0) + .pageSizeIsLessThanOrEqualTo(options.maxItemCount()) + .build()) + .build(); + validateQuerySuccess(feedObservable, validator, FEED_TIMEOUT); + } + + @Test(groups = { "simple" }, timeOut = FEED_TIMEOUT) + public void readDocuments_withoutEnableCrossPartitionQuery() { + FeedOptions options = new FeedOptions(); + options.maxItemCount(2); + + Flux> feedObservable = createdCollection.readAllItems(options); + FailureValidator validator = FailureValidator.builder().instanceOf(CosmosClientException.class) + .statusCode(400) + .errorMessageContains("Cross partition query is required but disabled." 
+ + " Please set x-ms-documentdb-query-enablecrosspartition to true," + + " specify x-ms-documentdb-partitionkey," + + " or revise your query to avoid this exception.") + .build(); + validateQueryFailure(feedObservable, validator, FEED_TIMEOUT); + } + + @BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT, alwaysRun = true) + public void beforeClass() { + client = clientBuilder().build(); + createdCollection = getSharedMultiPartitionCosmosContainer(client); + truncateCollection(createdCollection); + + List docDefList = new ArrayList<>(); + + for(int i = 0; i < 100; i++) { + docDefList.add(getDocumentDefinition()); + } + + createdDocuments = bulkInsertBlocking(createdCollection, docDefList); + waitIfNeededForReplicasToCatchUp(clientBuilder()); + } + + @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeClose(client); + } + + private CosmosItemProperties getDocumentDefinition() { + String uuid = UUID.randomUUID().toString(); + CosmosItemProperties doc = new CosmosItemProperties(String.format("{ " + + "\"id\": \"%s\", " + + "\"mypk\": \"%s\", " + + "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]" + + "}" + , uuid, uuid)); + return doc; + } + + public String getCollectionLink() { + return "dbs/" + getDatabaseId() + "/colls/" + getCollectionId(); + } + + private String getCollectionId() { + return createdCollection.id(); + } + + private String getDatabaseId() { + return createdDatabase.id(); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ReadFeedExceptionHandlingTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ReadFeedExceptionHandlingTest.java new file mode 100644 index 0000000000000..623bbd1090ffa --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ReadFeedExceptionHandlingTest.java @@ -0,0 +1,85 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.CosmosClient; +import com.azure.data.cosmos.CosmosClientBuilder; +import com.azure.data.cosmos.CosmosDatabaseProperties; +import com.azure.data.cosmos.FeedResponse; +import io.reactivex.subscribers.TestSubscriber; +import org.mockito.Mockito; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; + +import java.util.ArrayList; + +import static org.assertj.core.api.Assertions.assertThat; + +public class ReadFeedExceptionHandlingTest extends TestSuiteBase { + + private CosmosClient client; + + @Factory(dataProvider = "clientBuildersWithDirect") + public ReadFeedExceptionHandlingTest(CosmosClientBuilder clientBuilder) { + super(clientBuilder); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void readFeedException() throws Exception { + + ArrayList<CosmosDatabaseProperties> dbs = new ArrayList<CosmosDatabaseProperties>(); + dbs.add(new CosmosDatabaseProperties("db1")); + dbs.add(new CosmosDatabaseProperties("db2")); + + ArrayList<FeedResponse<CosmosDatabaseProperties>> frps = new ArrayList<FeedResponse<CosmosDatabaseProperties>>(); + frps.add(BridgeInternal.createFeedResponse(dbs, null)); + frps.add(BridgeInternal.createFeedResponse(dbs, null)); + + Flux<FeedResponse<CosmosDatabaseProperties>> response = Flux.merge(Flux.fromIterable(frps)) + .mergeWith(Flux.error(BridgeInternal.createCosmosClientException(0))) + .mergeWith(Flux.fromIterable(frps)); + + final CosmosClient mockClient = Mockito.spy(client); + Mockito.when(mockClient.readAllDatabases(null)).thenReturn(response); + TestSubscriber<FeedResponse<CosmosDatabaseProperties>> subscriber = new TestSubscriber<FeedResponse<CosmosDatabaseProperties>>(); + mockClient.readAllDatabases(null).subscribe(subscriber); + assertThat(subscriber.valueCount()).isEqualTo(2); + assertThat(subscriber.assertNotComplete()); + assertThat(subscriber.assertTerminated()); + assertThat(subscriber.errorCount()).isEqualTo(1); + } + + @BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT) + public void beforeClass() { + client = clientBuilder().build(); + } + + @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeClose(this.client); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ReadFeedOffersTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ReadFeedOffersTest.java new file mode 100644 index 0000000000000..69fd0cd1a5028 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ReadFeedOffersTest.java @@ -0,0 +1,123 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.internal.AsyncDocumentClient; +import com.azure.data.cosmos.internal.Database; +import com.azure.data.cosmos.internal.*; +import com.azure.data.cosmos.internal.DocumentCollection; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.PartitionKeyDefinition; +import com.azure.data.cosmos.internal.TestSuiteBase; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.UUID; +import java.util.stream.Collectors; + +//TODO: change to use external TestSuiteBase +public class ReadFeedOffersTest extends TestSuiteBase { + + protected static final int FEED_TIMEOUT = 60000; + protected static final int SETUP_TIMEOUT = 60000; + protected static final int SHUTDOWN_TIMEOUT = 20000; + + public final String databaseId = DatabaseForTest.generateId(); + + private Database createdDatabase; + private List allOffers = new ArrayList<>(); + + private AsyncDocumentClient client; + + @Factory(dataProvider = "clientBuilders") + public ReadFeedOffersTest(AsyncDocumentClient.Builder clientBuilder) { + super(clientBuilder); + } + + @Test(groups = { "emulator" }, timeOut = FEED_TIMEOUT) + public void readOffers() throws Exception { + + FeedOptions options = new FeedOptions(); + options.maxItemCount(2); + + Flux> feedObservable = client.readOffers(options); + + int expectedPageSize = (allOffers.size() + options.maxItemCount() - 1) / options.maxItemCount(); + + FeedResponseListValidator validator = new FeedResponseListValidator.Builder() + .totalSize(allOffers.size()) + .exactlyContainsInAnyOrder(allOffers.stream().map(d -> d.resourceId()).collect(Collectors.toList())) + .numberOfPages(expectedPageSize) + .pageSatisfy(0, new FeedResponseValidator.Builder() + .requestChargeGreaterThanOrEqualTo(1.0).build()) + .build(); + validateQuerySuccess(feedObservable, validator, FEED_TIMEOUT); + } + + @BeforeClass(groups = { "emulator" }, timeOut = SETUP_TIMEOUT) + public void beforeClass() { + client = clientBuilder().build(); + createdDatabase = createDatabase(client, databaseId); + + for(int i = 0; i < 3; i++) { + createCollections(client); + } + + allOffers = client.readOffers(null) + .map(FeedResponse::results) + .collectList() + .map(list -> list.stream().flatMap(Collection::stream).collect(Collectors.toList())) + .single() + .block(); + } + + @AfterClass(groups = { "emulator" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeDeleteDatabase(client, createdDatabase); + safeClose(client); + } + + public DocumentCollection createCollections(AsyncDocumentClient client) { + DocumentCollection collection = new DocumentCollection(); + collection.id(UUID.randomUUID().toString()); + + PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition(); + ArrayList paths = new ArrayList(); + paths.add("/mypk"); + partitionKeyDef.paths(paths); + collection.setPartitionKey(partitionKeyDef); + + return client.createCollection(getDatabaseLink(), collection, 
null).single().block().getResource(); + } + + private String getDatabaseLink() { + return "dbs/" + createdDatabase.id(); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ReadFeedPermissionsTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ReadFeedPermissionsTest.java new file mode 100644 index 0000000000000..7581457b333eb --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ReadFeedPermissionsTest.java @@ -0,0 +1,126 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.internal.AsyncDocumentClient; +import com.azure.data.cosmos.internal.Database; +import com.azure.data.cosmos.internal.*; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.PermissionMode; +import com.azure.data.cosmos.Resource; +import com.azure.data.cosmos.internal.TestSuiteBase; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; + +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.stream.Collectors; + +//TODO: change to use external TestSuiteBase +public class ReadFeedPermissionsTest extends TestSuiteBase { + + public final String databaseId = DatabaseForTest.generateId(); + + private Database createdDatabase; + private User createdUser; + private List createdPermissions = new ArrayList<>(); + + private AsyncDocumentClient client; + + @Factory(dataProvider = "clientBuilders") + public ReadFeedPermissionsTest(AsyncDocumentClient.Builder clientBuilder) { + super(clientBuilder); + } + + @Test(groups = { "simple" }, timeOut = FEED_TIMEOUT) + public void readPermissions() throws Exception { + + FeedOptions options = new FeedOptions(); + options.maxItemCount(2); + + Flux> feedObservable = client.readPermissions(getUserLink(), options); + + int expectedPageSize = (createdPermissions.size() + options.maxItemCount() - 1) / options.maxItemCount(); + + FeedResponseListValidator validator = new FeedResponseListValidator.Builder() + .totalSize(createdPermissions.size()) + .numberOfPages(expectedPageSize) + .exactlyContainsInAnyOrder(createdPermissions.stream().map(Resource::resourceId).collect(Collectors.toList())) + .allPagesSatisfy(new 
FeedResponseValidator.Builder() + .requestChargeGreaterThanOrEqualTo(1.0).build()) + .build(); + validateQuerySuccess(feedObservable, validator, FEED_TIMEOUT); + } + + @BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT) + public void beforeClass() { + client = clientBuilder().build(); + Database d = new Database(); + d.id(databaseId); + createdDatabase = createDatabase(client, d); + createdUser = safeCreateUser(client, createdDatabase.id(), getUserDefinition()); + + for(int i = 0; i < 5; i++) { + createdPermissions.add(createPermissions(client, i)); + } + + waitIfNeededForReplicasToCatchUp(clientBuilder()); + } + + @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeDeleteDatabase(client, databaseId); + safeClose(client); + } + + private static User getUserDefinition() { + User user = new User(); + user.id(UUID.randomUUID().toString()); + return user; + } + + public Permission createPermissions(AsyncDocumentClient client, int index) { + Permission permission = new Permission(); + permission.id(UUID.randomUUID().toString()); + permission.setPermissionMode(PermissionMode.READ); + permission.setResourceLink("dbs/AQAAAA==/colls/AQAAAJ0fgT" + Integer.toString(index) + "="); + return client.createPermission(getUserLink(), permission, null).single().block().getResource(); + } + + private String getUserLink() { + return "dbs/" + getDatabaseId() + "/users/" + getUserId(); + } + + private String getDatabaseId() { + return createdDatabase.id(); + } + + private String getUserId() { + return createdUser.id(); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ReadFeedPkrTests.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ReadFeedPkrTests.java new file mode 100644 index 0000000000000..0b5150755c89d --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ReadFeedPkrTests.java @@ -0,0 +1,87 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.internal.AsyncDocumentClient; +import com.azure.data.cosmos.CosmosBridgeInternal; +import com.azure.data.cosmos.CosmosClientBuilder; +import com.azure.data.cosmos.CosmosContainer; +import com.azure.data.cosmos.CosmosContainerRequestOptions; +import com.azure.data.cosmos.CosmosDatabase; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.internal.FeedResponseListValidator; +import com.azure.data.cosmos.internal.PartitionKeyRange; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; + + +public class ReadFeedPkrTests extends TestSuiteBase { + + private CosmosDatabase createdDatabase; + private CosmosContainer createdCollection; + + private AsyncDocumentClient client; + + @Factory(dataProvider = "clientBuildersWithDirect") + public ReadFeedPkrTests(CosmosClientBuilder clientBuilder) { + super(clientBuilder); + } + + @Test(groups = { "emulator" }, timeOut = FEED_TIMEOUT) + public void readPartitionKeyRanges() throws Exception { + + FeedOptions options = new FeedOptions(); + options.maxItemCount(2); + + Flux> feedObservable = client.readPartitionKeyRanges(getCollectionLink(), options); + + FeedResponseListValidator validator = new FeedResponseListValidator.Builder() + .totalSize(1) + .numberOfPages(1) + .build(); + validateQuerySuccess(feedObservable, validator, FEED_TIMEOUT); + } + + @BeforeClass(groups = { "emulator" }, timeOut = SETUP_TIMEOUT) + public void beforeClass() { + client = CosmosBridgeInternal.getAsyncDocumentClient(clientBuilder().build()); + createdDatabase = getSharedCosmosDatabase(clientBuilder().build()); + createdCollection = createCollection(createdDatabase, + getCollectionDefinition(), + new CosmosContainerRequestOptions()); + } + + @AfterClass(groups = { "emulator" }, timeOut = SETUP_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeDeleteCollection(createdCollection); + client.close(); + } + + private String getCollectionLink() { + return "dbs/" + createdDatabase.id() + "/colls/" + createdCollection.id(); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ReadFeedStoredProceduresTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ReadFeedStoredProceduresTest.java new file mode 100644 index 0000000000000..144e5c6e7c1c0 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ReadFeedStoredProceduresTest.java @@ -0,0 +1,104 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
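The tests above address resources through hand-built name links such as `dbs/{databaseId}/colls/{collectionId}` and `dbs/{databaseId}/users/{userId}` (see `getCollectionLink()` and `getUserLink()`). A small sketch of that link construction, with helper names that are illustrative rather than SDK API:

```java
// Name-based resource links in the form the tests' getCollectionLink()/getUserLink() helpers build.
final class ResourceLinks {
    static String databaseLink(String databaseId) {
        return "dbs/" + databaseId;
    }

    static String collectionLink(String databaseId, String collectionId) {
        return databaseLink(databaseId) + "/colls/" + collectionId;
    }

    static String userLink(String databaseId, String userId) {
        return databaseLink(databaseId) + "/users/" + userId;
    }

    public static void main(String[] args) {
        System.out.println(collectionLink("myDb", "myColl")); // dbs/myDb/colls/myColl
        System.out.println(userLink("myDb", "myUser"));       // dbs/myDb/users/myUser
    }
}
```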
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.CosmosClient; +import com.azure.data.cosmos.CosmosClientBuilder; +import com.azure.data.cosmos.CosmosContainer; +import com.azure.data.cosmos.CosmosStoredProcedureRequestOptions; +import com.azure.data.cosmos.CosmosStoredProcedureProperties; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.internal.FeedResponseListValidator; +import com.azure.data.cosmos.internal.FeedResponseValidator; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; + +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.stream.Collectors; + +public class ReadFeedStoredProceduresTest extends TestSuiteBase { + + private CosmosContainer createdCollection; + private List createdStoredProcedures = new ArrayList<>(); + + private CosmosClient client; + + @Factory(dataProvider = "clientBuildersWithDirect") + public ReadFeedStoredProceduresTest(CosmosClientBuilder clientBuilder) { + super(clientBuilder); + } + + @Test(groups = { "simple" }, timeOut = FEED_TIMEOUT) + public void readStoredProcedures() throws Exception { + + FeedOptions options = new FeedOptions(); + options.maxItemCount(2); + + Flux> feedObservable = createdCollection.getScripts() + .readAllStoredProcedures(options); + + int expectedPageSize = (createdStoredProcedures.size() + options.maxItemCount() - 1) / options.maxItemCount(); + + FeedResponseListValidator validator = new FeedResponseListValidator.Builder() + .totalSize(createdStoredProcedures.size()) + .exactlyContainsInAnyOrder( + createdStoredProcedures.stream().map(d -> d.resourceId()).collect(Collectors.toList())) + .numberOfPages(expectedPageSize) + .allPagesSatisfy(new FeedResponseValidator.Builder() + .requestChargeGreaterThanOrEqualTo(1.0).build()) + .build(); + validateQuerySuccess(feedObservable, validator, FEED_TIMEOUT); + } + + @BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT) + public void beforeClass() { + client = clientBuilder().build(); + createdCollection = getSharedMultiPartitionCosmosContainer(client); + truncateCollection(createdCollection); + + for (int i = 0; i < 5; i++) { + createdStoredProcedures.add(createStoredProcedures(createdCollection)); + } + + waitIfNeededForReplicasToCatchUp(clientBuilder()); + } + + @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeClose(client); + } + + public CosmosStoredProcedureProperties createStoredProcedures(CosmosContainer cosmosContainer) { + CosmosStoredProcedureProperties sproc = new CosmosStoredProcedureProperties(); + sproc.id(UUID.randomUUID().toString()); + sproc.body("function() {var x = 10;}"); + return cosmosContainer.getScripts().createStoredProcedure(sproc, new CosmosStoredProcedureRequestOptions()) + .block().properties(); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ReadFeedTriggersTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ReadFeedTriggersTest.java new file mode 100644 index 0000000000000..18d106d58135c --- 
/dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ReadFeedTriggersTest.java @@ -0,0 +1,105 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.CosmosClient; +import com.azure.data.cosmos.CosmosClientBuilder; +import com.azure.data.cosmos.CosmosContainer; +import com.azure.data.cosmos.CosmosTriggerProperties; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.TriggerOperation; +import com.azure.data.cosmos.TriggerType; +import com.azure.data.cosmos.internal.FeedResponseListValidator; +import com.azure.data.cosmos.internal.FeedResponseValidator; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; + +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.stream.Collectors; + +public class ReadFeedTriggersTest extends TestSuiteBase { + + private CosmosContainer createdCollection; + private List createdTriggers = new ArrayList<>(); + + private CosmosClient client; + + @Factory(dataProvider = "clientBuildersWithDirect") + public ReadFeedTriggersTest(CosmosClientBuilder clientBuilder) { + super(clientBuilder); + } + + @Test(groups = { "simple" }, timeOut = FEED_TIMEOUT) + public void readTriggers() throws Exception { + + FeedOptions options = new FeedOptions(); + options.maxItemCount(2); + + Flux> feedObservable = createdCollection.getScripts().readAllTriggers(options); + + int expectedPageSize = (createdTriggers.size() + options.maxItemCount() - 1) / options.maxItemCount(); + + FeedResponseListValidator validator = new FeedResponseListValidator.Builder() + .totalSize(createdTriggers.size()) + .exactlyContainsInAnyOrder( + createdTriggers.stream().map(d -> d.resourceId()).collect(Collectors.toList())) + .numberOfPages(expectedPageSize) + .allPagesSatisfy(new FeedResponseValidator.Builder() + .requestChargeGreaterThanOrEqualTo(1.0).build()) + .build(); + validateQuerySuccess(feedObservable, validator, FEED_TIMEOUT); + } + + @BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT) + public void beforeClass() { + client = clientBuilder().build(); + createdCollection = getSharedMultiPartitionCosmosContainer(client); + truncateCollection(createdCollection); + + 
for (int i = 0; i < 5; i++) { + this.createdTriggers.add(this.createTriggers(createdCollection)); + } + + waitIfNeededForReplicasToCatchUp(clientBuilder()); + } + + @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeClose(client); + } + + public CosmosTriggerProperties createTriggers(CosmosContainer cosmosContainer) { + CosmosTriggerProperties trigger = new CosmosTriggerProperties(); + trigger.id(UUID.randomUUID().toString()); + trigger.body("function() {var x = 10;}"); + trigger.triggerOperation(TriggerOperation.CREATE); + trigger.triggerType(TriggerType.PRE); + return cosmosContainer.getScripts().createTrigger(trigger).block().properties(); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ReadFeedUdfsTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ReadFeedUdfsTest.java new file mode 100644 index 0000000000000..687ab8268640e --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ReadFeedUdfsTest.java @@ -0,0 +1,118 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
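The script read-feed tests around this point each seed the shared container through the container's `getScripts()` API. A condensed sketch consolidating the three creation helpers (stored procedure, trigger, UDF); `container` is an assumed, already-created `CosmosContainer`, and the JavaScript bodies are trivial placeholders as in the tests:

```java
import com.azure.data.cosmos.CosmosContainer;
import com.azure.data.cosmos.CosmosStoredProcedureProperties;
import com.azure.data.cosmos.CosmosStoredProcedureRequestOptions;
import com.azure.data.cosmos.CosmosTriggerProperties;
import com.azure.data.cosmos.CosmosUserDefinedFunctionProperties;
import com.azure.data.cosmos.TriggerOperation;
import com.azure.data.cosmos.TriggerType;

import java.util.UUID;

// Sketch of server-side script registration, mirroring the helpers in the read-feed tests.
final class ScriptCreationSketch {

    static CosmosStoredProcedureProperties createSproc(CosmosContainer container) {
        CosmosStoredProcedureProperties sproc = new CosmosStoredProcedureProperties();
        sproc.id(UUID.randomUUID().toString());
        sproc.body("function() {var x = 10;}");
        return container.getScripts()
                .createStoredProcedure(sproc, new CosmosStoredProcedureRequestOptions())
                .block().properties();
    }

    static CosmosTriggerProperties createPreTrigger(CosmosContainer container) {
        CosmosTriggerProperties trigger = new CosmosTriggerProperties();
        trigger.id(UUID.randomUUID().toString());
        trigger.body("function() {var x = 10;}");
        trigger.triggerOperation(TriggerOperation.CREATE); // fire on create operations
        trigger.triggerType(TriggerType.PRE);              // run before the operation
        return container.getScripts().createTrigger(trigger).block().properties();
    }

    static CosmosUserDefinedFunctionProperties createUdf(CosmosContainer container) {
        CosmosUserDefinedFunctionProperties udf = new CosmosUserDefinedFunctionProperties();
        udf.id(UUID.randomUUID().toString());
        udf.body("function() {var x = 10;}");
        return container.getScripts().createUserDefinedFunction(udf).block().properties();
    }
}
```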
+ */ +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.CosmosClient; +import com.azure.data.cosmos.CosmosClientBuilder; +import com.azure.data.cosmos.CosmosContainer; +import com.azure.data.cosmos.CosmosUserDefinedFunctionProperties; +import com.azure.data.cosmos.internal.Database; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.internal.FeedResponseListValidator; +import com.azure.data.cosmos.internal.FeedResponseValidator; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; + +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.stream.Collectors; + +public class ReadFeedUdfsTest extends TestSuiteBase { + + private Database createdDatabase; + private CosmosContainer createdCollection; + private List createdUserDefinedFunctions = new ArrayList<>(); + + private CosmosClient client; + + @Factory(dataProvider = "clientBuildersWithDirect") + public ReadFeedUdfsTest(CosmosClientBuilder clientBuilder) { + super(clientBuilder); + } + + @Test(groups = { "simple" }, timeOut = FEED_TIMEOUT) + public void readUserDefinedFunctions() throws Exception { + + FeedOptions options = new FeedOptions(); + options.maxItemCount(2); + + Flux> feedObservable = createdCollection.getScripts() + .readAllUserDefinedFunctions(options); + + int expectedPageSize = (createdUserDefinedFunctions.size() + options.maxItemCount() - 1) + / options.maxItemCount(); + + FeedResponseListValidator validator = new FeedResponseListValidator.Builder() + .totalSize(createdUserDefinedFunctions.size()) + .exactlyContainsInAnyOrder( + createdUserDefinedFunctions.stream().map(d -> d.resourceId()).collect(Collectors.toList())) + .numberOfPages(expectedPageSize) + .allPagesSatisfy(new FeedResponseValidator.Builder() + .requestChargeGreaterThanOrEqualTo(1.0).build()) + .build(); + validateQuerySuccess(feedObservable, validator, FEED_TIMEOUT); + } + + @BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT) + public void beforeClass() { + client = clientBuilder().build(); + createdCollection = getSharedMultiPartitionCosmosContainer(client); + truncateCollection(createdCollection); + + for (int i = 0; i < 5; i++) { + createdUserDefinedFunctions.add(createUserDefinedFunctions(createdCollection)); + } + + waitIfNeededForReplicasToCatchUp(clientBuilder()); + } + + @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeClose(client); + } + + public CosmosUserDefinedFunctionProperties createUserDefinedFunctions(CosmosContainer cosmosContainer) { + CosmosUserDefinedFunctionProperties udf = new CosmosUserDefinedFunctionProperties(); + udf.id(UUID.randomUUID().toString()); + udf.body("function() {var x = 10;}"); + return cosmosContainer.getScripts().createUserDefinedFunction(udf).block() + .properties(); + } + + private String getCollectionLink() { + return "dbs/" + getDatabaseId() + "/colls/" + getCollectionId(); + } + + private String getCollectionId() { + return createdCollection.id(); + } + + private String getDatabaseId() { + return createdDatabase.id(); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ReadFeedUsersTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ReadFeedUsersTest.java new file mode 100644 index 0000000000000..47331a6725f96 --- /dev/null +++ 
b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ReadFeedUsersTest.java @@ -0,0 +1,101 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.CosmosClient; +import com.azure.data.cosmos.CosmosClientBuilder; +import com.azure.data.cosmos.CosmosDatabase; +import com.azure.data.cosmos.CosmosDatabaseForTest; +import com.azure.data.cosmos.CosmosUserProperties; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.internal.FeedResponseListValidator; +import com.azure.data.cosmos.internal.FeedResponseValidator; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; + +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.stream.Collectors; + +public class ReadFeedUsersTest extends TestSuiteBase { + + public final String databaseId = CosmosDatabaseForTest.generateId(); + private CosmosDatabase createdDatabase; + + private CosmosClient client; + private List createdUsers = new ArrayList<>(); + + @Factory(dataProvider = "clientBuilders") + public ReadFeedUsersTest(CosmosClientBuilder clientBuilder) { + super(clientBuilder); + } + + @Test(groups = { "simple" }, timeOut = FEED_TIMEOUT) + public void readUsers() throws Exception { + + FeedOptions options = new FeedOptions(); + options.maxItemCount(2); + + Flux> feedObservable = createdDatabase.readAllUsers(options); + + int expectedPageSize = (createdUsers.size() + options.maxItemCount() - 1) / options.maxItemCount(); + + FeedResponseListValidator validator = new FeedResponseListValidator.Builder() + .totalSize(createdUsers.size()) + .exactlyContainsInAnyOrder(createdUsers.stream().map(d -> d.resourceId()).collect(Collectors.toList())) + .numberOfPages(expectedPageSize) + .pageSatisfy(0, new FeedResponseValidator.Builder() + .requestChargeGreaterThanOrEqualTo(1.0).build()) + .build(); + validateQuerySuccess(feedObservable, validator, FEED_TIMEOUT); + } + + @BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT) + public void beforeClass() { + client = clientBuilder().build(); + createdDatabase = createDatabase(client, databaseId); + + for(int i = 0; i < 5; i++) { + createdUsers.add(createUsers(createdDatabase)); + } + + 
waitIfNeededForReplicasToCatchUp(clientBuilder()); + } + + @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeDeleteDatabase(createdDatabase); + safeClose(client); + } + + public CosmosUserProperties createUsers(CosmosDatabase cosmosDatabase) { + CosmosUserProperties user = new CosmosUserProperties(); + user.id(UUID.randomUUID().toString()); + return cosmosDatabase.createUser(user).block().properties(); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ResourceTokenTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ResourceTokenTest.java new file mode 100644 index 0000000000000..388b03cd7ca3b --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/ResourceTokenTest.java @@ -0,0 +1,542 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.internal.AsyncDocumentClient; +import com.azure.data.cosmos.ConnectionPolicy; +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.internal.Database; +import com.azure.data.cosmos.internal.*; +import com.azure.data.cosmos.internal.Document; +import com.azure.data.cosmos.internal.DocumentCollection; +import com.azure.data.cosmos.PartitionKey; +import com.azure.data.cosmos.PartitionKeyDefinition; +import com.azure.data.cosmos.PermissionMode; +import com.azure.data.cosmos.internal.TestSuiteBase; +import org.apache.commons.lang3.StringUtils; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; + +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; + +/** + * This class try to test different scenario related to fetching various + * resources from resource token directly or via permission feed . 
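The test methods in the class below construct clients in two ways: scoped to a permission feed, or authorized directly with a single resource token. A condensed sketch of both builder paths, assuming a service endpoint string and an already-created `Permission`; import locations follow the explicit and wildcard `internal` imports used by this test class:

```java
import com.azure.data.cosmos.ConnectionPolicy;
import com.azure.data.cosmos.ConsistencyLevel;
import com.azure.data.cosmos.internal.AsyncDocumentClient;
import com.azure.data.cosmos.internal.Permission;

import java.util.ArrayList;
import java.util.List;

// Sketch of the two client-construction paths exercised by the resource-token tests.
final class ResourceTokenClients {

    // 1) Scope the client to a permission feed; requests are authorized with the tokens it carries.
    static AsyncDocumentClient fromPermissionFeed(String serviceEndpoint, Permission permission) {
        List<Permission> permissionFeed = new ArrayList<>();
        permissionFeed.add(permission);
        return new AsyncDocumentClient.Builder()
                .withServiceEndpoint(serviceEndpoint)
                .withPermissionFeed(permissionFeed)
                .withConnectionPolicy(ConnectionPolicy.defaultPolicy())
                .withConsistencyLevel(ConsistencyLevel.SESSION)
                .build();
    }

    // 2) Pass a single resource token directly in place of the master key.
    static AsyncDocumentClient fromResourceToken(String serviceEndpoint, String resourceToken) {
        return new AsyncDocumentClient.Builder()
                .withServiceEndpoint(serviceEndpoint)
                .withMasterKeyOrResourceToken(resourceToken)
                .withConnectionPolicy(ConnectionPolicy.defaultPolicy())
                .withConsistencyLevel(ConsistencyLevel.SESSION)
                .build();
    }
}
```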
+ * + */ + +// TODO: change to use external TestSuiteBase +public class ResourceTokenTest extends TestSuiteBase { + public final String databaseId = DatabaseForTest.generateId(); + + private Database createdDatabase; + private DocumentCollection createdCollection; + private DocumentCollection createdCollectionWithPartitionKey; + private Document createdDocument; + private Document createdDocumentWithPartitionKey; + private Document createdDocumentWithPartitionKey2; + private User createdUser; + private Permission createdCollPermission; + private Permission createdCollPermissionWithName; + private Permission createdDocPermission; + private Permission createdDocPermissionWithName; + private Permission createdDocPermissionWithPartitionKey; + private Permission createdDocPermissionWithPartitionKeyWithName; + private Permission createdDocPermissionWithPartitionKey2; + private Permission createdDocPermissionWithPartitionKey2WithName; + private Permission createdColPermissionWithPartitionKey; + private Permission createdColPermissionWithPartitionKeyWithName; + private Permission createdColPermissionWithPartitionKey2; + private Permission createdColPermissionWithPartitionKey2WithName; + + private AsyncDocumentClient client; + + // ALL static string used in below test cases + private final static String DOCUMENT_DEFINITION = "{ 'id': 'doc%d', 'counter': '%d'}"; + private final static String DOCUMENT_DEFINITION_WITH_PERMISSION_KEY = "{ " + "\"id\": \"%s\", " + + "\"mypk\": \"%s\", " + "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]" + "}"; + private final static String PARTITION_KEY_PATH_1 = "/mypk"; + private final static String PARTITION_KEY_PATH_2 = "/mypk2"; + + private static final String PARTITION_KEY_VALUE = "1"; + private static final String PARTITION_KEY_VALUE_2 = "2"; + private static final String PERMISSION_DEFINITION = "{" + " 'id': 'PermissionForDocWithPartitionKey'," + + " 'permissionMode': 'read'," + " 'resource': '%s'," + " 'resourcePartitionKey': ['%s']" + "}"; + private static final String COLLECTION_PERMISSION_DEFINITION = "{" + " 'id': 'PermissionForColWithPartitionKey'," + + " 'permissionMode': 'read'," + " 'resource': '%s'," + " 'resourcePartitionKey': ['%s']" + "}"; + private static final String USER_NAME = "TestUser"; + private static final String PERMISSION_FOR_COLL = "PermissionForColl"; + private static final String PERMISSION_FOR_COLL_WITH_NAME = "PermissionForCollWithName"; + private static final String PERMISSION_FOR_DOC = "PermissionForDoc"; + private static final String PERMISSION_FOR_DOC_WITH_NAME = "PermissionForDocWithName"; + + @Factory(dataProvider = "clientBuilders") + public ResourceTokenTest(AsyncDocumentClient.Builder clientBuilder) { + super(clientBuilder); + } + + @BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT) + public void beforeClass() throws Exception { + client = clientBuilder().build(); + Database d = new Database(); + d.id(databaseId); + createdDatabase = createDatabase(client, d); + // CREATE collection + createdCollection = createCollection(client, createdDatabase.id(), getCollectionDefinitionWithPartitionKey(PARTITION_KEY_PATH_2)); + // CREATE document + createdDocument = createDocument(client, createdDatabase.id(),createdCollection.id(), getDocument()); + // CREATE collection with partition key + createdCollectionWithPartitionKey = createCollection(client, createdDatabase.id(), getCollectionDefinitionWithPartitionKey(PARTITION_KEY_PATH_1)); + // CREATE document with partition key + createdDocumentWithPartitionKey = 
createDocument(client, createdDatabase.id(), createdCollectionWithPartitionKey.id(), + getDocumentDefinitionWithPartitionKey()); + // CREATE second document with partition key + createdDocumentWithPartitionKey2 = createDocument(client, createdDatabase.id(),createdCollectionWithPartitionKey.id(), + getDocumentDefinitionWithPartitionKey2()); + // CREATE user + createdUser = createUser(client, createdDatabase.id(), getUserDefinition()); + // CREATE permission for collection + createdCollPermission = client.createPermission(getUserLink(), getCollPermission(), null).single().block() + .getResource(); + createdCollPermissionWithName = client.createPermission(getUserLink(), getCollPermissionWithName(), null).single().block() + .getResource(); + // CREATE permission for document + createdDocPermission = client.createPermission(getUserLink(), getDocPermission(), null).single().block() + .getResource(); + createdDocPermissionWithName = client.createPermission(getUserLink(), getDocPermissionWithName(), null).single().block() + .getResource(); + // CREATE permission for document with partition key + createdDocPermissionWithPartitionKey = client + .createPermission(getUserLink(), getDocPermissionWithPartitionKey(), null).single().block() + .getResource(); + createdDocPermissionWithPartitionKeyWithName = client + .createPermission(getUserLink(), getDocPermissionWithPartitionKeyWithName(), null).single().block() + .getResource(); + // CREATE permission for document with partition key 2 + createdDocPermissionWithPartitionKey2 = client + .createPermission(getUserLink(), getDocPermissionWithPartitionKey2(), null).single().block() + .getResource(); + createdDocPermissionWithPartitionKey2WithName = client + .createPermission(getUserLink(), getDocPermissionWithPartitionKey2WithName(), null).single().block() + .getResource(); + // CREATE permission for collection with partition key + createdColPermissionWithPartitionKey = client + .createPermission(getUserLink(), getColPermissionWithPartitionKey(), null).single().block() + .getResource(); + createdColPermissionWithPartitionKeyWithName = client + .createPermission(getUserLink(), getColPermissionWithPartitionKeyWithName(), null).single().block() + .getResource(); + // CREATE permission for collection with partition key + createdColPermissionWithPartitionKey2 = client + .createPermission(getUserLink(), getColPermissionWithPartitionKey2(), null).single().block() + .getResource(); + createdColPermissionWithPartitionKey2WithName = client + .createPermission(getUserLink(), getColPermissionWithPartitionKey2WithName(), null).single().block() + .getResource(); + } + + @DataProvider(name = "collectionAndPermissionData") + public Object[][] collectionAndPermissionData() { + return new Object[][]{ + //This test will try to read collection from its own permission and validate it, both with request Id and name. + {createdCollection.selfLink(), createdCollPermission}, + {TestUtils.getCollectionNameLink(createdDatabase.id(), createdCollection.id()), createdDocPermissionWithName}, + }; + } + + @DataProvider(name = "documentAndPermissionData") + public Object[][] documentAndPermissionData() { + return new Object[][]{ + //These tests will try to read document from its own permission and validate it, both with request Id and name. 
+ {createdDocument.selfLink(), createdDocPermission, createdDocument.id(), null}, + {TestUtils.getDocumentNameLink(createdDatabase.id(), createdCollection.id(), createdDocument.id()), createdDocPermissionWithName, createdDocument.id(), null}, + + //These tests will try to read document from its permission having partition key 1 and validate it, both with request Id and name. + {createdDocumentWithPartitionKey.selfLink(), createdDocPermissionWithPartitionKey, createdDocumentWithPartitionKey.id(), PARTITION_KEY_VALUE}, + {TestUtils.getDocumentNameLink(createdDatabase.id(), createdCollectionWithPartitionKey.id(), createdDocumentWithPartitionKey.id()), createdDocPermissionWithPartitionKeyWithName + , createdDocumentWithPartitionKey.id(), PARTITION_KEY_VALUE}, + + //These tests will try to read document from its permission having partition key 2 and validate it, both with request Id and name. + {createdDocumentWithPartitionKey2.selfLink(), createdDocPermissionWithPartitionKey2, createdDocumentWithPartitionKey2.id(), PARTITION_KEY_VALUE_2}, + {TestUtils.getDocumentNameLink(createdDatabase.id(), createdCollectionWithPartitionKey.id(), createdDocumentWithPartitionKey2.id()), + createdDocPermissionWithPartitionKey2WithName, createdDocumentWithPartitionKey2.id(), PARTITION_KEY_VALUE_2}, + + // These tests will try to read document from its parent collection permission and validate it, both with request Id and name. + {createdDocument.selfLink(), createdCollPermission, createdDocument.id(), null}, + {TestUtils.getDocumentNameLink(createdDatabase.id(), createdCollection.id(), createdDocument.id()), createdCollPermissionWithName, createdDocument.id(), null}, + + //This test will try to read document from collection permission having partition key 1 and validate it, both with request Id and name. + {createdDocumentWithPartitionKey.selfLink(), createdColPermissionWithPartitionKey, createdDocumentWithPartitionKey.id(), PARTITION_KEY_VALUE}, + {TestUtils.getDocumentNameLink(createdDatabase.id(), createdCollectionWithPartitionKey.id(), createdDocumentWithPartitionKey.id()), createdColPermissionWithPartitionKeyWithName, createdDocumentWithPartitionKey.id(), PARTITION_KEY_VALUE}, + + //This test will try to read document from collection permission having partition key 2 and validate it, both with request Id and name. + {createdDocumentWithPartitionKey2.selfLink(), createdColPermissionWithPartitionKey2, createdDocumentWithPartitionKey2.id(), PARTITION_KEY_VALUE_2}, + {TestUtils.getDocumentNameLink(createdDatabase.id(), createdCollectionWithPartitionKey.id(), createdDocumentWithPartitionKey2.id()), createdColPermissionWithPartitionKey2WithName, createdDocumentWithPartitionKey2.id(), PARTITION_KEY_VALUE_2} + + }; + } + + @DataProvider(name = "documentAndPermissionDataForResourceNotFound") + public Object[][] documentAndPermissionDataForResourceNotFound() { + return new Object[][]{ + //This test will try to read document from its resource token directly and validate it. + {createdDocumentWithPartitionKey2.selfLink(), createdColPermissionWithPartitionKey, PARTITION_KEY_VALUE}, + //This test will try to read document from its parent collection resource token directly and validate it. 
+ {TestUtils.getDocumentNameLink(createdDatabase.id(), createdCollectionWithPartitionKey.id(), createdDocumentWithPartitionKey2.id()), + createdColPermissionWithPartitionKeyWithName, PARTITION_KEY_VALUE} + }; + } + + @DataProvider(name = "documentAndMultipleCollPermissionData") + public Object[][] documentAndMultipleCollPermissionData() { + return new Object[][]{ + //These tests will try to read document from partition 1 with two collection permissions having different partition keys and validate it, both with request Id and name. + {createdDocumentWithPartitionKey.selfLink(), createdColPermissionWithPartitionKey, createdColPermissionWithPartitionKey2, createdDocumentWithPartitionKey.id(), + PARTITION_KEY_VALUE}, + {TestUtils.getDocumentNameLink(createdDatabase.id(), createdCollectionWithPartitionKey.id(), createdDocumentWithPartitionKey.id()), createdColPermissionWithPartitionKeyWithName + , createdColPermissionWithPartitionKey2WithName, createdDocumentWithPartitionKey.id(), PARTITION_KEY_VALUE}, + + //These tests will try to read document from partition 1 with two collection permissions having different partition keys and validate it, both with request Id and name. + {createdDocumentWithPartitionKey2.selfLink(), createdColPermissionWithPartitionKey, createdColPermissionWithPartitionKey2, createdDocumentWithPartitionKey2.id(), + PARTITION_KEY_VALUE_2}, + {TestUtils.getDocumentNameLink(createdDatabase.id(), createdCollectionWithPartitionKey.id(), createdDocumentWithPartitionKey2.id()), createdColPermissionWithPartitionKeyWithName + , createdColPermissionWithPartitionKey2WithName, createdDocumentWithPartitionKey2.id(), PARTITION_KEY_VALUE_2} + }; + } + + @DataProvider(name = "resourceToken") + public Object[][] resourceToken() { + return new Object[][]{ + //This test will try to read document from its resource token directly and validate it. + {createdDocPermission.getToken()}, + //This test will try to read document from its parent collection resource token directly and validate it. + {createdCollPermission.getToken()} + }; + } + + /** + * This test will try to read collection from permission and validate it. + * + * @throws Exception + */ + @Test(groups = { "simple" }, dataProvider = "collectionAndPermissionData", timeOut = TIMEOUT) + public void readCollectionFromPermissionFeed(String collectionUrl, Permission permission) throws Exception { + AsyncDocumentClient asyncClientResourceToken = null ; + try { + List permissionFeed = new ArrayList<>(); + permissionFeed.add(permission); + asyncClientResourceToken = new AsyncDocumentClient.Builder().withServiceEndpoint(TestConfigurations.HOST) + .withPermissionFeed(permissionFeed).withConnectionPolicy(ConnectionPolicy.defaultPolicy()) + .withConsistencyLevel(ConsistencyLevel.SESSION).build(); + Flux> readObservable = asyncClientResourceToken + .readCollection(collectionUrl, null); + + ResourceResponseValidator validator = new ResourceResponseValidator.Builder() + .withId(createdCollection.id()).build(); + validateSuccess(readObservable, validator); + } finally { + safeClose(asyncClientResourceToken); + } + } + + /** + * This test will try to read document from permission and validate it. 
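The document reads below set the partition key on `RequestOptions` in one of two ways: a concrete `PartitionKey` when the target lives in a partitioned collection, or `PartitionKey.None` otherwise. A small sketch of that branching, assuming `RequestOptions` resolves through the same internal package this test imports with a wildcard:

```java
import com.azure.data.cosmos.PartitionKey;
import com.azure.data.cosmos.internal.RequestOptions;

import org.apache.commons.lang3.StringUtils;

// Partition-key handling as used when reading documents through permission-scoped clients.
final class PartitionKeyOptions {
    static RequestOptions forPartitionKey(String partitionKeyValue) {
        RequestOptions options = new RequestOptions();
        if (StringUtils.isNotEmpty(partitionKeyValue)) {
            options.setPartitionKey(new PartitionKey(partitionKeyValue)); // partitioned collection
        } else {
            options.setPartitionKey(PartitionKey.None);                   // non-partitioned read
        }
        return options;
    }
}
```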
+ * + * @throws Exception + */ + @Test(groups = { "simple" }, dataProvider = "documentAndPermissionData", timeOut = TIMEOUT) + public void readDocumentFromPermissionFeed(String documentUrl, Permission permission, String documentId, String partitionKey) throws Exception { + AsyncDocumentClient asyncClientResourceToken = null; + try { + List permissionFeed = new ArrayList<>(); + permissionFeed.add(permission); + asyncClientResourceToken = new AsyncDocumentClient.Builder().withServiceEndpoint(TestConfigurations.HOST) + .withPermissionFeed(permissionFeed).withConnectionPolicy(ConnectionPolicy.defaultPolicy()) + .withConsistencyLevel(ConsistencyLevel.SESSION).build(); + RequestOptions options = new RequestOptions(); + if (StringUtils.isNotEmpty(partitionKey)) { + options.setPartitionKey(new PartitionKey((String)partitionKey)); + } else { + options.setPartitionKey(PartitionKey.None); + } + Flux> readObservable = asyncClientResourceToken + .readDocument(documentUrl, options); + ResourceResponseValidator validator = new ResourceResponseValidator.Builder() + .withId(documentId).build(); + validateSuccess(readObservable, validator); + } finally { + safeClose(asyncClientResourceToken); + } + } + + /** + * This test will try to read document from resource token directly and validate it. + * + * @throws Exception + */ + @Test(groups = { "simple" }, dataProvider = "resourceToken", timeOut = TIMEOUT) + public void readDocumentFromResouceToken(String resourceToken) throws Exception { + AsyncDocumentClient asyncClientResourceToken = null; + try { + asyncClientResourceToken = new AsyncDocumentClient.Builder().withServiceEndpoint(TestConfigurations.HOST) + .withMasterKeyOrResourceToken(resourceToken) + .withConnectionPolicy(ConnectionPolicy.defaultPolicy()).withConsistencyLevel(ConsistencyLevel.SESSION) + .build(); + RequestOptions options = new RequestOptions(); + options.setPartitionKey(PartitionKey.None); + Flux> readObservable = asyncClientResourceToken + .readDocument(createdDocument.selfLink(), options); + ResourceResponseValidator validator = new ResourceResponseValidator.Builder() + .withId(createdDocument.id()).build(); + validateSuccess(readObservable, validator); + } finally { + safeClose(asyncClientResourceToken); + } + } + + /** + * This test will try to read document from multiple collection permissions having different keys and validate it. 
+ * + * @throws Exception + */ + @Test(groups = {"simple"}, dataProvider = "documentAndMultipleCollPermissionData", timeOut = TIMEOUT) + public void readDocumentOfParKeyFromTwoCollPermissionWithDiffPartitionKeys(String documentUrl, Permission collPermission1, Permission collPermission2, String documentId, String partitionKey) throws Exception { + AsyncDocumentClient asyncClientResourceToken = null; + try { + List permissionFeed = new ArrayList<>(); + permissionFeed.add(collPermission1); + permissionFeed.add(collPermission2); + asyncClientResourceToken = new AsyncDocumentClient.Builder().withServiceEndpoint(TestConfigurations.HOST) + .withPermissionFeed(permissionFeed).withConnectionPolicy(ConnectionPolicy.defaultPolicy()) + .withConsistencyLevel(ConsistencyLevel.SESSION).build(); + RequestOptions options = new RequestOptions(); + options.setPartitionKey(new PartitionKey(partitionKey)); + Flux> readObservable = asyncClientResourceToken + .readDocument(documentUrl, options); + ResourceResponseValidator validator = new ResourceResponseValidator.Builder() + .withId(documentId).build(); + validateSuccess(readObservable, validator); + } finally { + safeClose(asyncClientResourceToken); + } + } + + /** + * This test will try to read document with wrong collection permission hence + * expecting resource not found failure. + * + * @throws Exception + */ + @Test(groups = { "simple" },dataProvider = "documentAndPermissionDataForResourceNotFound", timeOut = TIMEOUT) + public void readDocumentFromCollPermissionWithDiffPartitionKey_ResourceNotFound(String documentUrl, Permission permission, String partitionKey) throws Exception { + AsyncDocumentClient asyncClientResourceToken = null; + try { + List permissionFeed = new ArrayList<>(); + permissionFeed.add(permission); + asyncClientResourceToken = new AsyncDocumentClient.Builder().withServiceEndpoint(TestConfigurations.HOST) + .withPermissionFeed(permissionFeed).withConnectionPolicy(ConnectionPolicy.defaultPolicy()) + .withConsistencyLevel(ConsistencyLevel.SESSION).build(); + RequestOptions options = new RequestOptions(); + options.setPartitionKey(new PartitionKey(partitionKey)); + Flux> readObservable = asyncClientResourceToken + .readDocument(documentUrl, options); + FailureValidator validator = new FailureValidator.Builder().resourceNotFound().build(); + validateFailure(readObservable, validator); + } finally { + safeClose(asyncClientResourceToken); + } + } + + /** + * This test will try to read document with collection permissions and passing wrong partitionkey + * in request options hence expecting exception. 
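The partition-key-scoped permissions used by these tests are built from a JSON template that carries a `resourcePartitionKey`, as in the `getDocPermissionWithPartitionKey` and `getColPermissionWithPartitionKey` helpers later in this class. A sketch of that construction; the id, resource link, and key value shown are placeholders, and `Permission` resolves through the test's internal imports:

```java
import com.azure.data.cosmos.internal.Permission;

// Building a read permission restricted to a single partition-key value from a JSON definition.
final class ScopedPermissionSketch {
    private static final String TEMPLATE =
            "{ 'id': '%s', 'permissionMode': 'read', 'resource': '%s', 'resourcePartitionKey': ['%s'] }";

    static Permission readPermissionFor(String id, String resourceLink, String partitionKeyValue) {
        String json = String.format(TEMPLATE, id, resourceLink, partitionKeyValue);
        return new Permission(json); // Permission accepts a JSON definition, as in the helpers
    }
}
```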
+ * + * @throws Exception + */ + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void readDocumentFromCollPermissionWithDiffPartitionKey_WithException() throws Exception { + AsyncDocumentClient asyncClientResourceToken = null; + try { + List permissionFeed = new ArrayList<>(); + permissionFeed.add(createdColPermissionWithPartitionKey); + asyncClientResourceToken = new AsyncDocumentClient.Builder() + .withServiceEndpoint(TestConfigurations.HOST) + .withConnectionPolicy(ConnectionPolicy.defaultPolicy()) + .withConsistencyLevel(ConsistencyLevel.SESSION) + .withPermissionFeed(permissionFeed) + .build(); + RequestOptions options = new RequestOptions(); + options.setPartitionKey(new PartitionKey(PARTITION_KEY_VALUE_2)); + Flux> readObservable = asyncClientResourceToken + .readDocument(createdDocumentWithPartitionKey.selfLink(), options); + FailureValidator validator = new FailureValidator.Builder().resourceTokenNotFound().build(); + validateFailure(readObservable, validator); + } finally { + safeClose(asyncClientResourceToken); + } + } + + @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeDeleteDatabase(client, databaseId); + safeClose(client); + } + + private static User getUserDefinition() { + User user = new User(); + user.id(USER_NAME); + return user; + } + + private static Document getDocument() { + Document doc = new Document(String.format(DOCUMENT_DEFINITION, 1, 1)); + return doc; + } + + private Permission getCollPermission() { + Permission permission = new Permission(); + permission.id(PERMISSION_FOR_COLL); + permission.setPermissionMode(PermissionMode.READ); + permission.setResourceLink(createdCollection.selfLink()); + return permission; + } + + private Permission getCollPermissionWithName() { + Permission permission = new Permission(); + permission.id(PERMISSION_FOR_COLL_WITH_NAME); + permission.setPermissionMode(PermissionMode.READ); + permission.setResourceLink(TestUtils.getCollectionNameLink(createdDatabase.id(), createdCollection.id())); + return permission; + } + + private Permission getDocPermission() { + Permission permission = new Permission(); + permission.id(PERMISSION_FOR_DOC); + permission.setPermissionMode(PermissionMode.READ); + permission.setResourceLink(createdDocument.selfLink()); + return permission; + } + private Permission getDocPermissionWithName() { + Permission permission = new Permission(); + permission.id(PERMISSION_FOR_DOC_WITH_NAME); + permission.setPermissionMode(PermissionMode.READ); + permission.setResourceLink(TestUtils.getDocumentNameLink(createdDatabase.id(),createdCollection.id(),createdDocument.id())); + return permission; + } + + private Permission getDocPermissionWithPartitionKey() { + String permissionStr = String.format(PERMISSION_DEFINITION, createdDocumentWithPartitionKey.selfLink(), + PARTITION_KEY_VALUE); + Permission permission = new Permission(permissionStr); + return permission; + } + + private Permission getDocPermissionWithPartitionKeyWithName() { + String permissionStr = String.format(PERMISSION_DEFINITION, TestUtils.getDocumentNameLink(createdDatabase.id(), createdCollectionWithPartitionKey.id(), createdDocumentWithPartitionKey.id()), + PARTITION_KEY_VALUE); + Permission permission = new Permission(permissionStr); + permission.id("PermissionForDocWithPartitionKeyWithName"); + return permission; + } + + private Permission getDocPermissionWithPartitionKey2() { + String permissionStr = String.format(PERMISSION_DEFINITION, 
createdDocumentWithPartitionKey2.selfLink(), + PARTITION_KEY_VALUE_2); + Permission permission = new Permission(permissionStr); + permission.id("PermissionForDocWithPartitionKey2"); + return permission; + } + + private Permission getDocPermissionWithPartitionKey2WithName() { + String permissionStr = String.format(PERMISSION_DEFINITION, TestUtils.getDocumentNameLink(createdDatabase.id(),createdCollectionWithPartitionKey.id(),createdDocumentWithPartitionKey2.id()), + PARTITION_KEY_VALUE_2); + Permission permission = new Permission(permissionStr); + permission.id("PermissionForDocWithPartitionKey2WithName"); + return permission; + } + + private Permission getColPermissionWithPartitionKey() { + String permissionStr = String.format(COLLECTION_PERMISSION_DEFINITION, createdCollectionWithPartitionKey.selfLink(), + PARTITION_KEY_VALUE); + Permission permission = new Permission(permissionStr); + return permission; + } + + private Permission getColPermissionWithPartitionKeyWithName() { + String permissionStr = String.format(COLLECTION_PERMISSION_DEFINITION, TestUtils.getCollectionNameLink(createdDatabase.id(), createdCollectionWithPartitionKey.id()), + PARTITION_KEY_VALUE); + Permission permission = new Permission(permissionStr); + permission.id("PermissionForColWithPartitionKeyWithName"); + return permission; + } + + private Permission getColPermissionWithPartitionKey2() { + String permissionStr = String.format(COLLECTION_PERMISSION_DEFINITION, createdCollectionWithPartitionKey.selfLink(), + PARTITION_KEY_VALUE_2); + Permission permission = new Permission(permissionStr); + permission.id("PermissionForColWithPartitionKey2"); + return permission; + } + + private Permission getColPermissionWithPartitionKey2WithName() { + String permissionStr = String.format(COLLECTION_PERMISSION_DEFINITION, TestUtils.getCollectionNameLink(createdDatabase.id(), createdCollectionWithPartitionKey.id()), + PARTITION_KEY_VALUE_2); + Permission permission = new Permission(permissionStr); + permission.id("PermissionForColWithPartitionKey2WithName"); + return permission; + } + + private String getUserLink() { + return createdUser.selfLink(); + } + + private Document getDocumentDefinitionWithPartitionKey() { + String uuid = UUID.randomUUID().toString(); + Document doc = new Document(String.format(DOCUMENT_DEFINITION_WITH_PERMISSION_KEY, uuid, PARTITION_KEY_VALUE)); + return doc; + } + private Document getDocumentDefinitionWithPartitionKey2() { + String uuid = UUID.randomUUID().toString(); + Document doc = new Document(String.format(DOCUMENT_DEFINITION_WITH_PERMISSION_KEY, uuid, PARTITION_KEY_VALUE_2)); + return doc; + } + + private DocumentCollection getCollectionDefinitionWithPartitionKey(String pkDefPath) { + PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition(); + ArrayList paths = new ArrayList(); + paths.add(pkDefPath); + partitionKeyDef.paths(paths); + + DocumentCollection collectionDefinition = new DocumentCollection(); + collectionDefinition.id(UUID.randomUUID().toString()); + collectionDefinition.setPartitionKey(partitionKeyDef); + + return collectionDefinition; + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/SimpleSerializationTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/SimpleSerializationTest.java new file mode 100644 index 0000000000000..613568e747a86 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/SimpleSerializationTest.java @@ -0,0 +1,101 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 
Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.CosmosClient; +import com.azure.data.cosmos.CosmosClientBuilder; +import com.azure.data.cosmos.CosmosContainer; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.databind.JsonMappingException; +import com.fasterxml.jackson.databind.JsonSerializer; +import com.fasterxml.jackson.databind.SerializerProvider; +import com.fasterxml.jackson.databind.annotation.JsonSerialize; +import org.apache.commons.lang3.NotImplementedException; +import org.testng.Assert; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; + +import java.util.UUID; + +import static org.assertj.core.api.Assertions.assertThat; + +public class SimpleSerializationTest extends TestSuiteBase { + + private CosmosContainer createdCollection; + private CosmosClient client; + + private static class TestObject { + public static class BadSerializer extends JsonSerializer { + @Override + public void serialize(String value, JsonGenerator gen, SerializerProvider serializers) { + throw new NotImplementedException("bad"); + } + } + + @JsonProperty("mypk") + private String mypk; + + @JsonProperty("id") + private String id; + + @JsonProperty("prop") + @JsonSerialize(using = BadSerializer.class) + private String prop; + } + + @Factory(dataProvider = "clientBuildersWithDirect") + public SimpleSerializationTest(CosmosClientBuilder clientBuilder) { + super(clientBuilder); + } + + @Test(groups = {"simple"}, timeOut = TIMEOUT) + public void createDocument() throws InterruptedException { + TestObject testObject = new TestObject(); + testObject.id = UUID.randomUUID().toString(); + testObject.mypk = UUID.randomUUID().toString(); + testObject.prop = UUID.randomUUID().toString(); + + try { + createdCollection.createItem(testObject); + Assert.fail(); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage()).contains("Can't serialize the object into the json string"); + assertThat(e.getCause()).isInstanceOf(JsonMappingException.class); + assertThat(e.getCause().getMessage()).contains("bad"); + } + } + + @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT) + public void beforeClass() { + client = clientBuilder().build(); + createdCollection = 
getSharedMultiPartitionCosmosContainer(client); + } + + @AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeClose(client); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/SinglePartitionDocumentQueryTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/SinglePartitionDocumentQueryTest.java new file mode 100644 index 0000000000000..56e4cbe282812 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/SinglePartitionDocumentQueryTest.java @@ -0,0 +1,314 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.CosmosClient; +import com.azure.data.cosmos.CosmosClientBuilder; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.CosmosContainer; +import com.azure.data.cosmos.CosmosItemProperties; +import com.azure.data.cosmos.CosmosItemRequestOptions; +import com.azure.data.cosmos.internal.Database; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.SqlParameter; +import com.azure.data.cosmos.SqlParameterList; +import com.azure.data.cosmos.SqlQuerySpec; +import com.azure.data.cosmos.internal.FailureValidator; +import com.azure.data.cosmos.internal.FeedResponseListValidator; +import com.azure.data.cosmos.internal.FeedResponseValidator; +import com.azure.data.cosmos.internal.TestUtils; +import io.reactivex.subscribers.TestSubscriber; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; + +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.stream.Collectors; + +import static org.assertj.core.api.Assertions.assertThat; + +public class SinglePartitionDocumentQueryTest extends TestSuiteBase { + + private Database createdDatabase; + private CosmosContainer createdCollection; + private List createdDocuments = new ArrayList<>(); + + private CosmosClient client; + + public String getCollectionLink() { + return TestUtils.getCollectionNameLink(createdDatabase.id(), createdCollection.id()); + } + + @Factory(dataProvider = "clientBuildersWithDirect") + public SinglePartitionDocumentQueryTest(CosmosClientBuilder clientBuilder) { + 
super(clientBuilder); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "queryMetricsArgProvider") + public void queryDocuments(boolean queryMetricsEnabled) throws Exception { + + String query = "SELECT * from c where c.prop = 99"; + + FeedOptions options = new FeedOptions(); + options.maxItemCount(5); + options.enableCrossPartitionQuery(true); + options.populateQueryMetrics(queryMetricsEnabled); + Flux> queryObservable = createdCollection.queryItems(query, options); + + List expectedDocs = createdDocuments.stream().filter(d -> 99 == d.getInt("prop") ).collect(Collectors.toList()); + assertThat(expectedDocs).isNotEmpty(); + + int expectedPageSize = (expectedDocs.size() + options.maxItemCount() - 1) / options.maxItemCount(); + + FeedResponseListValidator validator = new FeedResponseListValidator.Builder() + .totalSize(expectedDocs.size()) + .exactlyContainsInAnyOrder(expectedDocs.stream().map(d -> d.resourceId()).collect(Collectors.toList())) + .numberOfPages(expectedPageSize) + .pageSatisfy(0, new FeedResponseValidator.Builder() + .requestChargeGreaterThanOrEqualTo(1.0).build()) + .hasValidQueryMetrics(queryMetricsEnabled) + .build(); + + validateQuerySuccess(queryObservable, validator, 10000); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void queryDocuments_ParameterizedQueryWithInClause() throws Exception { + String query = "SELECT * from c where c.prop IN (@param1, @param2)"; + SqlParameterList params = new SqlParameterList(new SqlParameter("@param1", 3), new SqlParameter("@param2", 4)); + SqlQuerySpec sqs = new SqlQuerySpec(query, params); + + FeedOptions options = new FeedOptions(); + options.maxItemCount(5); + options.enableCrossPartitionQuery(true); + Flux> queryObservable = createdCollection.queryItems(sqs, options); + + List expectedDocs = createdDocuments.stream().filter(d -> (3 == d.getInt("prop") || 4 == d.getInt("prop"))).collect(Collectors.toList()); + assertThat(expectedDocs).isNotEmpty(); + + int expectedPageSize = (expectedDocs.size() + options.maxItemCount() - 1) / options.maxItemCount(); + + FeedResponseListValidator validator = new FeedResponseListValidator.Builder() + .totalSize(expectedDocs.size()) + .exactlyContainsInAnyOrder(expectedDocs.stream().map(d -> d.resourceId()).collect(Collectors.toList())) + .numberOfPages(expectedPageSize) + .pageSatisfy(0, new FeedResponseValidator.Builder() + .requestChargeGreaterThanOrEqualTo(1.0).build()) + .build(); + + validateQuerySuccess(queryObservable, validator, 10000); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void queryDocuments_ParameterizedQuery() throws Exception { + String query = "SELECT * from c where c.prop = @param"; + SqlParameterList params = new SqlParameterList(new SqlParameter("@param", 3)); + SqlQuerySpec sqs = new SqlQuerySpec(query, params); + + FeedOptions options = new FeedOptions(); + options.maxItemCount(5); + options.enableCrossPartitionQuery(true); + Flux> queryObservable = createdCollection.queryItems(sqs, options); + + List expectedDocs = createdDocuments.stream().filter(d -> 3 == d.getInt("prop")).collect(Collectors.toList()); + assertThat(expectedDocs).isNotEmpty(); + + int expectedPageSize = (expectedDocs.size() + options.maxItemCount() - 1) / options.maxItemCount(); + + FeedResponseListValidator validator = new FeedResponseListValidator.Builder() + .totalSize(expectedDocs.size()) + .exactlyContainsInAnyOrder(expectedDocs.stream().map(d -> d.resourceId()).collect(Collectors.toList())) + .numberOfPages(expectedPageSize) + 
+                .pageSatisfy(0, new FeedResponseValidator.Builder<CosmosItemProperties>()
+                        .requestChargeGreaterThanOrEqualTo(1.0).build())
+                .build();
+
+        validateQuerySuccess(queryObservable, validator, 10000);
+    }
+
+    @Test(groups = { "simple" }, timeOut = TIMEOUT)
+    public void queryDocuments_NoResults() throws Exception {
+
+        String query = "SELECT * from root r where r.id = '2'";
+        FeedOptions options = new FeedOptions();
+        options.enableCrossPartitionQuery(true);
+        Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options);
+
+        FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>()
+                .containsExactly(new ArrayList<>())
+                .numberOfPages(1)
+                .pageSatisfy(0, new FeedResponseValidator.Builder<CosmosItemProperties>()
+                        .requestChargeGreaterThanOrEqualTo(1.0).build())
+                .build();
+        validateQuerySuccess(queryObservable, validator);
+    }
+
+    @Test(groups = { "simple" }, timeOut = TIMEOUT)
+    public void queryDocumentsWithPageSize() throws Exception {
+
+        String query = "SELECT * from root";
+        FeedOptions options = new FeedOptions();
+        options.maxItemCount(3);
+        options.enableCrossPartitionQuery(true);
+        Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options);
+
+        List<CosmosItemProperties> expectedDocs = createdDocuments;
+        int expectedPageSize = (expectedDocs.size() + options.maxItemCount() - 1) / options.maxItemCount();
+
+        FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator
+                .Builder<CosmosItemProperties>()
+                .exactlyContainsInAnyOrder(createdDocuments
+                        .stream()
+                        .map(d -> d.resourceId())
+                        .collect(Collectors.toList()))
+                .numberOfPages(expectedPageSize)
+                .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>()
+                        .requestChargeGreaterThanOrEqualTo(1.0).build())
+                .build();
+
+        validateQuerySuccess(queryObservable, validator);
+    }
+
+    @Test(groups = { "simple" }, timeOut = TIMEOUT)
+    public void queryOrderBy() throws Exception {
+
+        String query = "SELECT * FROM r ORDER BY r.prop ASC";
+        FeedOptions options = new FeedOptions();
+        options.enableCrossPartitionQuery(true);
+        options.maxItemCount(3);
+        Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options);
+
+        List<CosmosItemProperties> expectedDocs = createdDocuments;
+        int expectedPageSize = (expectedDocs.size() + options.maxItemCount() - 1) / options.maxItemCount();
+
+        FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>()
+                .containsExactly(createdDocuments.stream()
+                        .sorted((e1, e2) -> Integer.compare(e1.getInt("prop"), e2.getInt("prop")))
+                        .map(d -> d.resourceId()).collect(Collectors.toList()))
+                .numberOfPages(expectedPageSize)
+                .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>()
+                        .requestChargeGreaterThanOrEqualTo(1.0).build())
+                .build();
+
+        validateQuerySuccess(queryObservable, validator);
+    }
+
+    @Test(groups = { "simple" }, timeOut = TIMEOUT * 1000)
+    public void continuationToken() throws Exception {
+        String query = "SELECT * FROM r ORDER BY r.prop ASC";
+        FeedOptions options = new FeedOptions();
+        options.enableCrossPartitionQuery(true);
+        options.maxItemCount(3);
+        Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options);
+
+        TestSubscriber<FeedResponse<CosmosItemProperties>> subscriber = new TestSubscriber<>();
+        queryObservable.take(1).subscribe(subscriber);
+
+        subscriber.awaitTerminalEvent();
+        subscriber.assertComplete();
+        subscriber.assertNoErrors();
+        assertThat(subscriber.valueCount()).isEqualTo(1);
+        FeedResponse<CosmosItemProperties> page = ((FeedResponse<CosmosItemProperties>) subscriber.getEvents().get(0).get(0));
+        assertThat(page.results()).hasSize(3);
+
+        assertThat(page.continuationToken()).isNotEmpty();
+
+
+        options.requestContinuation(page.continuationToken());
+        queryObservable = createdCollection.queryItems(query, options);
+
+        List<CosmosItemProperties> expectedDocs = createdDocuments.stream().filter(d -> (d.getInt("prop") > 2)).collect(Collectors.toList());
+        int expectedPageSize = (expectedDocs.size() + options.maxItemCount() - 1) / options.maxItemCount();
+
+        assertThat(expectedDocs).hasSize(createdDocuments.size() - 3);
+
+        FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>()
+                .containsExactly(expectedDocs.stream()
+                        .sorted((e1, e2) -> Integer.compare(e1.getInt("prop"), e2.getInt("prop")))
+                        .map(d -> d.resourceId()).collect(Collectors.toList()))
+                .numberOfPages(expectedPageSize)
+                .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>()
+                        .requestChargeGreaterThanOrEqualTo(1.0).build())
+                .build();
+        validateQuerySuccess(queryObservable, validator);
+    }
+
+    @Test(groups = { "simple" }, timeOut = TIMEOUT)
+    public void invalidQuerySyntax() throws Exception {
+        String query = "I am an invalid query";
+        FeedOptions options = new FeedOptions();
+        options.enableCrossPartitionQuery(true);
+        Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options);
+
+        FailureValidator validator = new FailureValidator.Builder()
+                .instanceOf(CosmosClientException.class)
+                .statusCode(400)
+                .notNullActivityId()
+                .build();
+        validateQueryFailure(queryObservable, validator);
+    }
+
+    public CosmosItemProperties createDocument(CosmosContainer cosmosContainer, int cnt) {
+        CosmosItemProperties docDefinition = getDocumentDefinition(cnt);
+        return cosmosContainer.createItem(docDefinition, new CosmosItemRequestOptions()).block().properties();
+    }
+
+    @BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT)
+    public void beforeClass() throws Exception {
+        client = clientBuilder().build();
+        createdCollection = getSharedSinglePartitionCosmosContainer(client);
+        truncateCollection(createdCollection);
+
+        for(int i = 0; i < 5; i++) {
+            createdDocuments.add(createDocument(createdCollection, i));
+        }
+
+        for(int i = 0; i < 8; i++) {
+            createdDocuments.add(createDocument(createdCollection, 99));
+        }
+
+        waitIfNeededForReplicasToCatchUp(clientBuilder());
+    }
+
+    @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
+    public void afterClass() {
+        safeClose(client);
+    }
+
+    private static CosmosItemProperties getDocumentDefinition(int cnt) {
+        String uuid = UUID.randomUUID().toString();
+        CosmosItemProperties doc = new CosmosItemProperties(String.format("{ "
+                + "\"id\": \"%s\", "
+                + "\"prop\" : %d, "
+                + "\"mypk\": \"%s\", "
+                + "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]"
+                + "}"
+                , uuid, cnt, uuid));
+        return doc;
+    }
+}
diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/SinglePartitionReadFeedDocumentsTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/SinglePartitionReadFeedDocumentsTest.java
new file mode 100644
index 0000000000000..af1a5ab29b591
--- /dev/null
+++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/SinglePartitionReadFeedDocumentsTest.java
@@ -0,0 +1,105 @@
+/*
+ * The MIT License (MIT)
+ * Copyright (c) 2018 Microsoft Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.CosmosClient; +import com.azure.data.cosmos.CosmosClientBuilder; +import com.azure.data.cosmos.CosmosContainer; +import com.azure.data.cosmos.CosmosItemProperties; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.internal.FeedResponseListValidator; +import com.azure.data.cosmos.internal.FeedResponseValidator; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; + +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.stream.Collectors; + +public class SinglePartitionReadFeedDocumentsTest extends TestSuiteBase { + + private CosmosContainer createdCollection; + private List createdDocuments; + + private CosmosClient client; + + @Factory(dataProvider = "clientBuildersWithDirect") + public SinglePartitionReadFeedDocumentsTest(CosmosClientBuilder clientBuilder) { + super(clientBuilder); + } + + @Test(groups = { "simple" }, timeOut = FEED_TIMEOUT) + public void readDocuments() { + final FeedOptions options = new FeedOptions(); + options.enableCrossPartitionQuery(true); + options.maxItemCount(2); + final Flux> feedObservable = createdCollection.readAllItems(options); + final int expectedPageSize = (createdDocuments.size() + options.maxItemCount() - 1) / options.maxItemCount(); + + FeedResponseListValidator validator = new FeedResponseListValidator.Builder() + .totalSize(createdDocuments.size()) + .numberOfPages(expectedPageSize) + .exactlyContainsInAnyOrder(createdDocuments.stream().map(d -> d.resourceId()).collect(Collectors.toList())) + .allPagesSatisfy(new FeedResponseValidator.Builder() + .requestChargeGreaterThanOrEqualTo(1.0).build()) + .build(); + validateQuerySuccess(feedObservable, validator, FEED_TIMEOUT); + } + + @BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT) + public void beforeClass() { + client = clientBuilder().build(); + createdCollection = getSharedSinglePartitionCosmosContainer(client); + truncateCollection(createdCollection); + + List docDefList = new ArrayList<>(); + + for(int i = 0; i < 5; i++) { + docDefList.add(getDocumentDefinition()); + } + + createdDocuments = bulkInsertBlocking(createdCollection, docDefList); + waitIfNeededForReplicasToCatchUp(clientBuilder()); + } + + @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeClose(client); + } + + private CosmosItemProperties getDocumentDefinition() { + String uuid = UUID.randomUUID().toString(); + CosmosItemProperties doc = new CosmosItemProperties(String.format("{ " + + "\"id\": \"%s\", " + + "\"mypk\": \"%s\", " + + "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]" + + "}" + , uuid, uuid)); + 
return doc; + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/StoredProcedureCrudTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/StoredProcedureCrudTest.java new file mode 100644 index 0000000000000..6f2662e89ff7e --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/StoredProcedureCrudTest.java @@ -0,0 +1,128 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.CosmosClient; +import com.azure.data.cosmos.CosmosClientBuilder; +import com.azure.data.cosmos.CosmosContainer; +import com.azure.data.cosmos.CosmosResponse; +import com.azure.data.cosmos.CosmosResponseValidator; +import com.azure.data.cosmos.CosmosStoredProcedure; +import com.azure.data.cosmos.CosmosStoredProcedureRequestOptions; +import com.azure.data.cosmos.CosmosStoredProcedureResponse; +import com.azure.data.cosmos.CosmosStoredProcedureProperties; +import com.azure.data.cosmos.internal.FailureValidator; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Mono; + +import java.util.UUID; + +import static org.assertj.core.api.Assertions.assertThat; + +public class StoredProcedureCrudTest extends TestSuiteBase { + + private CosmosClient client; + private CosmosContainer container; + + @Factory(dataProvider = "clientBuildersWithDirect") + public StoredProcedureCrudTest(CosmosClientBuilder clientBuilder) { + super(clientBuilder); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void createStoredProcedure() throws Exception { + + CosmosStoredProcedureProperties storedProcedureDef = new CosmosStoredProcedureProperties(); + storedProcedureDef.id(UUID.randomUUID().toString()); + storedProcedureDef.body("function() {var x = 10;}"); + + Mono createObservable = container.getScripts().createStoredProcedure(storedProcedureDef, new CosmosStoredProcedureRequestOptions()); + + CosmosResponseValidator validator = new CosmosResponseValidator.Builder() + .withId(storedProcedureDef.id()) + .withStoredProcedureBody("function() {var x = 10;}") + .notNullEtag() + .build(); + + validateSuccess(createObservable, validator); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void readStoredProcedure() throws Exception { + + CosmosStoredProcedureProperties 
storedProcedureDef = new CosmosStoredProcedureProperties(); + storedProcedureDef.id(UUID.randomUUID().toString()); + storedProcedureDef.body("function() {var x = 10;}"); + CosmosStoredProcedure storedProcedure = container.getScripts().createStoredProcedure(storedProcedureDef, new CosmosStoredProcedureRequestOptions()).block().storedProcedure(); + + waitIfNeededForReplicasToCatchUp(clientBuilder()); + Mono readObservable = storedProcedure.read(null); + + CosmosResponseValidator validator = new CosmosResponseValidator.Builder() + .withId(storedProcedureDef.id()) + .withStoredProcedureBody("function() {var x = 10;}") + .notNullEtag() + .build(); + + validateSuccess(readObservable, validator); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void deleteStoredProcedure() throws Exception { + + CosmosStoredProcedureProperties storedProcedureDef = new CosmosStoredProcedureProperties(); + storedProcedureDef.id(UUID.randomUUID().toString()); + storedProcedureDef.body("function() {var x = 10;}"); + + CosmosStoredProcedure storedProcedure = this.container.getScripts().createStoredProcedure(storedProcedureDef, new CosmosStoredProcedureRequestOptions()).block().storedProcedure(); + Mono deleteObservable = storedProcedure.delete(new CosmosStoredProcedureRequestOptions()); + + CosmosResponseValidator validator = new CosmosResponseValidator.Builder<>() + .nullResource() + .build(); + + validateSuccess(deleteObservable, validator); + + waitIfNeededForReplicasToCatchUp(this.clientBuilder()); + + Mono readObservable = storedProcedure.read(null); + FailureValidator notFoundValidator = new FailureValidator.Builder().resourceNotFound().build(); + validateFailure(readObservable, notFoundValidator); + } + + @BeforeClass(groups = { "simple" }, timeOut = 10_000 * SETUP_TIMEOUT) + public void beforeClass() { + assertThat(this.client).isNull(); + this.client = clientBuilder().build(); + this.container = getSharedMultiPartitionCosmosContainer(this.client); + } + + @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + assertThat(this.client).isNotNull(); + this.client.close(); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/StoredProcedureQueryTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/StoredProcedureQueryTest.java new file mode 100644 index 0000000000000..7ac227a1b8a61 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/StoredProcedureQueryTest.java @@ -0,0 +1,171 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.CosmosClient; +import com.azure.data.cosmos.CosmosClientBuilder; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.CosmosContainer; +import com.azure.data.cosmos.CosmosStoredProcedureProperties; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.internal.FailureValidator; +import com.azure.data.cosmos.internal.FeedResponseListValidator; +import com.azure.data.cosmos.internal.FeedResponseValidator; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; + +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.stream.Collectors; + +import static org.assertj.core.api.Assertions.assertThat; + +public class StoredProcedureQueryTest extends TestSuiteBase { + + private CosmosContainer createdCollection; + private List createdStoredProcs = new ArrayList<>(); + + private CosmosClient client; + + @Factory(dataProvider = "clientBuildersWithDirect") + public StoredProcedureQueryTest(CosmosClientBuilder clientBuilder) { + super(clientBuilder); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void queryWithFilter() throws Exception { + + String filterId = createdStoredProcs.get(0).id(); + String query = String.format("SELECT * from c where c.id = '%s'", filterId); + + FeedOptions options = new FeedOptions(); + options.maxItemCount(5); + Flux> queryObservable = createdCollection.getScripts() + .queryStoredProcedures(query, options); + + List expectedDocs = createdStoredProcs.stream() + .filter(sp -> filterId.equals(sp.id())).collect(Collectors.toList()); + assertThat(expectedDocs).isNotEmpty(); + + int expectedPageSize = (expectedDocs.size() + options.maxItemCount() - 1) / options.maxItemCount(); + + FeedResponseListValidator validator = new FeedResponseListValidator.Builder() + .totalSize(expectedDocs.size()) + .exactlyContainsInAnyOrder(expectedDocs.stream().map(d -> d.resourceId()).collect(Collectors.toList())) + .numberOfPages(expectedPageSize) + .pageSatisfy(0, new FeedResponseValidator.Builder() + .requestChargeGreaterThanOrEqualTo(1.0).build()) + .build(); + + validateQuerySuccess(queryObservable, validator, 10000); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void query_NoResults() throws Exception { + + String query = "SELECT * from root r where r.id = '2'"; + FeedOptions options = new FeedOptions(); + options.enableCrossPartitionQuery(true); + Flux> queryObservable = createdCollection.getScripts() + .queryStoredProcedures(query, options); + + FeedResponseListValidator validator = new FeedResponseListValidator.Builder() + .containsExactly(new ArrayList<>()).numberOfPages(1) + .pageSatisfy(0, new FeedResponseValidator.Builder() + .requestChargeGreaterThanOrEqualTo(1.0).build()) + .build(); + validateQuerySuccess(queryObservable, validator); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void queryAll() throws Exception { + + String query = "SELECT * from root"; + FeedOptions options = new FeedOptions(); + 
options.maxItemCount(3); + options.enableCrossPartitionQuery(true); + Flux> queryObservable = createdCollection.getScripts() + .queryStoredProcedures(query, options); + + List expectedDocs = createdStoredProcs; + + int expectedPageSize = (expectedDocs.size() + options.maxItemCount() - 1) / options.maxItemCount(); + + FeedResponseListValidator validator = new FeedResponseListValidator.Builder() + .exactlyContainsInAnyOrder(expectedDocs.stream().map(d -> d.resourceId()).collect(Collectors.toList())) + .numberOfPages(expectedPageSize) + .allPagesSatisfy(new FeedResponseValidator.Builder() + .requestChargeGreaterThanOrEqualTo(1.0).build()) + .build(); + + validateQuerySuccess(queryObservable, validator); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void invalidQuerySytax() throws Exception { + String query = "I am an invalid query"; + FeedOptions options = new FeedOptions(); + options.enableCrossPartitionQuery(true); + Flux> queryObservable = createdCollection.getScripts() + .queryStoredProcedures(query, options); + + FailureValidator validator = new FailureValidator.Builder().instanceOf(CosmosClientException.class) + .statusCode(400).notNullActivityId().build(); + validateQueryFailure(queryObservable, validator); + } + + public CosmosStoredProcedureProperties createStoredProc(CosmosContainer cosmosContainer) { + CosmosStoredProcedureProperties storedProcedure = getStoredProcedureDef(); + return cosmosContainer.getScripts().createStoredProcedure(storedProcedure).block().properties(); + } + + @BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT) + public void beforeClass() throws Exception { + client = clientBuilder().build(); + createdCollection = getSharedMultiPartitionCosmosContainer(client); + truncateCollection(createdCollection); + + for (int i = 0; i < 5; i++) { + createdStoredProcs.add(createStoredProc(createdCollection)); + } + + waitIfNeededForReplicasToCatchUp(clientBuilder()); + } + + @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeClose(client); + } + + private static CosmosStoredProcedureProperties getStoredProcedureDef() { + CosmosStoredProcedureProperties storedProcedureDef = new CosmosStoredProcedureProperties(); + storedProcedureDef.id(UUID.randomUUID().toString()); + storedProcedureDef.body("function() {var x = 10;}"); + return storedProcedureDef; + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/StoredProcedureUpsertReplaceTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/StoredProcedureUpsertReplaceTest.java new file mode 100644 index 0000000000000..b4370517e7074 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/StoredProcedureUpsertReplaceTest.java @@ -0,0 +1,122 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.CosmosClient; +import com.azure.data.cosmos.CosmosClientBuilder; +import com.azure.data.cosmos.CosmosContainer; +import com.azure.data.cosmos.CosmosResponseValidator; +import com.azure.data.cosmos.CosmosStoredProcedure; +import com.azure.data.cosmos.CosmosStoredProcedureProperties; +import com.azure.data.cosmos.CosmosStoredProcedureRequestOptions; +import com.azure.data.cosmos.CosmosStoredProcedureResponse; +import com.azure.data.cosmos.PartitionKey; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Mono; + +import java.util.UUID; + +import static org.assertj.core.api.Assertions.assertThat; + +public class StoredProcedureUpsertReplaceTest extends TestSuiteBase { + + private CosmosContainer createdCollection; + + private CosmosClient client; + + @Factory(dataProvider = "clientBuildersWithDirect") + public StoredProcedureUpsertReplaceTest(CosmosClientBuilder clientBuilder) { + super(clientBuilder); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void replaceStoredProcedure() throws Exception { + + // create a stored procedure + CosmosStoredProcedureProperties storedProcedureDef = new CosmosStoredProcedureProperties(); + storedProcedureDef.id(UUID.randomUUID().toString()); + storedProcedureDef.body("function() {var x = 10;}"); + CosmosStoredProcedureProperties readBackSp = createdCollection.getScripts() + .createStoredProcedure(storedProcedureDef, new CosmosStoredProcedureRequestOptions()).block() + .properties(); + + // read stored procedure to validate creation + waitIfNeededForReplicasToCatchUp(clientBuilder()); + Mono readObservable = createdCollection.getScripts() + .getStoredProcedure(readBackSp.id()).read(null); + + // validate stored procedure creation + CosmosResponseValidator validatorForRead = new CosmosResponseValidator.Builder() + .withId(readBackSp.id()).withStoredProcedureBody("function() {var x = 10;}").notNullEtag().build(); + validateSuccess(readObservable, validatorForRead); + + // update stored procedure + readBackSp.body("function() {var x = 11;}"); + + Mono replaceObservable = createdCollection.getScripts() + .getStoredProcedure(readBackSp.id()).replace(readBackSp); + + // validate stored procedure replace + CosmosResponseValidator validatorForReplace = new CosmosResponseValidator.Builder() + .withId(readBackSp.id()).withStoredProcedureBody("function() {var x = 11;}").notNullEtag().build(); + validateSuccess(replaceObservable, validatorForReplace); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void executeStoredProcedure() throws Exception { + // create a stored procedure + CosmosStoredProcedureProperties storedProcedureDef = BridgeInternal + .createCosmosStoredProcedureProperties("{" + " 'id': '" + UUID.randomUUID().toString() + "'," + + " 'body':" + " 'function () {" + " for (var i = 0; i < 
10; i++) {" + + " getContext().getResponse().appendValue(\"Body\", i);" + " }" + " }'" + "}"); + + CosmosStoredProcedure storedProcedure = null; + + storedProcedure = createdCollection.getScripts() + .createStoredProcedure(storedProcedureDef, new CosmosStoredProcedureRequestOptions()).block() + .storedProcedure(); + + String result = null; + + CosmosStoredProcedureRequestOptions options = new CosmosStoredProcedureRequestOptions(); + options.partitionKey(PartitionKey.None); + result = storedProcedure.execute(null, options).block().responseAsString(); + + assertThat(result).isEqualTo("\"0123456789\""); + } + + @BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT) + public void beforeClass() { + client = clientBuilder().build(); + createdCollection = getSharedMultiPartitionCosmosContainer(client); + } + + @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeClose(client); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/TestSuiteBase.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/TestSuiteBase.java new file mode 100644 index 0000000000000..2495347ac1169 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/TestSuiteBase.java @@ -0,0 +1,1004 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.CompositePath; +import com.azure.data.cosmos.CompositePathSortOrder; +import com.azure.data.cosmos.ConnectionMode; +import com.azure.data.cosmos.ConnectionPolicy; +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.CosmosBridgeInternal; +import com.azure.data.cosmos.CosmosClient; +import com.azure.data.cosmos.CosmosClientBuilder; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.CosmosClientTest; +import com.azure.data.cosmos.CosmosContainer; +import com.azure.data.cosmos.CosmosContainerRequestOptions; +import com.azure.data.cosmos.CosmosContainerProperties; +import com.azure.data.cosmos.CosmosDatabase; +import com.azure.data.cosmos.CosmosDatabaseForTest; +import com.azure.data.cosmos.CosmosDatabaseProperties; +import com.azure.data.cosmos.CosmosDatabaseResponse; +import com.azure.data.cosmos.CosmosItem; +import com.azure.data.cosmos.CosmosItemProperties; +import com.azure.data.cosmos.CosmosItemResponse; +import com.azure.data.cosmos.CosmosResponse; +import com.azure.data.cosmos.CosmosResponseValidator; +import com.azure.data.cosmos.CosmosStoredProcedureRequestOptions; +import com.azure.data.cosmos.CosmosUser; +import com.azure.data.cosmos.CosmosUserProperties; +import com.azure.data.cosmos.DataType; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.IncludedPath; +import com.azure.data.cosmos.Index; +import com.azure.data.cosmos.IndexingPolicy; +import com.azure.data.cosmos.PartitionKey; +import com.azure.data.cosmos.PartitionKeyDefinition; +import com.azure.data.cosmos.Resource; +import com.azure.data.cosmos.RetryOptions; +import com.azure.data.cosmos.SqlQuerySpec; +import com.azure.data.cosmos.internal.Configs; +import com.azure.data.cosmos.internal.FailureValidator; +import com.azure.data.cosmos.internal.FeedResponseListValidator; +import com.azure.data.cosmos.internal.PathParser; +import com.azure.data.cosmos.internal.TestConfigurations; +import com.azure.data.cosmos.internal.Utils; +import com.azure.data.cosmos.internal.directconnectivity.Protocol; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.DeserializationFeature; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.CaseFormat; +import com.google.common.collect.ImmutableList; +import io.reactivex.subscribers.TestSubscriber; +import org.apache.commons.lang3.ObjectUtils; +import org.apache.commons.lang3.StringUtils; +import org.mockito.stubbing.Answer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testng.annotations.AfterSuite; +import org.testng.annotations.BeforeSuite; +import org.testng.annotations.DataProvider; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; +import reactor.core.scheduler.Schedulers; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +import static com.azure.data.cosmos.BridgeInternal.extractConfigs; +import static com.azure.data.cosmos.BridgeInternal.injectConfigs; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.spy; + +public class TestSuiteBase extends CosmosClientTest { + + 
private static final int DEFAULT_BULK_INSERT_CONCURRENCY_LEVEL = 500; + private static final ObjectMapper objectMapper = new ObjectMapper(); + + protected static Logger logger = LoggerFactory.getLogger(TestSuiteBase.class.getSimpleName()); + protected static final int TIMEOUT = 40000; + protected static final int FEED_TIMEOUT = 40000; + protected static final int SETUP_TIMEOUT = 60000; + protected static final int SHUTDOWN_TIMEOUT = 12000; + + protected static final int SUITE_SETUP_TIMEOUT = 120000; + protected static final int SUITE_SHUTDOWN_TIMEOUT = 60000; + + protected static final int WAIT_REPLICA_CATCH_UP_IN_MILLIS = 4000; + + protected final static ConsistencyLevel accountConsistency; + protected static final ImmutableList preferredLocations; + private static final ImmutableList desiredConsistencies; + private static final ImmutableList protocols; + + protected int subscriberValidationTimeout = TIMEOUT; + + private static CosmosDatabase SHARED_DATABASE; + private static CosmosContainer SHARED_MULTI_PARTITION_COLLECTION; + private static CosmosContainer SHARED_MULTI_PARTITION_COLLECTION_WITH_COMPOSITE_AND_SPATIAL_INDEXES; + private static CosmosContainer SHARED_SINGLE_PARTITION_COLLECTION; + + public TestSuiteBase(CosmosClientBuilder clientBuilder) { + super(clientBuilder); + } + + protected static CosmosDatabase getSharedCosmosDatabase(CosmosClient client) { + return CosmosBridgeInternal.getCosmosDatabaseWithNewClient(SHARED_DATABASE, client); + } + + protected static CosmosContainer getSharedMultiPartitionCosmosContainer(CosmosClient client) { + return CosmosBridgeInternal.getCosmosContainerWithNewClient(SHARED_MULTI_PARTITION_COLLECTION, SHARED_DATABASE, client); + } + + protected static CosmosContainer getSharedMultiPartitionCosmosContainerWithCompositeAndSpatialIndexes(CosmosClient client) { + return CosmosBridgeInternal.getCosmosContainerWithNewClient(SHARED_MULTI_PARTITION_COLLECTION_WITH_COMPOSITE_AND_SPATIAL_INDEXES, SHARED_DATABASE, client); + } + + protected static CosmosContainer getSharedSinglePartitionCosmosContainer(CosmosClient client) { + return CosmosBridgeInternal.getCosmosContainerWithNewClient(SHARED_SINGLE_PARTITION_COLLECTION, SHARED_DATABASE, client); + } + + static { + accountConsistency = parseConsistency(TestConfigurations.CONSISTENCY); + desiredConsistencies = immutableListOrNull( + ObjectUtils.defaultIfNull(parseDesiredConsistencies(TestConfigurations.DESIRED_CONSISTENCIES), + allEqualOrLowerConsistencies(accountConsistency))); + preferredLocations = immutableListOrNull(parsePreferredLocation(TestConfigurations.PREFERRED_LOCATIONS)); + protocols = ObjectUtils.defaultIfNull(immutableListOrNull(parseProtocols(TestConfigurations.PROTOCOLS)), + ImmutableList.of(Protocol.HTTPS, Protocol.TCP)); + + // Object mapper configurations + objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + objectMapper.configure(JsonParser.Feature.ALLOW_SINGLE_QUOTES, true); + objectMapper.configure(JsonParser.Feature.ALLOW_TRAILING_COMMA, true); + objectMapper.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, true); + } + + protected TestSuiteBase() { + logger.debug("Initializing {} ...", this.getClass().getSimpleName()); + } + + private static ImmutableList immutableListOrNull(List list) { + return list != null ? 
ImmutableList.copyOf(list) : null; + } + + private static class DatabaseManagerImpl implements CosmosDatabaseForTest.DatabaseManager { + public static DatabaseManagerImpl getInstance(CosmosClient client) { + return new DatabaseManagerImpl(client); + } + + private final CosmosClient client; + + private DatabaseManagerImpl(CosmosClient client) { + this.client = client; + } + + @Override + public Flux> queryDatabases(SqlQuerySpec query) { + return client.queryDatabases(query, null); + } + + @Override + public Mono createDatabase(CosmosDatabaseProperties databaseDefinition) { + return client.createDatabase(databaseDefinition); + } + + @Override + public CosmosDatabase getDatabase(String id) { + return client.getDatabase(id); + } + } + + @BeforeSuite(groups = {"simple", "long", "direct", "multi-master", "emulator", "non-emulator"}, timeOut = SUITE_SETUP_TIMEOUT) + public static void beforeSuite() { + + logger.info("beforeSuite Started"); + + try (CosmosClient houseKeepingClient = createGatewayHouseKeepingDocumentClient().build()) { + CosmosDatabaseForTest dbForTest = CosmosDatabaseForTest.create(DatabaseManagerImpl.getInstance(houseKeepingClient)); + SHARED_DATABASE = dbForTest.createdDatabase; + CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); + SHARED_MULTI_PARTITION_COLLECTION = createCollection(SHARED_DATABASE, getCollectionDefinitionWithRangeRangeIndex(), options, 10100); + SHARED_MULTI_PARTITION_COLLECTION_WITH_COMPOSITE_AND_SPATIAL_INDEXES = createCollection(SHARED_DATABASE, getCollectionDefinitionMultiPartitionWithCompositeAndSpatialIndexes(), options); + SHARED_SINGLE_PARTITION_COLLECTION = createCollection(SHARED_DATABASE, getCollectionDefinitionWithRangeRangeIndex(), options, 6000); + } + } + + @AfterSuite(groups = {"simple", "long", "direct", "multi-master", "emulator", "non-emulator"}, timeOut = SUITE_SHUTDOWN_TIMEOUT) + public static void afterSuite() { + + logger.info("afterSuite Started"); + + try (CosmosClient houseKeepingClient = createGatewayHouseKeepingDocumentClient().build()) { + safeDeleteDatabase(SHARED_DATABASE); + CosmosDatabaseForTest.cleanupStaleTestDatabases(DatabaseManagerImpl.getInstance(houseKeepingClient)); + } + } + + protected static void truncateCollection(CosmosContainer cosmosContainer) { + CosmosContainerProperties cosmosContainerProperties = cosmosContainer.read().block().properties(); + String cosmosContainerId = cosmosContainerProperties.id(); + logger.info("Truncating collection {} ...", cosmosContainerId); + List paths = cosmosContainerProperties.partitionKeyDefinition().paths(); + FeedOptions options = new FeedOptions(); + options.maxDegreeOfParallelism(-1); + options.enableCrossPartitionQuery(true); + options.maxItemCount(100); + + logger.info("Truncating collection {} documents ...", cosmosContainer.id()); + + cosmosContainer.queryItems("SELECT * FROM root", options) + .publishOn(Schedulers.parallel()) + .flatMap(page -> Flux.fromIterable(page.results())) + .flatMap(doc -> { + + Object propertyValue = null; + if (paths != null && !paths.isEmpty()) { + List pkPath = PathParser.getPathParts(paths.get(0)); + propertyValue = doc.getObjectByPath(pkPath); + if (propertyValue == null) { + propertyValue = PartitionKey.None; + } + + } + return cosmosContainer.getItem(doc.id(), propertyValue).delete(); + }).then().block(); + logger.info("Truncating collection {} triggers ...", cosmosContainerId); + + cosmosContainer.getScripts().queryTriggers("SELECT * FROM root", options) + .publishOn(Schedulers.parallel()) + .flatMap(page -> 
Flux.fromIterable(page.results())) + .flatMap(trigger -> { +// if (paths != null && !paths.isEmpty()) { +// Object propertyValue = trigger.getObjectByPath(PathParser.getPathParts(paths.get(0))); +// requestOptions.partitionKey(new PartitionKey(propertyValue)); +// } + + return cosmosContainer.getScripts().getTrigger(trigger.id()).delete(); + }).then().block(); + + logger.info("Truncating collection {} storedProcedures ...", cosmosContainerId); + + cosmosContainer.getScripts().queryStoredProcedures("SELECT * FROM root", options) + .publishOn(Schedulers.parallel()) + .flatMap(page -> Flux.fromIterable(page.results())) + .flatMap(storedProcedure -> { + +// if (paths != null && !paths.isEmpty()) { +// Object propertyValue = storedProcedure.getObjectByPath(PathParser.getPathParts(paths.get(0))); +// requestOptions.partitionKey(new PartitionKey(propertyValue)); +// } + + return cosmosContainer.getScripts().getStoredProcedure(storedProcedure.id()).delete(new CosmosStoredProcedureRequestOptions()); + }).then().block(); + + logger.info("Truncating collection {} udfs ...", cosmosContainerId); + + cosmosContainer.getScripts().queryUserDefinedFunctions("SELECT * FROM root", options) + .publishOn(Schedulers.parallel()) + .flatMap(page -> Flux.fromIterable(page.results())) + .flatMap(udf -> { + +// if (paths != null && !paths.isEmpty()) { +// Object propertyValue = udf.getObjectByPath(PathParser.getPathParts(paths.get(0))); +// requestOptions.partitionKey(new PartitionKey(propertyValue)); +// } + + return cosmosContainer.getScripts().getUserDefinedFunction(udf.id()).delete(); + }).then().block(); + + logger.info("Finished truncating collection {}.", cosmosContainerId); + } + + protected static void waitIfNeededForReplicasToCatchUp(CosmosClientBuilder clientBuilder) { + switch (clientBuilder.consistencyLevel()) { + case EVENTUAL: + case CONSISTENT_PREFIX: + logger.info(" additional wait in EVENTUAL mode so the replica catch up"); + // give times to replicas to catch up after a write + try { + TimeUnit.MILLISECONDS.sleep(WAIT_REPLICA_CATCH_UP_IN_MILLIS); + } catch (Exception e) { + logger.error("unexpected failure", e); + } + + case SESSION: + case BOUNDED_STALENESS: + case STRONG: + default: + break; + } + } + + public static CosmosContainer createCollection(CosmosDatabase database, CosmosContainerProperties cosmosContainerProperties, + CosmosContainerRequestOptions options, int throughput) { + return database.createContainer(cosmosContainerProperties, throughput, options).block().container(); + } + + public static CosmosContainer createCollection(CosmosDatabase database, CosmosContainerProperties cosmosContainerProperties, + CosmosContainerRequestOptions options) { + return database.createContainer(cosmosContainerProperties, options).block().container(); + } + + private static CosmosContainerProperties getCollectionDefinitionMultiPartitionWithCompositeAndSpatialIndexes() { + final String NUMBER_FIELD = "numberField"; + final String STRING_FIELD = "stringField"; + final String NUMBER_FIELD_2 = "numberField2"; + final String STRING_FIELD_2 = "stringField2"; + final String BOOL_FIELD = "boolField"; + final String NULL_FIELD = "nullField"; + final String OBJECT_FIELD = "objectField"; + final String ARRAY_FIELD = "arrayField"; + final String SHORT_STRING_FIELD = "shortStringField"; + final String MEDIUM_STRING_FIELD = "mediumStringField"; + final String LONG_STRING_FIELD = "longStringField"; + final String PARTITION_KEY = "pk"; + + PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition(); 
+ ArrayList partitionKeyPaths = new ArrayList(); + partitionKeyPaths.add("/" + PARTITION_KEY); + partitionKeyDefinition.paths(partitionKeyPaths); + + CosmosContainerProperties cosmosContainerProperties = new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDefinition); + + IndexingPolicy indexingPolicy = new IndexingPolicy(); + List> compositeIndexes = new ArrayList<>(); + + //Simple + ArrayList compositeIndexSimple = new ArrayList(); + CompositePath compositePath1 = new CompositePath(); + compositePath1.path("/" + NUMBER_FIELD); + compositePath1.order(CompositePathSortOrder.ASCENDING); + + CompositePath compositePath2 = new CompositePath(); + compositePath2.path("/" + STRING_FIELD); + compositePath2.order(CompositePathSortOrder.DESCENDING); + + compositeIndexSimple.add(compositePath1); + compositeIndexSimple.add(compositePath2); + + //Max Columns + ArrayList compositeIndexMaxColumns = new ArrayList(); + CompositePath compositePath3 = new CompositePath(); + compositePath3.path("/" + NUMBER_FIELD); + compositePath3.order(CompositePathSortOrder.DESCENDING); + + CompositePath compositePath4 = new CompositePath(); + compositePath4.path("/" + STRING_FIELD); + compositePath4.order(CompositePathSortOrder.ASCENDING); + + CompositePath compositePath5 = new CompositePath(); + compositePath5.path("/" + NUMBER_FIELD_2); + compositePath5.order(CompositePathSortOrder.DESCENDING); + + CompositePath compositePath6 = new CompositePath(); + compositePath6.path("/" + STRING_FIELD_2); + compositePath6.order(CompositePathSortOrder.ASCENDING); + + compositeIndexMaxColumns.add(compositePath3); + compositeIndexMaxColumns.add(compositePath4); + compositeIndexMaxColumns.add(compositePath5); + compositeIndexMaxColumns.add(compositePath6); + + //Primitive Values + ArrayList compositeIndexPrimitiveValues = new ArrayList(); + CompositePath compositePath7 = new CompositePath(); + compositePath7.path("/" + NUMBER_FIELD); + compositePath7.order(CompositePathSortOrder.DESCENDING); + + CompositePath compositePath8 = new CompositePath(); + compositePath8.path("/" + STRING_FIELD); + compositePath8.order(CompositePathSortOrder.ASCENDING); + + CompositePath compositePath9 = new CompositePath(); + compositePath9.path("/" + BOOL_FIELD); + compositePath9.order(CompositePathSortOrder.DESCENDING); + + CompositePath compositePath10 = new CompositePath(); + compositePath10.path("/" + NULL_FIELD); + compositePath10.order(CompositePathSortOrder.ASCENDING); + + compositeIndexPrimitiveValues.add(compositePath7); + compositeIndexPrimitiveValues.add(compositePath8); + compositeIndexPrimitiveValues.add(compositePath9); + compositeIndexPrimitiveValues.add(compositePath10); + + //Long Strings + ArrayList compositeIndexLongStrings = new ArrayList(); + CompositePath compositePath11 = new CompositePath(); + compositePath11.path("/" + STRING_FIELD); + + CompositePath compositePath12 = new CompositePath(); + compositePath12.path("/" + SHORT_STRING_FIELD); + + CompositePath compositePath13 = new CompositePath(); + compositePath13.path("/" + MEDIUM_STRING_FIELD); + + CompositePath compositePath14 = new CompositePath(); + compositePath14.path("/" + LONG_STRING_FIELD); + + compositeIndexLongStrings.add(compositePath11); + compositeIndexLongStrings.add(compositePath12); + compositeIndexLongStrings.add(compositePath13); + compositeIndexLongStrings.add(compositePath14); + + compositeIndexes.add(compositeIndexSimple); + compositeIndexes.add(compositeIndexMaxColumns); + compositeIndexes.add(compositeIndexPrimitiveValues); + 
compositeIndexes.add(compositeIndexLongStrings); + + indexingPolicy.compositeIndexes(compositeIndexes); + cosmosContainerProperties.indexingPolicy(indexingPolicy); + + return cosmosContainerProperties; + } + + public static CosmosContainer createCollection(CosmosClient client, String dbId, CosmosContainerProperties collectionDefinition) { + return client.getDatabase(dbId).createContainer(collectionDefinition).block().container(); + } + + public static void deleteCollection(CosmosClient client, String dbId, String collectionId) { + client.getDatabase(dbId).getContainer(collectionId).delete().block(); + } + + public static CosmosItem createDocument(CosmosContainer cosmosContainer, CosmosItemProperties item) { + return cosmosContainer.createItem(item).block().item(); + } + + private Flux bulkInsert(CosmosContainer cosmosContainer, + List documentDefinitionList, + int concurrencyLevel) { + List> result = new ArrayList<>(documentDefinitionList.size()); + for (CosmosItemProperties docDef : documentDefinitionList) { + result.add(cosmosContainer.createItem(docDef)); + } + + return Flux.merge(Flux.fromIterable(result), concurrencyLevel); + } + public List bulkInsertBlocking(CosmosContainer cosmosContainer, + List documentDefinitionList) { + return bulkInsert(cosmosContainer, documentDefinitionList, DEFAULT_BULK_INSERT_CONCURRENCY_LEVEL) + .publishOn(Schedulers.parallel()) + .map(CosmosItemResponse::properties) + .collectList() + .block(); + } + + public void voidBulkInsertBlocking(CosmosContainer cosmosContainer, + List documentDefinitionList) { + bulkInsert(cosmosContainer, documentDefinitionList, DEFAULT_BULK_INSERT_CONCURRENCY_LEVEL) + .publishOn(Schedulers.parallel()) + .map(CosmosItemResponse::properties) + .then() + .block(); + } + + public static CosmosUser createUser(CosmosClient client, String databaseId, CosmosUserProperties userSettings) { + return client.getDatabase(databaseId).read().block().database().createUser(userSettings).block().user(); + } + + public static CosmosUser safeCreateUser(CosmosClient client, String databaseId, CosmosUserProperties user) { + deleteUserIfExists(client, databaseId, user.id()); + return createUser(client, databaseId, user); + } + + private static CosmosContainer safeCreateCollection(CosmosClient client, String databaseId, CosmosContainerProperties collection, CosmosContainerRequestOptions options) { + deleteCollectionIfExists(client, databaseId, collection.id()); + return createCollection(client.getDatabase(databaseId), collection, options); + } + + static protected CosmosContainerProperties getCollectionDefinition() { + PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition(); + ArrayList paths = new ArrayList(); + paths.add("/mypk"); + partitionKeyDef.paths(paths); + + CosmosContainerProperties collectionDefinition = new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDef); + + return collectionDefinition; + } + + static protected CosmosContainerProperties getCollectionDefinitionWithRangeRangeIndex() { + PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition(); + ArrayList paths = new ArrayList<>(); + paths.add("/mypk"); + partitionKeyDef.paths(paths); + IndexingPolicy indexingPolicy = new IndexingPolicy(); + List includedPaths = new ArrayList<>(); + IncludedPath includedPath = new IncludedPath(); + includedPath.path("/*"); + Collection indexes = new ArrayList<>(); + Index stringIndex = Index.Range(DataType.STRING); + BridgeInternal.setProperty(stringIndex, "precision", -1); + indexes.add(stringIndex); + + 
Index numberIndex = Index.Range(DataType.NUMBER); + BridgeInternal.setProperty(numberIndex, "precision", -1); + indexes.add(numberIndex); + includedPath.indexes(indexes); + includedPaths.add(includedPath); + indexingPolicy.setIncludedPaths(includedPaths); + + CosmosContainerProperties cosmosContainerProperties = new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDef); + cosmosContainerProperties.indexingPolicy(indexingPolicy); + + return cosmosContainerProperties; + } + + public static void deleteCollectionIfExists(CosmosClient client, String databaseId, String collectionId) { + CosmosDatabase database = client.getDatabase(databaseId).read().block().database(); + List res = database.queryContainers(String.format("SELECT * FROM root r where r.id = '%s'", collectionId), null) + .flatMap(page -> Flux.fromIterable(page.results())) + .collectList() + .block(); + + if (!res.isEmpty()) { + deleteCollection(database, collectionId); + } + } + + public static void deleteCollection(CosmosDatabase cosmosDatabase, String collectionId) { + cosmosDatabase.getContainer(collectionId).delete().block(); + } + + public static void deleteCollection(CosmosContainer cosmosContainer) { + cosmosContainer.delete().block(); + } + + public static void deleteDocumentIfExists(CosmosClient client, String databaseId, String collectionId, String docId) { + FeedOptions options = new FeedOptions(); + options.partitionKey(new PartitionKey(docId)); + CosmosContainer cosmosContainer = client.getDatabase(databaseId).read().block().database().getContainer(collectionId).read().block().container(); + List res = cosmosContainer + .queryItems(String.format("SELECT * FROM root r where r.id = '%s'", docId), options) + .flatMap(page -> Flux.fromIterable(page.results())) + .collectList().block(); + + if (!res.isEmpty()) { + deleteDocument(cosmosContainer, docId); + } + } + + public static void safeDeleteDocument(CosmosContainer cosmosContainer, String documentId, Object partitionKey) { + if (cosmosContainer != null && documentId != null) { + try { + cosmosContainer.getItem(documentId, partitionKey).read().block().item().delete().block(); + } catch (Exception e) { + CosmosClientException dce = Utils.as(e, CosmosClientException.class); + if (dce == null || dce.statusCode() != 404) { + throw e; + } + } + } + } + + public static void deleteDocument(CosmosContainer cosmosContainer, String documentId) { + cosmosContainer.getItem(documentId, PartitionKey.None).read().block().item().delete(); + } + + public static void deleteUserIfExists(CosmosClient client, String databaseId, String userId) { + CosmosDatabase database = client.getDatabase(databaseId).read().block().database(); + List res = database + .queryUsers(String.format("SELECT * FROM root r where r.id = '%s'", userId), null) + .flatMap(page -> Flux.fromIterable(page.results())) + .collectList().block(); + if (!res.isEmpty()) { + deleteUser(database, userId); + } + } + + public static void deleteUser(CosmosDatabase database, String userId) { + database.getUser(userId).read().block().user().delete().block(); + } + + static private CosmosDatabase safeCreateDatabase(CosmosClient client, CosmosDatabaseProperties databaseSettings) { + safeDeleteDatabase(client.getDatabase(databaseSettings.id())); + return client.createDatabase(databaseSettings).block().database(); + } + + static protected CosmosDatabase createDatabase(CosmosClient client, String databaseId) { + CosmosDatabaseProperties databaseSettings = new CosmosDatabaseProperties(databaseId); + return 
client.createDatabase(databaseSettings).block().database(); + } + + static protected CosmosDatabase createDatabaseIfNotExists(CosmosClient client, String databaseId) { + List res = client.queryDatabases(String.format("SELECT * FROM r where r.id = '%s'", databaseId), null) + .flatMap(p -> Flux.fromIterable(p.results())) + .collectList() + .block(); + if (res.size() != 0) { + return client.getDatabase(databaseId).read().block().database(); + } else { + CosmosDatabaseProperties databaseSettings = new CosmosDatabaseProperties(databaseId); + return client.createDatabase(databaseSettings).block().database(); + } + } + + static protected void safeDeleteDatabase(CosmosDatabase database) { + if (database != null) { + try { + database.delete().block(); + } catch (Exception e) { + } + } + } + + static protected void safeDeleteAllCollections(CosmosDatabase database) { + if (database != null) { + List collections = database.readAllContainers() + .flatMap(p -> Flux.fromIterable(p.results())) + .collectList() + .block(); + + for(CosmosContainerProperties collection: collections) { + database.getContainer(collection.id()).delete().block(); + } + } + } + + static protected void safeDeleteCollection(CosmosContainer collection) { + if (collection != null) { + try { + collection.delete().block(); + } catch (Exception e) { + } + } + } + + static protected void safeDeleteCollection(CosmosDatabase database, String collectionId) { + if (database != null && collectionId != null) { + try { + database.getContainer(collectionId).delete().block(); + } catch (Exception e) { + } + } + } + + static protected void safeCloseAsync(CosmosClient client) { + if (client != null) { + new Thread(() -> { + try { + client.close(); + } catch (Exception e) { + logger.error("failed to close client", e); + } + }).start(); + } + } + + static protected void safeClose(CosmosClient client) { + if (client != null) { + try { + client.close(); + } catch (Exception e) { + logger.error("failed to close client", e); + } + } + } + + public void validateSuccess(Mono single, CosmosResponseValidator validator) + throws InterruptedException { + validateSuccess(single.flux(), validator, subscriberValidationTimeout); + } + + public static void validateSuccess(Flux flowable, + CosmosResponseValidator validator, long timeout) { + + TestSubscriber testSubscriber = new TestSubscriber<>(); + + flowable.subscribe(testSubscriber); + testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS); + testSubscriber.assertNoErrors(); + testSubscriber.assertComplete(); + testSubscriber.assertValueCount(1); + validator.validate(testSubscriber.values().get(0)); + } + + public void validateFailure(Mono mono, FailureValidator validator) + throws InterruptedException { + validateFailure(mono.flux(), validator, subscriberValidationTimeout); + } + + public static void validateFailure(Flux flowable, + FailureValidator validator, long timeout) throws InterruptedException { + + TestSubscriber testSubscriber = new TestSubscriber<>(); + + flowable.subscribe(testSubscriber); + testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS); + testSubscriber.assertNotComplete(); + testSubscriber.assertTerminated(); + assertThat(testSubscriber.errors()).hasSize(1); + validator.validate((Throwable) testSubscriber.getEvents().get(1).get(0)); + } + + public void validateQuerySuccess(Flux> flowable, + FeedResponseListValidator validator) { + validateQuerySuccess(flowable, validator, subscriberValidationTimeout); + } + + public static void validateQuerySuccess(Flux> flowable, + 
FeedResponseListValidator validator, long timeout) { + + TestSubscriber> testSubscriber = new TestSubscriber<>(); + + flowable.subscribe(testSubscriber); + testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS); + testSubscriber.assertNoErrors(); + testSubscriber.assertComplete(); + validator.validate(testSubscriber.values()); + } + + public void validateQueryFailure(Flux> flowable, FailureValidator validator) { + validateQueryFailure(flowable, validator, subscriberValidationTimeout); + } + + public static void validateQueryFailure(Flux> flowable, + FailureValidator validator, long timeout) { + + TestSubscriber> testSubscriber = new TestSubscriber<>(); + + flowable.subscribe(testSubscriber); + testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS); + testSubscriber.assertNotComplete(); + testSubscriber.assertTerminated(); + assertThat(testSubscriber.getEvents().get(1)).hasSize(1); + validator.validate((Throwable) testSubscriber.getEvents().get(1).get(0)); + } + + @DataProvider + public static Object[][] clientBuilders() { + return new Object[][]{{createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null)}}; + } + + @DataProvider + public static Object[][] clientBuildersWithSessionConsistency() { + return new Object[][]{ + {createDirectRxDocumentClient(ConsistencyLevel.SESSION, Protocol.HTTPS, false, null)}, + {createDirectRxDocumentClient(ConsistencyLevel.SESSION, Protocol.TCP, false, null)}, + {createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null)} + }; + } + + static ConsistencyLevel parseConsistency(String consistency) { + if (consistency != null) { + consistency = CaseFormat.UPPER_CAMEL.to(CaseFormat.UPPER_UNDERSCORE, consistency).trim(); + return ConsistencyLevel.valueOf(consistency); + } + + logger.error("INVALID configured test consistency [{}].", consistency); + throw new IllegalStateException("INVALID configured test consistency " + consistency); + } + + static List parsePreferredLocation(String preferredLocations) { + if (StringUtils.isEmpty(preferredLocations)) { + return null; + } + + try { + return objectMapper.readValue(preferredLocations, new TypeReference>() { + }); + } catch (Exception e) { + logger.error("INVALID configured test preferredLocations [{}].", preferredLocations); + throw new IllegalStateException("INVALID configured test preferredLocations " + preferredLocations); + } + } + + static List parseProtocols(String protocols) { + if (StringUtils.isEmpty(protocols)) { + return null; + } + List protocolList = new ArrayList<>(); + try { + List protocolStrings = objectMapper.readValue(protocols, new TypeReference>() { + }); + for(String protocol : protocolStrings) { + protocolList.add(Protocol.valueOf(CaseFormat.UPPER_CAMEL.to(CaseFormat.UPPER_UNDERSCORE, protocol))); + } + return protocolList; + } catch (Exception e) { + logger.error("INVALID configured test protocols [{}].", protocols); + throw new IllegalStateException("INVALID configured test protocols " + protocols); + } + } + + @DataProvider + public static Object[][] simpleClientBuildersWithDirect() { + return simpleClientBuildersWithDirect(toArray(protocols)); + } + + @DataProvider + public static Object[][] simpleClientBuildersWithDirectHttps() { + return simpleClientBuildersWithDirect(Protocol.HTTPS); + } + + private static Object[][] simpleClientBuildersWithDirect(Protocol... 
protocols) { + logger.info("Max test consistency to use is [{}]", accountConsistency); + List testConsistencies = ImmutableList.of(ConsistencyLevel.EVENTUAL); + + boolean isMultiMasterEnabled = preferredLocations != null && accountConsistency == ConsistencyLevel.SESSION; + + List cosmosConfigurations = new ArrayList<>(); + + for (Protocol protocol : protocols) { + testConsistencies.forEach(consistencyLevel -> cosmosConfigurations.add(createDirectRxDocumentClient(consistencyLevel, + protocol, + isMultiMasterEnabled, + preferredLocations))); + } + + cosmosConfigurations.forEach(c -> logger.info("Will Use ConnectionMode [{}], Consistency [{}], Protocol [{}]", + c.connectionPolicy().connectionMode(), + c.consistencyLevel(), + extractConfigs(c).getProtocol() + )); + + cosmosConfigurations.add(createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null)); + + return cosmosConfigurations.stream().map(b -> new Object[]{b}).collect(Collectors.toList()).toArray(new Object[0][]); + } + + @DataProvider + public static Object[][] clientBuildersWithDirect() { + return clientBuildersWithDirectAllConsistencies(toArray(protocols)); + } + + @DataProvider + public static Object[][] clientBuildersWithDirectHttps() { + return clientBuildersWithDirectAllConsistencies(Protocol.HTTPS); + } + + @DataProvider + public static Object[][] clientBuildersWithDirectSession() { + return clientBuildersWithDirectSession(toArray(protocols)); + } + + static Protocol[] toArray(List protocols) { + return protocols.toArray(new Protocol[protocols.size()]); + } + + private static Object[][] clientBuildersWithDirectSession(Protocol... protocols) { + return clientBuildersWithDirect(new ArrayList() {{ + add(ConsistencyLevel.SESSION); + }}, protocols); + } + + private static Object[][] clientBuildersWithDirectAllConsistencies(Protocol... protocols) { + logger.info("Max test consistency to use is [{}]", accountConsistency); + return clientBuildersWithDirect(desiredConsistencies, protocols); + } + + static List parseDesiredConsistencies(String consistencies) { + if (StringUtils.isEmpty(consistencies)) { + return null; + } + List consistencyLevels = new ArrayList<>(); + try { + List consistencyStrings = objectMapper.readValue(consistencies, new TypeReference>() {}); + for(String consistency : consistencyStrings) { + consistencyLevels.add(ConsistencyLevel.valueOf(CaseFormat.UPPER_CAMEL.to(CaseFormat.UPPER_UNDERSCORE, consistency))); + } + return consistencyLevels; + } catch (Exception e) { + logger.error("INVALID consistency test desiredConsistencies [{}].", consistencies); + throw new IllegalStateException("INVALID configured test desiredConsistencies " + consistencies); + } + } + + static List allEqualOrLowerConsistencies(ConsistencyLevel accountConsistency) { + List testConsistencies = new ArrayList<>(); + switch (accountConsistency) { + + case STRONG: + testConsistencies.add(ConsistencyLevel.STRONG); + case BOUNDED_STALENESS: + testConsistencies.add(ConsistencyLevel.BOUNDED_STALENESS); + case SESSION: + testConsistencies.add(ConsistencyLevel.SESSION); + case CONSISTENT_PREFIX: + testConsistencies.add(ConsistencyLevel.CONSISTENT_PREFIX); + case EVENTUAL: + testConsistencies.add(ConsistencyLevel.EVENTUAL); + break; + default: + throw new IllegalStateException("INVALID configured test consistency " + accountConsistency); + } + return testConsistencies; + } + + private static Object[][] clientBuildersWithDirect(List testConsistencies, Protocol... 
protocols) { + boolean isMultiMasterEnabled = preferredLocations != null && accountConsistency == ConsistencyLevel.SESSION; + + List cosmosConfigurations = new ArrayList<>(); + + for (Protocol protocol : protocols) { + testConsistencies.forEach(consistencyLevel -> cosmosConfigurations.add(createDirectRxDocumentClient(consistencyLevel, + protocol, + isMultiMasterEnabled, + preferredLocations))); + } + + cosmosConfigurations.forEach(c -> logger.info("Will Use ConnectionMode [{}], Consistency [{}], Protocol [{}]", + c.connectionPolicy().connectionMode(), + c.consistencyLevel(), + extractConfigs(c).getProtocol() + )); + + cosmosConfigurations.add(createGatewayRxDocumentClient(ConsistencyLevel.SESSION, isMultiMasterEnabled, preferredLocations)); + + return cosmosConfigurations.stream().map(c -> new Object[]{c}).collect(Collectors.toList()).toArray(new Object[0][]); + } + + static protected CosmosClientBuilder createGatewayHouseKeepingDocumentClient() { + ConnectionPolicy connectionPolicy = new ConnectionPolicy(); + connectionPolicy.connectionMode(ConnectionMode.GATEWAY); + RetryOptions options = new RetryOptions(); + options.maxRetryWaitTimeInSeconds(SUITE_SETUP_TIMEOUT); + connectionPolicy.retryOptions(options); + return CosmosClient.builder().endpoint(TestConfigurations.HOST) + .key(TestConfigurations.MASTER_KEY) + .connectionPolicy(connectionPolicy) + .consistencyLevel(ConsistencyLevel.SESSION); + } + + static protected CosmosClientBuilder createGatewayRxDocumentClient(ConsistencyLevel consistencyLevel, boolean multiMasterEnabled, List preferredLocations) { + ConnectionPolicy connectionPolicy = new ConnectionPolicy(); + connectionPolicy.connectionMode(ConnectionMode.GATEWAY); + connectionPolicy.usingMultipleWriteLocations(multiMasterEnabled); + connectionPolicy.preferredLocations(preferredLocations); + return CosmosClient.builder().endpoint(TestConfigurations.HOST) + .key(TestConfigurations.MASTER_KEY) + .connectionPolicy(connectionPolicy) + .consistencyLevel(consistencyLevel); + } + + static protected CosmosClientBuilder createGatewayRxDocumentClient() { + return createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null); + } + + static protected CosmosClientBuilder createDirectRxDocumentClient(ConsistencyLevel consistencyLevel, + Protocol protocol, + boolean multiMasterEnabled, + List preferredLocations) { + ConnectionPolicy connectionPolicy = new ConnectionPolicy(); + connectionPolicy.connectionMode(ConnectionMode.DIRECT); + + if (preferredLocations != null) { + connectionPolicy.preferredLocations(preferredLocations); + } + + if (multiMasterEnabled && consistencyLevel == ConsistencyLevel.SESSION) { + connectionPolicy.usingMultipleWriteLocations(true); + } + + Configs configs = spy(new Configs()); + doAnswer((Answer)invocation -> protocol).when(configs).getProtocol(); + + CosmosClientBuilder builder = CosmosClient.builder().endpoint(TestConfigurations.HOST) + .key(TestConfigurations.MASTER_KEY) + .connectionPolicy(connectionPolicy) + .consistencyLevel(consistencyLevel); + + return injectConfigs(builder, configs); + } + + protected int expectedNumberOfPages(int totalExpectedResult, int maxPageSize) { + return Math.max((totalExpectedResult + maxPageSize - 1 ) / maxPageSize, 1); + } + + @DataProvider(name = "queryMetricsArgProvider") + public Object[][] queryMetricsArgProvider() { + return new Object[][]{ + {true}, + {false}, + }; + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/TokenResolverTest.java 
b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/TokenResolverTest.java new file mode 100644 index 0000000000000..597ad47ca7f67 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/TokenResolverTest.java @@ -0,0 +1,561 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.internal.AsyncDocumentClient; +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.ChangeFeedOptions; +import com.azure.data.cosmos.ConnectionMode; +import com.azure.data.cosmos.ConnectionPolicy; +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.CosmosResourceType; +import com.azure.data.cosmos.internal.Database; +import com.azure.data.cosmos.internal.Document; +import com.azure.data.cosmos.internal.DocumentCollection; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.PartitionKey; +import com.azure.data.cosmos.internal.*; +import com.azure.data.cosmos.PermissionMode; +import com.azure.data.cosmos.Resource; +import com.azure.data.cosmos.TokenResolver; +import com.azure.data.cosmos.internal.TestSuiteBase; +import org.testng.SkipException; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; + +import java.time.OffsetDateTime; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; + +import static org.assertj.core.api.Assertions.assertThat; + +public class TokenResolverTest extends TestSuiteBase { + + private class UserClass { + public String userName; + public int userId; + + public UserClass(String userName, int userId) { + this.userName = userName; + this.userId = userId; + } + } + + private Database createdDatabase; + private DocumentCollection createdCollection; + private User userWithReadPermission; + private User userWithAllPermission; + + private Permission readPermission; + private Permission allPermission; + + private AsyncDocumentClient.Builder clientBuilder; + private AsyncDocumentClient client; + + @Factory(dataProvider = "clientBuilders") + public TokenResolverTest(AsyncDocumentClient.Builder clientBuilder) { + super(clientBuilder); + } + 
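    // TestNG wiring note: the @Factory above creates one instance of this test class per
    // client builder supplied by the clientBuilders data provider on the base class, while
    // the "connectionMode" data provider below re-runs each test method for both GATEWAY
    // and DIRECT connection modes.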
+ @DataProvider(name = "connectionMode") + public Object[][] connectionMode() { + return new Object[][]{ + {ConnectionMode.GATEWAY}, + {ConnectionMode.DIRECT}, + }; + } + + @BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT) + public void beforeClass() { + createdDatabase = SHARED_DATABASE; + createdCollection = SHARED_MULTI_PARTITION_COLLECTION; + + client = clientBuilder().build(); + + userWithReadPermission = createUser(client, createdDatabase.id(), getUserDefinition()); + readPermission = client.createPermission(userWithReadPermission.selfLink(), getPermission(createdCollection, "ReadPermissionOnColl", PermissionMode.READ), null).single().block() + .getResource(); + + userWithAllPermission = createUser(client, createdDatabase.id(), getUserDefinition()); + allPermission = client.createPermission(userWithAllPermission.selfLink(), getPermission(createdCollection, "AllPermissionOnColl", PermissionMode.ALL), null).single().block() + .getResource(); + } + + @Test(groups = {"simple"}, dataProvider = "connectionMode", timeOut = TIMEOUT) + public void readDocumentWithReadPermission(ConnectionMode connectionMode) { + Document docDefinition = getDocumentDefinition(); + ResourceResponse resourceResponse = client + .createDocument(BridgeInternal.getAltLink(createdCollection), docDefinition, null, false).blockFirst(); + AsyncDocumentClient asyncClientWithTokenResolver = null; + try { + asyncClientWithTokenResolver = buildClient(connectionMode, PermissionMode.READ); + RequestOptions requestOptions = new RequestOptions(); + requestOptions.setPartitionKey(new PartitionKey(resourceResponse.getResource().get("mypk"))); + HashMap properties = new HashMap(); + properties.put("UserId", "readUser"); + requestOptions.setProperties(properties); + Flux> readObservable = asyncClientWithTokenResolver.readDocument(resourceResponse.getResource().selfLink(), requestOptions); + ResourceResponseValidator validator = new ResourceResponseValidator.Builder() + .withId(resourceResponse.getResource().id()).build(); + validateSuccess(readObservable, validator); + } finally { + safeClose(asyncClientWithTokenResolver); + } + } + + @Test(groups = {"simple"}, dataProvider = "connectionMode", timeOut = TIMEOUT) + public void deleteDocumentWithReadPermission(ConnectionMode connectionMode) { + Document docDefinition = getDocumentDefinition(); + ResourceResponse resourceResponse = client + .createDocument(BridgeInternal.getAltLink(createdCollection), docDefinition, null, false).blockFirst(); + AsyncDocumentClient asyncClientWithTokenResolver = null; + try { + asyncClientWithTokenResolver = buildClient(connectionMode, PermissionMode.READ); + RequestOptions requestOptions = new RequestOptions(); + requestOptions.setPartitionKey(new PartitionKey(resourceResponse.getResource().get("mypk"))); + Flux> readObservable = asyncClientWithTokenResolver.deleteDocument(resourceResponse.getResource().selfLink(), requestOptions); + FailureValidator validator = new FailureValidator.Builder().statusCode(HttpConstants.StatusCodes.FORBIDDEN).build(); + validateFailure(readObservable, validator); + } finally { + safeClose(asyncClientWithTokenResolver); + } + } + + @Test(groups = {"simple"}, dataProvider = "connectionMode", timeOut = TIMEOUT) + public void writeDocumentWithReadPermission(ConnectionMode connectionMode) { + AsyncDocumentClient asyncClientWithTokenResolver = null; + try { + asyncClientWithTokenResolver = buildClient(connectionMode, PermissionMode.READ); + Flux> readObservable = 
asyncClientWithTokenResolver.createDocument(createdCollection.selfLink(), getDocumentDefinition(), null, true); + FailureValidator validator = new FailureValidator.Builder().statusCode(HttpConstants.StatusCodes.FORBIDDEN).build(); + validateFailure(readObservable, validator); + } finally { + safeClose(asyncClientWithTokenResolver); + } + } + + @Test(groups = {"simple"}, dataProvider = "connectionMode", timeOut = TIMEOUT) + public void writeDocumentWithAllPermission(ConnectionMode connectionMode) { + AsyncDocumentClient asyncClientWithTokenResolver = null; + try { + asyncClientWithTokenResolver = buildClient(connectionMode, PermissionMode.ALL); + Document documentDefinition = getDocumentDefinition(); + Flux> readObservable = asyncClientWithTokenResolver.createDocument(createdCollection.selfLink(), documentDefinition, null, true); + ResourceResponseValidator validator = new ResourceResponseValidator.Builder() + .withId(documentDefinition.id()).build(); + validateSuccess(readObservable, validator); + } finally { + safeClose(asyncClientWithTokenResolver); + } + } + + @Test(groups = {"simple"}, dataProvider = "connectionMode", timeOut = TIMEOUT) + public void deleteDocumentWithAllPermission(ConnectionMode connectionMode) { + Document docDefinition = getDocumentDefinition(); + ResourceResponse resourceResponse = client + .createDocument(BridgeInternal.getAltLink(createdCollection), docDefinition, null, false).blockFirst(); + AsyncDocumentClient asyncClientWithTokenResolver = null; + try { + asyncClientWithTokenResolver = buildClient(connectionMode, PermissionMode.ALL); + RequestOptions requestOptions = new RequestOptions(); + requestOptions.setPartitionKey(new PartitionKey(resourceResponse.getResource().get("mypk"))); + Flux> readObservable = asyncClientWithTokenResolver.deleteDocument(resourceResponse.getResource().selfLink(), requestOptions); + ResourceResponseValidator validator = new ResourceResponseValidator.Builder() + .nullResource().build(); + validateSuccess(readObservable, validator); + } finally { + safeClose(asyncClientWithTokenResolver); + } + } + + @Test(groups = {"simple"}, dataProvider = "connectionMode", timeOut = TIMEOUT) + public void readCollectionWithReadPermission(ConnectionMode connectionMode) { + AsyncDocumentClient asyncClientWithTokenResolver = null; + try { + asyncClientWithTokenResolver = buildClient(connectionMode, PermissionMode.READ); + Flux> readObservable = asyncClientWithTokenResolver.readCollection(createdCollection.selfLink(), null); + ResourceResponseValidator validator = new ResourceResponseValidator.Builder() + .withId(createdCollection.id()).build(); + validateSuccess(readObservable, validator); + } finally { + safeClose(asyncClientWithTokenResolver); + } + } + + @Test(groups = {"simple"}, dataProvider = "connectionMode", timeOut = TIMEOUT) + public void deleteCollectionWithReadPermission(ConnectionMode connectionMode) { + AsyncDocumentClient asyncClientWithTokenResolver = null; + try { + asyncClientWithTokenResolver = buildClient(connectionMode, PermissionMode.READ); + Flux> readObservable = asyncClientWithTokenResolver.deleteCollection(createdCollection.selfLink(), null); + FailureValidator validator = new FailureValidator.Builder().statusCode(HttpConstants.StatusCodes.FORBIDDEN).build(); + validateFailure(readObservable, validator); + } finally { + safeClose(asyncClientWithTokenResolver); + } + } + + @Test(groups = {"simple"}, dataProvider = "connectionMode", timeOut = TIMEOUT) + public void verifyingAuthTokenAPISequence(ConnectionMode connectionMode) { 
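        // Summary of this test: with a token resolver, a master key and a permission feed all
        // configured on the same builder, the token resolver is the credential actually used,
        // so an invalid resolver fails the read even though the other credentials are valid,
        // and a valid resolver succeeds regardless of what else is supplied (see the inline
        // comments below for each step).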
+ Document docDefinition = getDocumentDefinition(); + ResourceResponse resourceResponse = client + .createDocument(BridgeInternal.getAltLink(createdCollection), docDefinition, null, false).blockFirst(); + AsyncDocumentClient asyncClientWithTokenResolver = null; + try { + ConnectionPolicy connectionPolicy = new ConnectionPolicy(); + connectionPolicy.connectionMode(connectionMode); + + //Unauthorized error with invalid token resolver, valid master key and valid permission feed, making it sure tokenResolver has higher priority than all. + List permissionFeed = new ArrayList<>(); + permissionFeed.add(readPermission); + asyncClientWithTokenResolver = new AsyncDocumentClient.Builder() + .withServiceEndpoint(TestConfigurations.HOST) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.SESSION) + .withTokenResolver(getTokenResolver(null)) //TokenResolver always generating invalid token. + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withPermissionFeed(permissionFeed) + .build(); + RequestOptions requestOptions = new RequestOptions(); + requestOptions.setPartitionKey(new PartitionKey(resourceResponse.getResource().get("mypk"))); + Flux> readObservable = asyncClientWithTokenResolver.readDocument(resourceResponse.getResource().selfLink(), requestOptions); + FailureValidator failureValidator = new FailureValidator.Builder().statusCode(HttpConstants.StatusCodes.UNAUTHORIZED).build(); + validateFailure(readObservable, failureValidator); + + //Success read operation with valid token resolver, invalid master key and invalid permission feed, making it sure tokenResolver has higher priority than all. + asyncClientWithTokenResolver = new AsyncDocumentClient.Builder() + .withServiceEndpoint(TestConfigurations.HOST) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.SESSION) + .withTokenResolver(getTokenResolver(PermissionMode.READ)) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .withPermissionFeed(permissionFeed) + .build(); + readObservable = asyncClientWithTokenResolver.readDocument(resourceResponse.getResource().selfLink(), requestOptions); + ResourceResponseValidator sucessValidator = new ResourceResponseValidator.Builder() + .withId(resourceResponse.getResource().id()).build(); + validateSuccess(readObservable, sucessValidator); + + + //Success read operation with valid permission feed, supporting above hypothesis. + asyncClientWithTokenResolver = new AsyncDocumentClient.Builder() + .withServiceEndpoint(TestConfigurations.HOST) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.SESSION) + .withPermissionFeed(permissionFeed) + .build(); + readObservable = asyncClientWithTokenResolver.readDocument(resourceResponse.getResource().selfLink(), requestOptions); + validateSuccess(readObservable, sucessValidator); + + + //Success read operation with valid master key, supporting above hypothesis. 
+ asyncClientWithTokenResolver = new AsyncDocumentClient.Builder() + .withServiceEndpoint(TestConfigurations.HOST) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.SESSION) + .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) + .build(); + readObservable = asyncClientWithTokenResolver.readDocument(resourceResponse.getResource().selfLink(), requestOptions); + validateSuccess(readObservable, sucessValidator); + + } finally { + safeClose(asyncClientWithTokenResolver); + } + } + + @Test(groups = {"simple"}, dataProvider = "connectionMode", timeOut = 6000000) + public void createAndExecuteSprocWithWritePermission(ConnectionMode connectionMode) throws InterruptedException { + AsyncDocumentClient asyncClientWithTokenResolver = null; + try { + asyncClientWithTokenResolver = buildClient(connectionMode, PermissionMode.ALL); + String sprocId = "storedProcedure" + UUID.randomUUID().toString(); + StoredProcedure sproc = new StoredProcedure( + "{" + + " 'id':'" + sprocId + "'," + + " 'body':" + + " 'function() {" + + " var mytext = \"x\";" + + " var myval = 1;" + + " try {" + + " getContext().getResponse().setBody(\"Success!\");" + + " }" + + " catch(err) {" + + " getContext().getResponse().setBody(\"inline err: [\" + err.number + \"] \" + err);" + + " }" + + " }'" + + "}"); + + Flux> createObservable = asyncClientWithTokenResolver.createStoredProcedure(createdCollection.selfLink(), sproc, null); + ResourceResponseValidator createSucessValidator = new ResourceResponseValidator.Builder() + .withId(sprocId).build(); + validateSuccess(createObservable, createSucessValidator); + + RequestOptions options = new RequestOptions(); + options.setPartitionKey(new PartitionKey("")); + String sprocLink = "dbs/" + createdDatabase.id() + "/colls/" + createdCollection.id() + "/sprocs/" + sprocId; + StoredProcedureResponse result = asyncClientWithTokenResolver.executeStoredProcedure(sprocLink, options, null).single().block(); + assertThat(result.getResponseAsString()).isEqualTo("\"Success!\""); + } finally { + safeClose(asyncClientWithTokenResolver); + } + } + + @Test(groups = {"simple"}, dataProvider = "connectionMode", timeOut = TIMEOUT) + public void readDocumentsWithAllPermission(ConnectionMode connectionMode) { + AsyncDocumentClient asyncClientWithTokenResolver = null; + String id1 = UUID.randomUUID().toString(); + String id2 = UUID.randomUUID().toString(); + + try { + asyncClientWithTokenResolver = buildClient(connectionMode, PermissionMode.ALL); + Document document1 = asyncClientWithTokenResolver.createDocument(createdCollection.selfLink(), new Document("{'id': '" + id1 + "'}"), null, false) + .single().block().getResource(); + Document document2 = asyncClientWithTokenResolver.createDocument(createdCollection.selfLink(), new Document("{'id': '" + id2 + "'}"), null, false) + .single().block().getResource(); + List expectedIds = new ArrayList(); + String rid1 = document1.resourceId(); + String rid2 = document2.resourceId(); + expectedIds.add(rid1); + expectedIds.add(rid2); + String query = "SELECT * FROM r WHERE r._rid=\"" + rid1 + "\" or r._rid=\"" + rid2 + "\""; + + FeedOptions options = new FeedOptions(); + options.enableCrossPartitionQuery(true); + Flux> queryObservable = asyncClientWithTokenResolver.queryDocuments(createdCollection.selfLink(), query, options); + FeedResponseListValidator validator = new FeedResponseListValidator.Builder() + .totalSize(2) + .exactlyContainsInAnyOrder(expectedIds).build(); + validateQuerySuccess(queryObservable, validator, 10000); + } 
finally { + safeClose(asyncClientWithTokenResolver); + } + } + + @Test(groups = {"simple"}, dataProvider = "connectionMode", timeOut = TIMEOUT) + public void readChangeFeedWithAllPermission(ConnectionMode connectionMode) throws InterruptedException { + + //setStartDateTime is not currently supported in multimaster mode. So skipping the test + if(BridgeInternal.isEnableMultipleWriteLocations(client.getDatabaseAccount().single().block())){ + throw new SkipException("StartTime/IfModifiedSince is not currently supported when EnableMultipleWriteLocations is set"); + } + + AsyncDocumentClient asyncClientWithTokenResolver = null; + String id1 = UUID.randomUUID().toString(); + String id2 = UUID.randomUUID().toString(); + String partitionKey = createdCollection.getPartitionKey().paths().get(0).substring(1); + String partitionKeyValue = "pk"; + Document document1 = new Document(); + document1.id(id1); + BridgeInternal.setProperty(document1, partitionKey, partitionKeyValue); + Document document2 = new Document(); + document2.id(id2); + BridgeInternal.setProperty(document2, partitionKey, partitionKeyValue); + try { + asyncClientWithTokenResolver = buildClient(connectionMode, PermissionMode.ALL); + OffsetDateTime befTime = OffsetDateTime.now(); + Thread.sleep(1000); + + document1 = asyncClientWithTokenResolver + .createDocument(createdCollection.selfLink(), document1, null, false).single().block() + .getResource(); + document2 = asyncClientWithTokenResolver + .createDocument(createdCollection.selfLink(), document2, null, false).single().block() + .getResource(); + List expectedIds = new ArrayList(); + String rid1 = document1.resourceId(); + String rid2 = document2.resourceId(); + expectedIds.add(rid1); + expectedIds.add(rid2); + + ChangeFeedOptions options = new ChangeFeedOptions(); + options.partitionKey(new PartitionKey(partitionKeyValue)); + options.startDateTime(befTime); + + Thread.sleep(1000); + Flux> queryObservable = asyncClientWithTokenResolver + .queryDocumentChangeFeed(createdCollection.selfLink(), options); + FeedResponseListValidator validator = new FeedResponseListValidator.Builder() + .exactlyContainsInAnyOrder(expectedIds).build(); + validateQuerySuccess(queryObservable, validator, 10000); + } finally { + safeClose(asyncClientWithTokenResolver); + } + } + + @Test(groups = {"simple"}, dataProvider = "connectionMode", timeOut = TIMEOUT) + public void verifyRuntimeExceptionWhenUserModifiesProperties(ConnectionMode connectionMode) { + AsyncDocumentClient asyncClientWithTokenResolver = null; + + try { + ConnectionPolicy connectionPolicy = new ConnectionPolicy(); + connectionPolicy.connectionMode(connectionMode); + asyncClientWithTokenResolver = new AsyncDocumentClient.Builder() + .withServiceEndpoint(TestConfigurations.HOST) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.SESSION) + .withTokenResolver(getBadTokenResolver()) + .build(); + + RequestOptions options = new RequestOptions(); + options.setProperties(new HashMap()); + Flux> readObservable = asyncClientWithTokenResolver.readCollection(createdCollection.selfLink(), options); + FailureValidator validator = new FailureValidator.Builder().withRuntimeExceptionClass(UnsupportedOperationException.class).build(); + validateFailure(readObservable, validator); + } finally { + safeClose(asyncClientWithTokenResolver); + } + } + + @Test(groups = {"simple"}, dataProvider = "connectionMode", timeOut = TIMEOUT) + public void verifyBlockListedUserThrows(ConnectionMode connectionMode) { + String field = "user"; + 
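        // The resolver built by getTokenResolverWithBlockList(...) reads the caller identity
        // out of the request properties under this key and throws errorMessage for the
        // block-listed user, so the first readCollection below is expected to fail and the
        // retry with a different user to succeed.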
UserClass blockListedUser = new UserClass("block listed user", 0); + String errorMessage = "block listed user! access denied!"; + + AsyncDocumentClient asyncClientWithTokenResolver = null; + try { + ConnectionPolicy connectionPolicy = new ConnectionPolicy(); + connectionPolicy.connectionMode(connectionMode); + asyncClientWithTokenResolver = new AsyncDocumentClient.Builder() + .withServiceEndpoint(TestConfigurations.HOST) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.SESSION) + .withTokenResolver(getTokenResolverWithBlockList(PermissionMode.READ, field, blockListedUser, errorMessage)) + .build(); + + RequestOptions options = new RequestOptions(); + HashMap properties = new HashMap(); + properties.put(field, blockListedUser); + options.setProperties(properties); + Flux> readObservable = asyncClientWithTokenResolver.readCollection(createdCollection.selfLink(), options); + FailureValidator validator = new FailureValidator.Builder().withRuntimeExceptionMessage(errorMessage).build(); + validateFailure(readObservable, validator); + + properties.put(field, new UserClass("valid user", 1)); + options.setProperties(properties); + readObservable = asyncClientWithTokenResolver.readCollection(createdCollection.selfLink(), options); + ResourceResponseValidator sucessValidator = new ResourceResponseValidator.Builder() + .withId(createdCollection.id()).build(); + validateSuccess(readObservable, sucessValidator); + } finally { + safeClose(asyncClientWithTokenResolver); + } + } + + @AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + client.close(); + } + + private Document getDocumentDefinition() { + String uuid = UUID.randomUUID().toString(); + Document doc = new Document(String.format("{ " + + "\"id\": \"%s\", " + + "\"mypk\": \"%s\", " + + "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]" + + "}" + , uuid, uuid)); + return doc; + } + + private AsyncDocumentClient buildClient(ConnectionMode connectionMode, PermissionMode permissionMode) { + ConnectionPolicy connectionPolicy = new ConnectionPolicy(); + connectionPolicy.connectionMode(connectionMode); + return new AsyncDocumentClient.Builder() + .withServiceEndpoint(TestConfigurations.HOST) + .withConnectionPolicy(connectionPolicy) + .withConsistencyLevel(ConsistencyLevel.SESSION) + .withTokenResolver(getTokenResolver(permissionMode)) + .build(); + } + + private static User getUserDefinition() { + User user = new User(); + user.id(UUID.randomUUID().toString()); + return user; + } + + private Permission getPermission(Resource resource, String permissionId, PermissionMode permissionMode) { + Permission permission = new Permission(); + permission.id(permissionId); + permission.setPermissionMode(permissionMode); + permission.setResourceLink(resource.selfLink()); + return permission; + } + + private TokenResolver getTokenResolver(PermissionMode permissionMode) { + return (String requestVerb, String resourceIdOrFullName, CosmosResourceType resourceType, Map properties) -> { + if (permissionMode == null) { + return "invalid"; + } else if (permissionMode.equals(PermissionMode.READ)) { + return readPermission.getToken(); + } else { + return allPermission.getToken(); + } + }; + } + + private TokenResolver getBadTokenResolver() { + return (String requestVerb, String resourceIdOrFullName, CosmosResourceType resourceType, Map properties) -> { + if (resourceType == CosmosResourceType.System) { + return readPermission.getToken(); + } + if (properties != null) { + 
properties.put("key", "value"); + } + return null; + }; + } + + private TokenResolver getTokenResolverWithBlockList(PermissionMode permissionMode, String field, UserClass blockListedUser, String errorMessage) { + return (String requestVerb, String resourceIdOrFullName, CosmosResourceType resourceType, Map properties) -> { + UserClass currentUser = null; + if (properties != null && properties.get(field) != null) { + currentUser = (UserClass) properties.get(field); + } + + if (resourceType == CosmosResourceType.System) { + return readPermission.getToken(); + } else if (currentUser != null && + !currentUser.userName.equals(blockListedUser.userName) && + currentUser.userId != blockListedUser.userId) { + if (permissionMode.equals(PermissionMode.READ)) { + return readPermission.getToken(); + } else { + return allPermission.getToken(); + } + } else { + throw new RuntimeException(errorMessage); + } + }; + } +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/TopQueryTests.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/TopQueryTests.java new file mode 100644 index 0000000000000..88910aeb5e35c --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/TopQueryTests.java @@ -0,0 +1,229 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.CosmosClient; +import com.azure.data.cosmos.CosmosClientBuilder; +import com.azure.data.cosmos.CosmosContainer; +import com.azure.data.cosmos.CosmosItemProperties; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.PartitionKey; +import com.azure.data.cosmos.internal.FeedResponseListValidator; +import com.azure.data.cosmos.internal.RetryAnalyzer; +import com.azure.data.cosmos.internal.Utils.ValueHolder; +import com.azure.data.cosmos.internal.query.TakeContinuationToken; +import io.reactivex.subscribers.TestSubscriber; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; + +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.TimeUnit; + +import static org.assertj.core.api.Assertions.assertThat; + +public class TopQueryTests extends TestSuiteBase { + private CosmosContainer createdCollection; + private ArrayList docs = new ArrayList(); + + private String partitionKey = "mypk"; + private int firstPk = 0; + private int secondPk = 1; + private String field = "field"; + + private CosmosClient client; + + @Factory(dataProvider = "clientBuildersWithDirect") + public TopQueryTests(CosmosClientBuilder clientBuilder) { + super(clientBuilder); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "queryMetricsArgProvider", retryAnalyzer = RetryAnalyzer.class) + public void queryDocumentsWithTop(boolean qmEnabled) throws Exception { + + FeedOptions options = new FeedOptions(); + options.enableCrossPartitionQuery(true); + options.maxItemCount(9); + options.maxDegreeOfParallelism(2); + options.populateQueryMetrics(qmEnabled); + + int expectedTotalSize = 20; + int expectedNumberOfPages = 3; + int[] expectedPageLengths = new int[] { 9, 9, 2 }; + + for (int i = 0; i < 2; i++) { + Flux> queryObservable1 = createdCollection.queryItems("SELECT TOP 0 value AVG(c.field) from c", options); + + FeedResponseListValidator validator1 = new FeedResponseListValidator.Builder() + .totalSize(0).build(); + + validateQuerySuccess(queryObservable1, validator1, TIMEOUT); + + Flux> queryObservable2 = createdCollection.queryItems("SELECT TOP 1 value AVG(c.field) from c", options); + + FeedResponseListValidator validator2 = new FeedResponseListValidator.Builder() + .totalSize(1).build(); + + validateQuerySuccess(queryObservable2, validator2, TIMEOUT); + + Flux> queryObservable3 = createdCollection.queryItems("SELECT TOP 20 * from c", options); + + FeedResponseListValidator validator3 = new FeedResponseListValidator.Builder() + .totalSize(expectedTotalSize).numberOfPages(expectedNumberOfPages).pageLengths(expectedPageLengths) + .hasValidQueryMetrics(qmEnabled).build(); + + validateQuerySuccess(queryObservable3, validator3, TIMEOUT); + + if (i == 0) { + options.partitionKey(new PartitionKey(firstPk)); + options.enableCrossPartitionQuery(false); + + expectedTotalSize = 10; + expectedNumberOfPages = 2; + expectedPageLengths = new int[] { 9, 1 }; + + } + } + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void topContinuationTokenRoundTrips() throws Exception { + { + // Positive + TakeContinuationToken takeContinuationToken = new TakeContinuationToken(42, "asdf"); + String serialized = 
takeContinuationToken.toString(); + ValueHolder outTakeContinuationToken = new ValueHolder(); + + assertThat(TakeContinuationToken.tryParse(serialized, outTakeContinuationToken)).isTrue(); + TakeContinuationToken deserialized = outTakeContinuationToken.v; + + assertThat(deserialized.getTakeCount()).isEqualTo(42); + assertThat(deserialized.getSourceToken()).isEqualTo("asdf"); + } + + { + // Negative + ValueHolder outTakeContinuationToken = new ValueHolder(); + assertThat( + TakeContinuationToken.tryParse("{\"property\": \"Not a valid token\"}", outTakeContinuationToken)) + .isFalse(); + } + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, retryAnalyzer = RetryAnalyzer.class) + public void queryDocumentsWithTopContinuationTokens() throws Exception { + String query = "SELECT TOP 8 * FROM c"; + this.queryWithContinuationTokensAndPageSizes(query, new int[] { 1, 5, 10 }, 8); + } + + private void queryWithContinuationTokensAndPageSizes(String query, int[] pageSizes, int topCount) { + for (int pageSize : pageSizes) { + List receivedDocuments = this.queryWithContinuationTokens(query, pageSize); + Set actualIds = new HashSet(); + for (CosmosItemProperties document : receivedDocuments) { + actualIds.add(document.resourceId()); + } + + assertThat(actualIds.size()).describedAs("total number of results").isEqualTo(topCount); + } + } + + private List queryWithContinuationTokens(String query, int pageSize) { + String requestContinuation = null; + List continuationTokens = new ArrayList(); + List receivedDocuments = new ArrayList(); + + do { + FeedOptions options = new FeedOptions(); + options.maxItemCount(pageSize); + options.enableCrossPartitionQuery(true); + options.maxDegreeOfParallelism(2); + options.requestContinuation(requestContinuation); + Flux> queryObservable = createdCollection.queryItems(query, options); + + //Observable> firstPageObservable = queryObservable.first(); + TestSubscriber> testSubscriber = new TestSubscriber<>(); + queryObservable.subscribe(testSubscriber); + testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS); + testSubscriber.assertNoErrors(); + testSubscriber.assertComplete(); + + FeedResponse firstPage = (FeedResponse) testSubscriber.getEvents().get(0).get(0); + requestContinuation = firstPage.continuationToken(); + receivedDocuments.addAll(firstPage.results()); + continuationTokens.add(requestContinuation); + } while (requestContinuation != null); + + return receivedDocuments; + } + + public void bulkInsert(CosmosClient client) { + generateTestData(); + + for (int i = 0; i < docs.size(); i++) { + createDocument(createdCollection, docs.get(i)); + } + } + + public void generateTestData() { + + for (int i = 0; i < 10; i++) { + CosmosItemProperties d = new CosmosItemProperties(); + d.id(Integer.toString(i)); + BridgeInternal.setProperty(d, field, i); + BridgeInternal.setProperty(d, partitionKey, firstPk); + docs.add(d); + } + + for (int i = 10; i < 20; i++) { + CosmosItemProperties d = new CosmosItemProperties(); + d.id(Integer.toString(i)); + BridgeInternal.setProperty(d, field, i); + BridgeInternal.setProperty(d, partitionKey, secondPk); + docs.add(d); + } + } + + @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeClose(client); + } + + @BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT) + public void beforeClass() throws Exception { + client = clientBuilder().build(); + createdCollection = getSharedSinglePartitionCosmosContainer(client); + truncateCollection(createdCollection); + 
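        // The shared single-partition container is reused across test classes, so it is
        // truncated before bulkInsert(client) seeds the 20 documents (10 per partition key
        // value) that queryDocumentsWithTop() and the TOP continuation-token tests expect.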
+ bulkInsert(client); + + waitIfNeededForReplicasToCatchUp(clientBuilder()); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/TriggerCrudTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/TriggerCrudTest.java new file mode 100644 index 0000000000000..f152077f4d19a --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/TriggerCrudTest.java @@ -0,0 +1,129 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.CosmosClient; +import com.azure.data.cosmos.CosmosClientBuilder; +import com.azure.data.cosmos.CosmosContainer; +import com.azure.data.cosmos.CosmosResponse; +import com.azure.data.cosmos.CosmosResponseValidator; +import com.azure.data.cosmos.CosmosTrigger; +import com.azure.data.cosmos.CosmosTriggerProperties; +import com.azure.data.cosmos.CosmosTriggerResponse; +import com.azure.data.cosmos.TriggerOperation; +import com.azure.data.cosmos.TriggerType; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Mono; + +import java.util.UUID; + +public class TriggerCrudTest extends TestSuiteBase { + private CosmosContainer createdCollection; + + private CosmosClient client; + + @Factory(dataProvider = "clientBuildersWithDirect") + public TriggerCrudTest(CosmosClientBuilder clientBuilder) { + super(clientBuilder); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT * 100) + public void createTrigger() throws Exception { + + // create a trigger + CosmosTriggerProperties trigger = new CosmosTriggerProperties(); + trigger.id(UUID.randomUUID().toString()); + trigger.body("function() {var x = 10;}"); + trigger.triggerOperation(TriggerOperation.CREATE); + trigger.triggerType(TriggerType.PRE); + + Mono createObservable = createdCollection.getScripts().createTrigger(trigger); + + // validate trigger creation + CosmosResponseValidator validator = new CosmosResponseValidator.Builder() + .withId(trigger.id()) + .withTriggerBody("function() {var x = 10;}") + .withTriggerInternals(TriggerType.PRE, TriggerOperation.CREATE) + .notNullEtag() + .build(); + validateSuccess(createObservable, validator); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void readTrigger() throws Exception { + // create a trigger + 
CosmosTriggerProperties trigger = new CosmosTriggerProperties(); + trigger.id(UUID.randomUUID().toString()); + trigger.body("function() {var x = 10;}"); + trigger.triggerOperation(TriggerOperation.CREATE); + trigger.triggerType(TriggerType.PRE); + CosmosTrigger readBackTrigger = createdCollection.getScripts().createTrigger(trigger).block().trigger(); + + // read trigger + waitIfNeededForReplicasToCatchUp(clientBuilder()); + Mono readObservable = readBackTrigger.read(); + + // validate read trigger + CosmosResponseValidator validator = new CosmosResponseValidator.Builder() + .withId(trigger.id()) + .withTriggerBody("function() {var x = 10;}") + .withTriggerInternals(TriggerType.PRE, TriggerOperation.CREATE) + .notNullEtag() + .build(); + validateSuccess(readObservable, validator); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void deleteTrigger() throws Exception { + // create a trigger + CosmosTriggerProperties trigger = new CosmosTriggerProperties(); + trigger.id(UUID.randomUUID().toString()); + trigger.body("function() {var x = 10;}"); + trigger.triggerOperation(TriggerOperation.CREATE); + trigger.triggerType(TriggerType.PRE); + CosmosTrigger readBackTrigger = createdCollection.getScripts().createTrigger(trigger).block().trigger(); + + // delete trigger + Mono deleteObservable = readBackTrigger.delete(); + + // validate delete trigger + CosmosResponseValidator validator = new CosmosResponseValidator.Builder() + .nullResource() + .build(); + validateSuccess(deleteObservable, validator); + } + + @BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT) + public void beforeClass() { + client = clientBuilder().build(); + createdCollection = getSharedMultiPartitionCosmosContainer(client); + } + + @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeClose(client); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/TriggerQueryTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/TriggerQueryTest.java new file mode 100644 index 0000000000000..f0fd9b6785f0c --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/TriggerQueryTest.java @@ -0,0 +1,184 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.CosmosClient; +import com.azure.data.cosmos.CosmosClientBuilder; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.CosmosContainer; +import com.azure.data.cosmos.CosmosTriggerProperties; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.Resource; +import com.azure.data.cosmos.TriggerOperation; +import com.azure.data.cosmos.TriggerType; +import com.azure.data.cosmos.internal.FailureValidator; +import com.azure.data.cosmos.internal.FeedResponseListValidator; +import com.azure.data.cosmos.internal.FeedResponseValidator; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; + +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.stream.Collectors; + +import static org.assertj.core.api.Assertions.assertThat; + +public class TriggerQueryTest extends TestSuiteBase { + + private CosmosContainer createdCollection; + private static final List createdTriggers = new ArrayList<>(); + + private CosmosClient client; + + @Factory(dataProvider = "clientBuildersWithDirect") + public TriggerQueryTest(CosmosClientBuilder clientBuilder) { + super(clientBuilder); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void queryWithFilter() throws Exception { + + String filterId = createdTriggers.get(0).id(); + String query = String.format("SELECT * from c where c.id = '%s'", filterId); + + FeedOptions options = new FeedOptions(); + options.maxItemCount(5); + Flux> queryObservable = createdCollection.getScripts().queryTriggers(query, options); + + List expectedDocs = createdTriggers + .stream() + .filter(sp -> filterId.equals(sp.id()) ) + .collect(Collectors.toList()); + assertThat(expectedDocs).isNotEmpty(); + + int expectedPageSize = (expectedDocs.size() + options.maxItemCount() - 1) / options.maxItemCount(); + + FeedResponseListValidator validator = new FeedResponseListValidator.Builder() + .totalSize(expectedDocs.size()) + .exactlyContainsInAnyOrder(expectedDocs.stream().map(Resource::resourceId).collect(Collectors.toList())) + .numberOfPages(expectedPageSize) + .pageSatisfy(0, new FeedResponseValidator.Builder() + .requestChargeGreaterThanOrEqualTo(1.0).build()) + .build(); + + validateQuerySuccess(queryObservable, validator, 10000); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void query_NoResults() throws Exception { + + String query = "SELECT * from root r where r.id = '2'"; + FeedOptions options = new FeedOptions(); + options.enableCrossPartitionQuery(true); + Flux> queryObservable = createdCollection.getScripts().queryTriggers(query, options); + + FeedResponseListValidator validator = new FeedResponseListValidator.Builder() + .containsExactly(new ArrayList<>()) + .numberOfPages(1) + .pageSatisfy(0, new FeedResponseValidator.Builder() + .requestChargeGreaterThanOrEqualTo(1.0).build()) + .build(); + validateQuerySuccess(queryObservable, validator); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void queryAll() throws Exception { + + String query = "SELECT * from root"; + FeedOptions options = new FeedOptions(); + options.maxItemCount(3); + options.enableCrossPartitionQuery(true); + Flux> queryObservable = createdCollection.getScripts().queryTriggers(query, options); + + 
createdTriggers.forEach(cosmosTriggerSettings -> logger.info("Created trigger in method: {}", cosmosTriggerSettings.resourceId())); + + List expectedDocs = createdTriggers; + + int expectedPageSize = (expectedDocs.size() + options.maxItemCount() - 1) / options.maxItemCount(); + + FeedResponseListValidator validator = new FeedResponseListValidator + .Builder() + .exactlyContainsInAnyOrder(expectedDocs + .stream() + .map(Resource::resourceId) + .collect(Collectors.toList())) + .numberOfPages(expectedPageSize) + .allPagesSatisfy(new FeedResponseValidator.Builder() + .requestChargeGreaterThanOrEqualTo(1.0).build()) + .build(); + validateQuerySuccess(queryObservable, validator); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void invalidQuerySytax() throws Exception { + String query = "I am an invalid query"; + FeedOptions options = new FeedOptions(); + options.enableCrossPartitionQuery(true); + Flux> queryObservable = createdCollection.getScripts().queryTriggers(query, options); + + FailureValidator validator = new FailureValidator.Builder() + .instanceOf(CosmosClientException.class) + .statusCode(400) + .notNullActivityId() + .build(); + validateQueryFailure(queryObservable, validator); + } + + public CosmosTriggerProperties createTrigger(CosmosContainer cosmosContainer) { + CosmosTriggerProperties storedProcedure = getTriggerDef(); + return cosmosContainer.getScripts().createTrigger(storedProcedure).block().properties(); + } + + @BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT) + public void beforeClass() throws Exception { + client = clientBuilder().build(); + createdCollection = getSharedMultiPartitionCosmosContainer(client); + truncateCollection(createdCollection); + createdTriggers.clear(); + + for(int i = 0; i < 5; i++) { + createdTriggers.add(createTrigger(createdCollection)); + } + + waitIfNeededForReplicasToCatchUp(clientBuilder()); + } + + @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeClose(client); + } + + private static CosmosTriggerProperties getTriggerDef() { + CosmosTriggerProperties trigger = new CosmosTriggerProperties(); + trigger.id(UUID.randomUUID().toString()); + trigger.body("function() {var x = 10;}"); + trigger.triggerOperation(TriggerOperation.CREATE); + trigger.triggerType(TriggerType.PRE); + return trigger; + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/TriggerUpsertReplaceTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/TriggerUpsertReplaceTest.java new file mode 100644 index 0000000000000..39f0c0addc345 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/TriggerUpsertReplaceTest.java @@ -0,0 +1,102 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.CosmosClient; +import com.azure.data.cosmos.CosmosClientBuilder; +import com.azure.data.cosmos.CosmosContainer; +import com.azure.data.cosmos.CosmosResponseValidator; +import com.azure.data.cosmos.CosmosTriggerProperties; +import com.azure.data.cosmos.CosmosTriggerResponse; +import com.azure.data.cosmos.TriggerOperation; +import com.azure.data.cosmos.TriggerType; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Mono; + +import java.util.UUID; + +public class TriggerUpsertReplaceTest extends TestSuiteBase { + + private CosmosContainer createdCollection; + + private CosmosClient client; + + @Factory(dataProvider = "clientBuildersWithDirect") + public TriggerUpsertReplaceTest(CosmosClientBuilder clientBuilder) { + super(clientBuilder); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void replaceTrigger() throws Exception { + + // create a trigger + CosmosTriggerProperties trigger = new CosmosTriggerProperties(); + trigger.id(UUID.randomUUID().toString()); + trigger.body("function() {var x = 10;}"); + trigger.triggerOperation(TriggerOperation.CREATE); + trigger.triggerType(TriggerType.PRE); + CosmosTriggerProperties readBackTrigger = createdCollection.getScripts().createTrigger(trigger).block().properties(); + + // read trigger to validate creation + waitIfNeededForReplicasToCatchUp(clientBuilder()); + Mono readObservable = createdCollection.getScripts().getTrigger(readBackTrigger.id()).read(); + + // validate trigger creation + CosmosResponseValidator validatorForRead = new CosmosResponseValidator.Builder() + .withId(readBackTrigger.id()) + .withTriggerBody("function() {var x = 10;}") + .withTriggerInternals(TriggerType.PRE, TriggerOperation.CREATE) + .notNullEtag() + .build(); + validateSuccess(readObservable, validatorForRead); + + //update trigger + readBackTrigger.body("function() {var x = 11;}"); + + Mono updateObservable = createdCollection.getScripts().getTrigger(readBackTrigger.id()).replace(readBackTrigger); + + // validate trigger replace + CosmosResponseValidator validatorForUpdate = new CosmosResponseValidator.Builder() + .withId(readBackTrigger.id()) + .withTriggerBody("function() {var x = 11;}") + .withTriggerInternals(TriggerType.PRE, TriggerOperation.CREATE) + .notNullEtag() + .build(); + validateSuccess(updateObservable, validatorForUpdate); + } + + @BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT) + public void beforeClass() { + client = clientBuilder().build(); + createdCollection = getSharedMultiPartitionCosmosContainer(client); + truncateCollection(createdCollection); + } + + @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeClose(client); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/UniqueIndexTest.java 
b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/UniqueIndexTest.java new file mode 100644 index 0000000000000..1fcd0f8497509 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/UniqueIndexTest.java @@ -0,0 +1,247 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.ConnectionPolicy; +import com.azure.data.cosmos.ConsistencyLevel; +import com.azure.data.cosmos.CosmosClient; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.CosmosContainer; +import com.azure.data.cosmos.CosmosContainerProperties; +import com.azure.data.cosmos.CosmosDatabase; +import com.azure.data.cosmos.CosmosDatabaseForTest; +import com.azure.data.cosmos.CosmosItem; +import com.azure.data.cosmos.CosmosItemProperties; +import com.azure.data.cosmos.CosmosItemRequestOptions; +import com.azure.data.cosmos.DataType; +import com.azure.data.cosmos.ExcludedPath; +import com.azure.data.cosmos.HashIndex; +import com.azure.data.cosmos.IncludedPath; +import com.azure.data.cosmos.IndexingMode; +import com.azure.data.cosmos.IndexingPolicy; +import com.azure.data.cosmos.PartitionKey; +import com.azure.data.cosmos.PartitionKeyDefinition; +import com.azure.data.cosmos.UniqueKey; +import com.azure.data.cosmos.UniqueKeyPolicy; +import com.azure.data.cosmos.internal.HttpConstants; +import com.azure.data.cosmos.internal.TestConfigurations; +import com.azure.data.cosmos.internal.TestUtils; +import com.azure.data.cosmos.internal.Utils; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.node.ObjectNode; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.UUID; +import java.util.stream.Collectors; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; + +public class UniqueIndexTest extends TestSuiteBase { + protected static final int TIMEOUT = 30000; + protected static final int SETUP_TIMEOUT = 20000; + protected static final int SHUTDOWN_TIMEOUT = 20000; + + private final String databaseId = CosmosDatabaseForTest.generateId(); + 
private CosmosClient client; + private CosmosDatabase database; + + private CosmosContainer collection; + + @Test(groups = { "long" }, timeOut = TIMEOUT) + public void insertWithUniqueIndex() throws Exception { + PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition(); + ArrayList paths = new ArrayList(); + paths.add("/mypk"); + partitionKeyDef.paths(paths); + + CosmosContainerProperties collectionDefinition = new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDef); + UniqueKeyPolicy uniqueKeyPolicy = new UniqueKeyPolicy(); + UniqueKey uniqueKey = new UniqueKey(); + uniqueKey.paths(ImmutableList.of("/name", "/description")); + uniqueKeyPolicy.uniqueKeys(Lists.newArrayList(uniqueKey)); + collectionDefinition.uniqueKeyPolicy(uniqueKeyPolicy); + + IndexingPolicy indexingPolicy = new IndexingPolicy(); + indexingPolicy.indexingMode(IndexingMode.CONSISTENT); + ExcludedPath excludedPath = new ExcludedPath(); + excludedPath.path("/*"); + indexingPolicy.excludedPaths(Collections.singletonList(excludedPath)); + + IncludedPath includedPath1 = new IncludedPath(); + includedPath1.path("/name/?"); + includedPath1.indexes(Collections.singletonList(new HashIndex(DataType.STRING, 7))); + + IncludedPath includedPath2 = new IncludedPath(); + includedPath2.path("/description/?"); + includedPath2.indexes(Collections.singletonList(new HashIndex(DataType.STRING, 7))); + indexingPolicy.setIncludedPaths(ImmutableList.of(includedPath1, includedPath2)); + collectionDefinition.indexingPolicy(indexingPolicy); + + ObjectMapper om = new ObjectMapper(); + + JsonNode doc1 = om.readValue("{\"name\":\"Alexander Pushkin\",\"description\":\"poet\",\"id\": \""+ UUID.randomUUID().toString() +"\"}", JsonNode.class); + JsonNode doc2 = om.readValue("{\"name\":\"Alexander Pushkin\",\"description\":\"playwright\",\"id\": \"" + UUID.randomUUID().toString() + "\"}", JsonNode.class); + JsonNode doc3 = om.readValue("{\"name\":\"حافظ شیرازی\",\"description\":\"poet\",\"id\": \"" + UUID.randomUUID().toString() + "\"}", JsonNode.class); + + collection = database.createContainer(collectionDefinition).block().container(); + + CosmosItem item = collection.createItem(doc1).block().item(); + + CosmosItemRequestOptions options = new CosmosItemRequestOptions(); + options.partitionKey(PartitionKey.None); + CosmosItemProperties itemSettings = item.read(options).block().properties(); + assertThat(itemSettings.id()).isEqualTo(doc1.get("id").textValue()); + + try { + collection.createItem(doc1).block(); + fail("Did not throw due to unique constraint (create)"); + } catch (RuntimeException e) { + assertThat(getDocumentClientException(e).statusCode()).isEqualTo(HttpConstants.StatusCodes.CONFLICT); + } + + collection.createItem(doc2).block(); + collection.createItem(doc3).block(); + } + + @Test(groups = { "long" }, timeOut = TIMEOUT * 1000) + public void replaceAndDeleteWithUniqueIndex() throws Exception { + PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition(); + ArrayList paths = new ArrayList(); + paths.add("/mypk"); + partitionKeyDef.paths(paths); + + CosmosContainerProperties collectionDefinition = new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDef); + UniqueKeyPolicy uniqueKeyPolicy = new UniqueKeyPolicy(); + UniqueKey uniqueKey = new UniqueKey(); + uniqueKey.paths(ImmutableList.of("/name", "/description")); + uniqueKeyPolicy.uniqueKeys(Lists.newArrayList(uniqueKey)); + collectionDefinition.uniqueKeyPolicy(uniqueKeyPolicy); + + collection = 
database.createContainer(collectionDefinition).block().container(); + + ObjectMapper om = new ObjectMapper(); + + ObjectNode doc1 = om.readValue("{\"name\":\"عمر خیّام\",\"description\":\"poet\",\"id\": \""+ UUID.randomUUID().toString() +"\"}", ObjectNode.class); + ObjectNode doc3 = om.readValue("{\"name\":\"Rabindranath Tagore\",\"description\":\"poet\",\"id\": \""+ UUID.randomUUID().toString() +"\"}", ObjectNode.class); + ObjectNode doc2 = om.readValue("{\"name\":\"عمر خیّام\",\"description\":\"mathematician\",\"id\": \""+ UUID.randomUUID().toString() +"\"}", ObjectNode.class); + + CosmosItemProperties doc1Inserted = collection.createItem(doc1, new CosmosItemRequestOptions()).block().properties(); + + collection.getItem(doc1.get("id").asText(), PartitionKey.None).replace(doc1Inserted, new CosmosItemRequestOptions()).block().properties(); // REPLACE with same values -- OK. + + CosmosItemProperties doc2Inserted = collection.createItem(doc2, new CosmosItemRequestOptions()).block().properties(); + CosmosItemProperties doc2Replacement = new CosmosItemProperties(doc1Inserted.toJson()); + doc2Replacement.id( doc2Inserted.id()); + + try { + collection.getItem(doc2Inserted.id(), PartitionKey.None).replace(doc2Replacement, new CosmosItemRequestOptions()).block(); // REPLACE doc2 with values from doc1 -- Conflict. + fail("Did not throw due to unique constraint"); + } + catch (RuntimeException ex) { + assertThat(getDocumentClientException(ex).statusCode()).isEqualTo(HttpConstants.StatusCodes.CONFLICT); + } + + doc3.put("id", doc1Inserted.id()); + collection.getItem(doc1Inserted.id(), PartitionKey.None).replace(doc3).block(); // REPLACE with values from doc3 -- OK. + + collection.getItem(doc1Inserted.id(), PartitionKey.None).delete().block(); + collection.createItem(doc1, new CosmosItemRequestOptions()).block(); + } + + @Test(groups = { "long" }, timeOut = TIMEOUT) + public void uniqueKeySerializationDeserialization() { + PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition(); + ArrayList paths = new ArrayList(); + paths.add("/mypk"); + partitionKeyDef.paths(paths); + + CosmosContainerProperties collectionDefinition = new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDef); + UniqueKeyPolicy uniqueKeyPolicy = new UniqueKeyPolicy(); + UniqueKey uniqueKey = new UniqueKey(); + uniqueKey.paths(ImmutableList.of("/name", "/description")); + uniqueKeyPolicy.uniqueKeys(Lists.newArrayList(uniqueKey)); + collectionDefinition.uniqueKeyPolicy(uniqueKeyPolicy); + + IndexingPolicy indexingPolicy = new IndexingPolicy(); + indexingPolicy.indexingMode(IndexingMode.CONSISTENT); + ExcludedPath excludedPath = new ExcludedPath(); + excludedPath.path("/*"); + indexingPolicy.excludedPaths(Collections.singletonList(excludedPath)); + + IncludedPath includedPath1 = new IncludedPath(); + includedPath1.path("/name/?"); + includedPath1.indexes(Collections.singletonList(new HashIndex(DataType.STRING, 7))); + + IncludedPath includedPath2 = new IncludedPath(); + includedPath2.path("/description/?"); + includedPath2.indexes(Collections.singletonList(new HashIndex(DataType.STRING, 7))); + indexingPolicy.setIncludedPaths(ImmutableList.of(includedPath1, includedPath2)); + + collectionDefinition.indexingPolicy(indexingPolicy); + + CosmosContainer createdCollection = database.createContainer(collectionDefinition).block().container(); + + CosmosContainerProperties collection = createdCollection.read().block().properties(); + + assertThat(collection.uniqueKeyPolicy()).isNotNull(); + 
assertThat(collection.uniqueKeyPolicy().uniqueKeys()).isNotNull(); + assertThat(collection.uniqueKeyPolicy().uniqueKeys()) + .hasSameSizeAs(collectionDefinition.uniqueKeyPolicy().uniqueKeys()); + assertThat(collection.uniqueKeyPolicy().uniqueKeys() + .stream().map(ui -> ui.paths()).collect(Collectors.toList())) + .containsExactlyElementsOf( + ImmutableList.of(ImmutableList.of("/name", "/description"))); + } + + private CosmosClientException getDocumentClientException(RuntimeException e) { + CosmosClientException dce = Utils.as(e.getCause(), CosmosClientException.class); + assertThat(dce).isNotNull(); + return dce; + } + + @BeforeClass(groups = { "long" }, timeOut = SETUP_TIMEOUT) + public void beforeClass() { + // set up the client + client = CosmosClient.builder() + .endpoint(TestConfigurations.HOST) + .key(TestConfigurations.MASTER_KEY) + .connectionPolicy(ConnectionPolicy.defaultPolicy()) + .consistencyLevel(ConsistencyLevel.SESSION).build(); + + database = createDatabase(client, databaseId); + } + + @AfterClass(groups = { "long" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeDeleteDatabase(database); + safeClose(client); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/UserCrudTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/UserCrudTest.java new file mode 100644 index 0000000000000..61f47f895a7af --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/UserCrudTest.java @@ -0,0 +1,180 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos.rx; + + +import com.azure.data.cosmos.CosmosClient; +import com.azure.data.cosmos.CosmosClientBuilder; +import com.azure.data.cosmos.CosmosDatabase; +import com.azure.data.cosmos.CosmosDatabaseForTest; +import com.azure.data.cosmos.CosmosResponseValidator; +import com.azure.data.cosmos.CosmosUser; +import com.azure.data.cosmos.CosmosUserResponse; +import com.azure.data.cosmos.CosmosUserProperties; +import com.azure.data.cosmos.internal.FailureValidator; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Mono; + +import java.util.UUID; + +public class UserCrudTest extends TestSuiteBase { + + public final String databaseId = CosmosDatabaseForTest.generateId(); + + private CosmosDatabase createdDatabase; + + private CosmosClient client; + + @Factory(dataProvider = "clientBuilders") + public UserCrudTest(CosmosClientBuilder clientBuilder) { + super(clientBuilder); + } + + @Test(groups = { "emulator" }, timeOut = TIMEOUT) + public void createUser() throws Exception { + //create user + CosmosUserProperties user = new CosmosUserProperties(); + user.id(UUID.randomUUID().toString()); + + Mono createObservable = createdDatabase.createUser(user); + + // validate user creation + CosmosResponseValidator validator = new CosmosResponseValidator.Builder() + .withId(user.id()) + .notNullEtag() + .build(); + validateSuccess(createObservable, validator); + } + + @Test(groups = { "emulator" }, timeOut = TIMEOUT) + public void readUser() throws Exception { + + //create user + CosmosUserProperties user = new CosmosUserProperties(); + user.id(UUID.randomUUID().toString()); + + CosmosUser readBackUser = createdDatabase.createUser(user).block().user(); + + // read user + Mono readObservable = readBackUser.read(); + + //validate user read + CosmosResponseValidator validator = new CosmosResponseValidator.Builder() + .withId(readBackUser.id()) + .notNullEtag() + .build(); + + validateSuccess(readObservable, validator); + } + + @Test(groups = { "emulator" }, timeOut = TIMEOUT) + public void deleteUser() throws Exception { + //create user + CosmosUserProperties user = new CosmosUserProperties(); + user.id(UUID.randomUUID().toString()); + + CosmosUser readBackUser = createdDatabase.createUser(user).block().user(); + + // delete user + Mono deleteObservable = readBackUser.delete(); + + // validate user delete + CosmosResponseValidator validator = new CosmosResponseValidator.Builder() + .nullResource() + .build(); + validateSuccess(deleteObservable, validator); + + // attempt to read the user which was deleted + Mono readObservable = readBackUser.read(); + FailureValidator notFoundValidator = new FailureValidator.Builder().resourceNotFound().build(); + validateFailure(readObservable, notFoundValidator); + } + + @Test(groups = { "emulator" }, timeOut = TIMEOUT) + public void upsertUser() throws Exception { + + //create user + CosmosUserProperties user = new CosmosUserProperties(); + user.id(UUID.randomUUID().toString()); + + Mono upsertObservable = createdDatabase.upsertUser(user); + + //validate user upsert + CosmosResponseValidator validatorForUpsert = new CosmosResponseValidator.Builder() + .withId(user.id()) + .notNullEtag() + .build(); + + validateSuccess(upsertObservable, validatorForUpsert); + } + + @Test(groups = { "emulator" }, timeOut = TIMEOUT) + public void replaceUser() throws Exception { + + //create user + CosmosUserProperties user 
= new CosmosUserProperties(); + user.id(UUID.randomUUID().toString()); + + CosmosUserProperties readBackUser = createdDatabase.createUser(user).block().properties(); + + // read user to validate creation + Mono readObservable = createdDatabase.getUser(user.id()).read(); + + //validate user read + CosmosResponseValidator validatorForRead = new CosmosResponseValidator.Builder() + .withId(readBackUser.id()) + .notNullEtag() + .build(); + + validateSuccess(readObservable, validatorForRead); + + //update user + String oldId = readBackUser.id(); + readBackUser.id(UUID.randomUUID().toString()); + + Mono updateObservable = createdDatabase.getUser(oldId).replace(readBackUser); + + // validate user replace + CosmosResponseValidator validatorForUpdate = new CosmosResponseValidator.Builder() + .withId(readBackUser.id()) + .notNullEtag() + .build(); + + validateSuccess(updateObservable, validatorForUpdate); + } + + @BeforeClass(groups = { "emulator" }, timeOut = SETUP_TIMEOUT) + public void beforeClass() { + client = clientBuilder().build(); + createdDatabase = createDatabase(client, databaseId); + } + + @AfterClass(groups = { "emulator" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeDeleteDatabase(createdDatabase); + safeClose(client); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/UserDefinedFunctionCrudTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/UserDefinedFunctionCrudTest.java new file mode 100644 index 0000000000000..d00e74ac9f499 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/UserDefinedFunctionCrudTest.java @@ -0,0 +1,119 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.CosmosClient; +import com.azure.data.cosmos.CosmosClientBuilder; +import com.azure.data.cosmos.CosmosContainer; +import com.azure.data.cosmos.CosmosResponse; +import com.azure.data.cosmos.CosmosResponseValidator; +import com.azure.data.cosmos.CosmosUserDefinedFunction; +import com.azure.data.cosmos.CosmosUserDefinedFunctionProperties; +import com.azure.data.cosmos.CosmosUserDefinedFunctionResponse; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Mono; + +import java.util.UUID; + +public class UserDefinedFunctionCrudTest extends TestSuiteBase { + + private CosmosContainer createdCollection; + private CosmosClient client; + + @Factory(dataProvider = "clientBuildersWithDirect") + public UserDefinedFunctionCrudTest(CosmosClientBuilder clientBuilder) { + super(clientBuilder); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void createUserDefinedFunction() throws Exception { + // create udf + CosmosUserDefinedFunctionProperties udf = new CosmosUserDefinedFunctionProperties(); + udf.id(UUID.randomUUID().toString()); + udf.body("function() {var x = 10;}"); + + Mono createObservable = createdCollection.getScripts().createUserDefinedFunction(udf); + + // validate udf creation + CosmosResponseValidator validator = new CosmosResponseValidator.Builder() + .withId(udf.id()) + .withUserDefinedFunctionBody("function() {var x = 10;}") + .notNullEtag() + .build(); + validateSuccess(createObservable, validator); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void readUserDefinedFunction() throws Exception { + // create a udf + CosmosUserDefinedFunctionProperties udf = new CosmosUserDefinedFunctionProperties(); + udf.id(UUID.randomUUID().toString()); + udf.body("function() {var x = 10;}"); + CosmosUserDefinedFunction readBackUdf = createdCollection.getScripts().createUserDefinedFunction(udf).block().userDefinedFunction(); + + // read udf + waitIfNeededForReplicasToCatchUp(clientBuilder()); + Mono readObservable = readBackUdf.read(); + + //validate udf read + CosmosResponseValidator validator = new CosmosResponseValidator.Builder() + .withId(udf.id()) + .withUserDefinedFunctionBody("function() {var x = 10;}") + .notNullEtag() + .build(); + validateSuccess(readObservable, validator); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void deleteUserDefinedFunction() throws Exception { + // create a udf + CosmosUserDefinedFunctionProperties udf = new CosmosUserDefinedFunctionProperties(); + udf.id(UUID.randomUUID().toString()); + udf.body("function() {var x = 10;}"); + CosmosUserDefinedFunction readBackUdf = createdCollection.getScripts().createUserDefinedFunction(udf).block().userDefinedFunction(); + + // delete udf + Mono deleteObservable = readBackUdf.delete(); + + // validate udf delete + CosmosResponseValidator validator = new CosmosResponseValidator.Builder() + .nullResource() + .build(); + validateSuccess(deleteObservable, validator); + } + + @BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT) + public void beforeClass() { + client = clientBuilder().build(); + createdCollection = getSharedMultiPartitionCosmosContainer(client); + } + + @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeClose(client); + } + +} diff --git 
a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/UserDefinedFunctionQueryTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/UserDefinedFunctionQueryTest.java new file mode 100644 index 0000000000000..b77d030affd43 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/UserDefinedFunctionQueryTest.java @@ -0,0 +1,180 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.CosmosClient; +import com.azure.data.cosmos.CosmosClientBuilder; +import com.azure.data.cosmos.CosmosClientException; +import com.azure.data.cosmos.CosmosContainer; +import com.azure.data.cosmos.CosmosUserDefinedFunctionProperties; +import com.azure.data.cosmos.internal.Database; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.internal.FailureValidator; +import com.azure.data.cosmos.internal.FeedResponseListValidator; +import com.azure.data.cosmos.internal.FeedResponseValidator; +import com.azure.data.cosmos.internal.TestUtils; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; + +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.stream.Collectors; + +import static org.assertj.core.api.Assertions.assertThat; + +public class UserDefinedFunctionQueryTest extends TestSuiteBase { + + private Database createdDatabase; + private CosmosContainer createdCollection; + private List createdUDF = new ArrayList<>(); + + private CosmosClient client; + + public String getCollectionLink() { + return TestUtils.getCollectionNameLink(createdDatabase.id(), createdCollection.id()); + } + + @Factory(dataProvider = "clientBuildersWithDirect") + public UserDefinedFunctionQueryTest(CosmosClientBuilder clientBuilder) { + super(clientBuilder); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void queryWithFilter() throws Exception { + + String filterId = createdUDF.get(0).id(); + String query = String.format("SELECT * from c where c.id = '%s'", filterId); + + FeedOptions options = new FeedOptions(); + options.maxItemCount(5); + Flux> queryObservable = createdCollection.getScripts().queryUserDefinedFunctions(query, options); + + List expectedDocs = 
createdUDF.stream().filter(sp -> filterId.equals(sp.id())).collect(Collectors.toList()); + assertThat(expectedDocs).isNotEmpty(); + + int expectedPageSize = (expectedDocs.size() + options.maxItemCount() - 1) / options.maxItemCount(); + + FeedResponseListValidator<CosmosUserDefinedFunctionProperties> validator = new FeedResponseListValidator.Builder<CosmosUserDefinedFunctionProperties>() + .totalSize(expectedDocs.size()) + .exactlyContainsInAnyOrder(expectedDocs.stream().map(d -> d.resourceId()).collect(Collectors.toList())) + .numberOfPages(expectedPageSize) + .pageSatisfy(0, new FeedResponseValidator.Builder<CosmosUserDefinedFunctionProperties>() + .requestChargeGreaterThanOrEqualTo(1.0).build()) + .build(); + + validateQuerySuccess(queryObservable, validator, 10000); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void query_NoResults() throws Exception { + + String query = "SELECT * from root r where r.id = '2'"; + FeedOptions options = new FeedOptions(); + options.enableCrossPartitionQuery(true); + Flux<FeedResponse<CosmosUserDefinedFunctionProperties>> queryObservable = createdCollection.getScripts().queryUserDefinedFunctions(query, options); + + FeedResponseListValidator<CosmosUserDefinedFunctionProperties> validator = new FeedResponseListValidator.Builder<CosmosUserDefinedFunctionProperties>() + .containsExactly(new ArrayList<>()) + .numberOfPages(1) + .pageSatisfy(0, new FeedResponseValidator.Builder<CosmosUserDefinedFunctionProperties>() + .requestChargeGreaterThanOrEqualTo(1.0).build()) + .build(); + validateQuerySuccess(queryObservable, validator); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void queryAll() throws Exception { + + String query = "SELECT * from root"; + FeedOptions options = new FeedOptions(); + options.maxItemCount(3); + options.enableCrossPartitionQuery(true); + Flux<FeedResponse<CosmosUserDefinedFunctionProperties>> queryObservable = createdCollection.getScripts().queryUserDefinedFunctions(query, options); + + List<CosmosUserDefinedFunctionProperties> expectedDocs = createdUDF; + + int expectedPageSize = (expectedDocs.size() + options.maxItemCount() - 1) / options.maxItemCount(); + + FeedResponseListValidator<CosmosUserDefinedFunctionProperties> validator = new FeedResponseListValidator + .Builder<CosmosUserDefinedFunctionProperties>() + .exactlyContainsInAnyOrder(expectedDocs + .stream() + .map(d -> d.resourceId()) + .collect(Collectors.toList())) + .numberOfPages(expectedPageSize) + .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosUserDefinedFunctionProperties>() + .requestChargeGreaterThanOrEqualTo(1.0).build()) + .build(); + validateQuerySuccess(queryObservable, validator); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void invalidQuerySyntax() throws Exception { + String query = "I am an invalid query"; + FeedOptions options = new FeedOptions(); + options.enableCrossPartitionQuery(true); + Flux<FeedResponse<CosmosUserDefinedFunctionProperties>> queryObservable = createdCollection.getScripts().queryUserDefinedFunctions(query, options); + + FailureValidator validator = new FailureValidator.Builder() + .instanceOf(CosmosClientException.class) + .statusCode(400) + .notNullActivityId() + .build(); + validateQueryFailure(queryObservable, validator); + } + + public CosmosUserDefinedFunctionProperties createUserDefinedFunction(CosmosContainer cosmosContainer) { + CosmosUserDefinedFunctionProperties udfDef = getUserDefinedFunctionDef(); + return cosmosContainer.getScripts().createUserDefinedFunction(udfDef).block().properties(); + } + + @BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT) + public void beforeClass() throws Exception { + client = clientBuilder().build(); + createdCollection = getSharedMultiPartitionCosmosContainer(client); + truncateCollection(createdCollection); + + for(int i = 0; i < 5; i++) { + createdUDF.add(createUserDefinedFunction(createdCollection)); + } + + waitIfNeededForReplicasToCatchUp(clientBuilder()); + } + + @AfterClass(groups = { "simple" }, timeOut =
SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeClose(client); + } + + private static CosmosUserDefinedFunctionProperties getUserDefinedFunctionDef() { + CosmosUserDefinedFunctionProperties udf = new CosmosUserDefinedFunctionProperties(); + udf.id(UUID.randomUUID().toString()); + udf.body("function() {var x = 10;}"); + return udf; + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/UserDefinedFunctionUpsertReplaceTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/UserDefinedFunctionUpsertReplaceTest.java new file mode 100644 index 0000000000000..3f18c39be096f --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/UserDefinedFunctionUpsertReplaceTest.java @@ -0,0 +1,99 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.CosmosClient; +import com.azure.data.cosmos.CosmosClientBuilder; +import com.azure.data.cosmos.CosmosContainer; +import com.azure.data.cosmos.CosmosResponseValidator; +import com.azure.data.cosmos.CosmosUserDefinedFunctionProperties; +import com.azure.data.cosmos.CosmosUserDefinedFunctionResponse; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Mono; + +import java.util.UUID; + +public class UserDefinedFunctionUpsertReplaceTest extends TestSuiteBase { + + private CosmosContainer createdCollection; + + private CosmosClient client; + + @Factory(dataProvider = "clientBuildersWithDirect") + public UserDefinedFunctionUpsertReplaceTest(CosmosClientBuilder clientBuilder) { + super(clientBuilder); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void replaceUserDefinedFunction() throws Exception { + + // create a udf + CosmosUserDefinedFunctionProperties udf = new CosmosUserDefinedFunctionProperties(); + udf.id(UUID.randomUUID().toString()); + udf.body("function() {var x = 10;}"); + + CosmosUserDefinedFunctionProperties readBackUdf = null; + + readBackUdf = createdCollection.getScripts().createUserDefinedFunction(udf).block().properties(); + + // read udf to validate creation + waitIfNeededForReplicasToCatchUp(clientBuilder()); + Mono readObservable = createdCollection.getScripts().getUserDefinedFunction(readBackUdf.id()).read(); + + // validate udf creation + CosmosResponseValidator validatorForRead = new CosmosResponseValidator.Builder() + .withId(readBackUdf.id()) + .withUserDefinedFunctionBody("function() {var x = 10;}") + .notNullEtag() + .build(); + validateSuccess(readObservable, validatorForRead); + + //update udf + readBackUdf.body("function() {var x = 11;}"); + + Mono replaceObservable = createdCollection.getScripts().getUserDefinedFunction(readBackUdf.id()).replace(readBackUdf); + + //validate udf replace + CosmosResponseValidator validatorForReplace = new CosmosResponseValidator.Builder() + .withId(readBackUdf.id()) + .withUserDefinedFunctionBody("function() {var x = 11;}") + .notNullEtag() + .build(); + validateSuccess(replaceObservable, validatorForReplace); + } + + @BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT) + public void beforeClass() { + client = clientBuilder().build(); + createdCollection = getSharedMultiPartitionCosmosContainer(client); + truncateCollection(createdCollection); + } + + @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeClose(client); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/UserQueryTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/UserQueryTest.java new file mode 100644 index 0000000000000..3845c32f4db0f --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/UserQueryTest.java @@ -0,0 +1,154 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, 
subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.CosmosClient; +import com.azure.data.cosmos.CosmosClientBuilder; +import com.azure.data.cosmos.CosmosDatabase; +import com.azure.data.cosmos.CosmosDatabaseForTest; +import com.azure.data.cosmos.CosmosUserProperties; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import com.azure.data.cosmos.internal.FeedResponseListValidator; +import com.azure.data.cosmos.internal.FeedResponseValidator; +import com.azure.data.cosmos.internal.TestUtils; +import org.apache.commons.lang3.StringUtils; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; + +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.stream.Collectors; + +import static org.assertj.core.api.Assertions.assertThat; + +public class UserQueryTest extends TestSuiteBase { + + public final String databaseId = CosmosDatabaseForTest.generateId(); + + private List createdUsers = new ArrayList<>(); + + private CosmosClient client; + private CosmosDatabase createdDatabase; + + @Factory(dataProvider = "clientBuilders") + public UserQueryTest(CosmosClientBuilder clientBuilder) { + super(clientBuilder); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void queryUsersWithFilter() throws Exception { + + String filterUserId = createdUsers.get(0).id(); + String query = String.format("SELECT * from c where c.id = '%s'", filterUserId); + + FeedOptions options = new FeedOptions(); + options.maxItemCount(5); + Flux> queryObservable = createdDatabase.queryUsers(query, options); + + List expectedUsers = createdUsers.stream() + .filter(c -> StringUtils.equals(filterUserId, c.id()) ).collect(Collectors.toList()); + + assertThat(expectedUsers).isNotEmpty(); + + int expectedPageSize = (expectedUsers.size() + options.maxItemCount() - 1) / options.maxItemCount(); + + FeedResponseListValidator validator = new FeedResponseListValidator.Builder() + .totalSize(expectedUsers.size()) + .exactlyContainsInAnyOrder(expectedUsers.stream().map(d -> d.resourceId()).collect(Collectors.toList())) + .numberOfPages(expectedPageSize) + .pageSatisfy(0, new FeedResponseValidator.Builder() + .requestChargeGreaterThanOrEqualTo(1.0).build()) + .build(); + + validateQuerySuccess(queryObservable, validator, 10000); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void queryAllUsers() throws Exception { + + String query = "SELECT * from c"; + + FeedOptions options = new FeedOptions(); + options.maxItemCount(2); + String databaseLink = TestUtils.getDatabaseNameLink(databaseId); + Flux> queryObservable = createdDatabase.queryUsers(query, options); + + List expectedUsers = createdUsers; + + 
assertThat(expectedUsers).isNotEmpty(); + + int expectedPageSize = (expectedUsers.size() + options.maxItemCount() - 1) / options.maxItemCount(); + + FeedResponseListValidator validator = new FeedResponseListValidator.Builder() + .totalSize(expectedUsers.size()) + .exactlyContainsInAnyOrder(expectedUsers.stream().map(d -> d.resourceId()).collect(Collectors.toList())) + .numberOfPages(expectedPageSize) + .pageSatisfy(0, new FeedResponseValidator.Builder() + .requestChargeGreaterThanOrEqualTo(1.0).build()) + .build(); + + validateQuerySuccess(queryObservable, validator, 10000); + } + + @Test(groups = { "simple" }, timeOut = TIMEOUT) + public void queryUsers_NoResults() throws Exception { + + String query = "SELECT * from root r where r.id = '2'"; + FeedOptions options = new FeedOptions(); + Flux> queryObservable = createdDatabase.queryUsers(query, options); + + FeedResponseListValidator validator = new FeedResponseListValidator.Builder() + .containsExactly(new ArrayList<>()) + .numberOfPages(1) + .pageSatisfy(0, new FeedResponseValidator.Builder() + .requestChargeGreaterThanOrEqualTo(1.0).build()) + .build(); + validateQuerySuccess(queryObservable, validator); + } + + @BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT) + public void beforeClass() throws Exception { + client = clientBuilder().build(); + + createdDatabase = createDatabase(client, databaseId); + + for(int i = 0; i < 5; i++) { + CosmosUserProperties user = new CosmosUserProperties(); + user.id(UUID.randomUUID().toString()); + createdUsers.add(createUser(client, databaseId, user).read().block().properties()); + } + + waitIfNeededForReplicasToCatchUp(clientBuilder()); + } + + @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeDeleteDatabase(createdDatabase); + safeClose(client); + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/VeryLargeDocumentQueryTest.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/VeryLargeDocumentQueryTest.java new file mode 100644 index 0000000000000..5e69089374d64 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/VeryLargeDocumentQueryTest.java @@ -0,0 +1,125 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +package com.azure.data.cosmos.rx; + +import com.azure.data.cosmos.BridgeInternal; +import com.azure.data.cosmos.CosmosClient; +import com.azure.data.cosmos.CosmosClientBuilder; +import com.azure.data.cosmos.CosmosContainer; +import com.azure.data.cosmos.CosmosItemProperties; +import com.azure.data.cosmos.CosmosItemRequestOptions; +import com.azure.data.cosmos.CosmosItemResponse; +import com.azure.data.cosmos.FeedOptions; +import com.azure.data.cosmos.FeedResponse; +import org.apache.commons.lang3.StringUtils; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; +import reactor.core.scheduler.Schedulers; +import reactor.test.StepVerifier; + +import java.time.Duration; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.apache.commons.io.FileUtils.ONE_MB; + +public class VeryLargeDocumentQueryTest extends TestSuiteBase { + + private final static int TIMEOUT = 60000; + private final static int SETUP_TIMEOUT = 60000; + private CosmosContainer createdCollection; + + private CosmosClient client; + + @Factory(dataProvider = "simpleClientBuildersWithDirect") + public VeryLargeDocumentQueryTest(CosmosClientBuilder clientBuilder) { + super(clientBuilder); + } + + @Test(groups = { "emulator" }, timeOut = TIMEOUT) + public void queryLargeDocuments() { + + int cnt = 5; + + for(int i = 0; i < cnt; i++) { + createLargeDocument(); + } + + FeedOptions options = new FeedOptions(); + options.enableCrossPartitionQuery(true); + + Flux> feedResponseFlux = createdCollection.queryItems("SELECT * FROM r", + options); + + AtomicInteger totalCount = new AtomicInteger(); + StepVerifier.create(feedResponseFlux.subscribeOn(Schedulers.single())) + .thenConsumeWhile(feedResponse -> { + int size = feedResponse.results().size(); + totalCount.addAndGet(size); + return true; + }) + .expectComplete() + .verify(Duration.ofMillis(subscriberValidationTimeout)); + } + + private void createLargeDocument() { + CosmosItemProperties docDefinition = getDocumentDefinition(); + + //Keep size as ~ 1.999MB to account for size of other props + int size = (int) (ONE_MB * 1.999); + BridgeInternal.setProperty(docDefinition, "largeString", StringUtils.repeat("x", size)); + + Mono createObservable = createdCollection.createItem(docDefinition, new CosmosItemRequestOptions()); + + StepVerifier.create(createObservable.subscribeOn(Schedulers.single())) + .expectNextMatches(cosmosItemResponse -> cosmosItemResponse.properties().id().equals(docDefinition.id())) + .expectComplete() + .verify(Duration.ofMillis(subscriberValidationTimeout)); + } + + @BeforeClass(groups = { "emulator" }, timeOut = 2 * SETUP_TIMEOUT) + public void beforeClass() { + client = clientBuilder().build(); + createdCollection = getSharedMultiPartitionCosmosContainer(client); + truncateCollection(createdCollection); + } + + @AfterClass(groups = { "emulator" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeClose(client); + } + + private static CosmosItemProperties getDocumentDefinition() { + String uuid = UUID.randomUUID().toString(); + CosmosItemProperties doc = new CosmosItemProperties(String.format("{ " + + "\"id\": \"%s\", " + + "\"mypk\": \"%s\", " + + "}" + , uuid, uuid)); + return doc; + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/proxy/HttpProxyChannelInitializer.java 
b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/proxy/HttpProxyChannelInitializer.java new file mode 100644 index 0000000000000..bb3973d0ab3d3 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/proxy/HttpProxyChannelInitializer.java @@ -0,0 +1,48 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.rx.proxy; + +import io.netty.channel.ChannelInitializer; +import io.netty.channel.socket.SocketChannel; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.concurrent.atomic.AtomicLong; + +/** + * The channel initializer. + * + */ +public class HttpProxyChannelInitializer extends ChannelInitializer<SocketChannel> { + private final Logger logger = LoggerFactory.getLogger(HttpProxyChannelInitializer.class); + private final AtomicLong taskCounter = new AtomicLong(); + private HttpProxyClientHandler httpProxyClientHandler; + + @Override + protected void initChannel(SocketChannel ch) throws Exception { + // assign one task id per channel and reuse it for both the handler and the log line + String taskId = "task-" + taskCounter.getAndIncrement(); + httpProxyClientHandler = new HttpProxyClientHandler(taskId); + logger.info(taskId); + ch.pipeline().addLast(httpProxyClientHandler); + } + +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/proxy/HttpProxyClientHandler.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/proxy/HttpProxyClientHandler.java new file mode 100644 index 0000000000000..a1edfb573dadb --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/proxy/HttpProxyClientHandler.java @@ -0,0 +1,116 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.rx.proxy; + +import io.netty.bootstrap.Bootstrap; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.channel.Channel; +import io.netty.channel.ChannelFuture; +import io.netty.channel.ChannelFutureListener; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInboundHandlerAdapter; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Handle data from client. + * + */ +public class HttpProxyClientHandler extends ChannelInboundHandlerAdapter { + private final Logger logger = LoggerFactory.getLogger(HttpProxyClientHandler.class); + private final String id; + private Channel clientChannel; + private Channel remoteChannel; + private HttpProxyClientHeader header ; + public HttpProxyClientHandler(String id) { + this.id = id; + header = new HttpProxyClientHeader(); + } + + @Override + public void channelActive(ChannelHandlerContext ctx) { + clientChannel = ctx.channel(); + } + + @Override + public void channelRead(ChannelHandlerContext ctx, Object msg) { + if (header.isComplete()) { + remoteChannel.writeAndFlush(msg); // just forward + return; + } + + ByteBuf in = (ByteBuf) msg; + header.digest(in); + + if (!header.isComplete()) { + in.release(); + return; + } + + logger.info(id + " {}", header); + clientChannel.config().setAutoRead(false); // disable AutoRead until remote connection is ready + + if (header.isHttps()) { // if https, respond 200 to create tunnel + clientChannel.writeAndFlush(Unpooled.wrappedBuffer("HTTP/1.1 200 Connection Established\r\n\r\n".getBytes())); + } + + Bootstrap b = new Bootstrap(); + b.group(clientChannel.eventLoop()) // use the same EventLoop + .channel(clientChannel.getClass()) + .handler(new HttpProxyRemoteHandler(id, clientChannel)); + ChannelFuture f = b.connect(header.getHost(), header.getPort()); + remoteChannel = f.channel(); + + f.addListener((ChannelFutureListener) future -> { + if (future.isSuccess()) { + clientChannel.config().setAutoRead(true); // connection is ready, enable AutoRead + if (!header.isHttps()) { // forward header and remaining bytes + remoteChannel.write(header.getByteBuf()); + } + + remoteChannel.writeAndFlush(in); + } else { + in.release(); + clientChannel.close(); + } + }); + } + + @Override + public void channelInactive(ChannelHandlerContext ctx) { + flushAndClose(remoteChannel); + } + + @Override + public void exceptionCaught(ChannelHandlerContext ctx, Throwable e) { + logger.error(id + " error occured", e); + flushAndClose(clientChannel); + } + + private void flushAndClose(Channel ch) { + if (ch != null && ch.isActive()) { + ch.writeAndFlush(Unpooled.EMPTY_BUFFER).addListener(ChannelFutureListener.CLOSE); + } + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/proxy/HttpProxyClientHeader.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/proxy/HttpProxyClientHeader.java new file mode 100644 index 0000000000000..f58c47cc23d55 --- /dev/null +++ 
b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/proxy/HttpProxyClientHeader.java @@ -0,0 +1,149 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.rx.proxy; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; + +/** + * The http header of client. + * + */ +public class HttpProxyClientHeader { + private String method; + private String host; + private int port; + private boolean https; + private boolean complete; + private ByteBuf byteBuf = Unpooled.buffer(); + + private final StringBuilder lineBuf = new StringBuilder(); + + public boolean isComplete() { + return complete; + } + + public String getMethod() { + return method; + } + + public void setMethod(String method) { + this.method = method; + } + + public String getHost() { + return host; + } + + public void setHost(String host) { + this.host = host; + } + + public int getPort() { + return port; + } + + public void setPort(int port) { + this.port = port; + } + + public boolean isHttps() { + return https; + } + + public void setHttps(boolean https) { + this.https = https; + } + + public ByteBuf getByteBuf() { + return byteBuf; + } + + public void setByteBuf(ByteBuf byteBuf) { + this.byteBuf = byteBuf; + } + + public StringBuilder getLineBuf() { + return lineBuf; + } + + public void setComplete(boolean complete) { + this.complete = complete; + } + + public void digest(ByteBuf in) { + while (in.isReadable()) { + if (complete) { + throw new IllegalStateException("already complete"); + } + + String line = readLine(in); + if (line == null) { + return; + } + + if (method == null) { + method = line.split(" ")[0]; // the first word is http method name + https = method.equalsIgnoreCase("CONNECT"); // method CONNECT means https + } + + if (line.startsWith("Host: ") || line.startsWith("host: ")) { + String[] arr = line.split(":"); + host = arr[1].trim(); + if (arr.length == 3) { + port = Integer.parseInt(arr[2]); + } else if (https) { + port = 443; // https + } else { + port = 80; // http + } + } + + if (line.isEmpty()) { + if (host == null || port == 0) { + throw new IllegalStateException("cannot find header \'Host\'"); + } + + byteBuf = byteBuf.asReadOnly(); + complete = true; + break; + } + } + } + + private String readLine(ByteBuf in) { + while (in.isReadable()) { + byte b = in.readByte(); + byteBuf.writeByte(b); + lineBuf.append((char) b); + int len = lineBuf.length(); + if (len >= 2 && 
lineBuf.substring(len - 2).equals("\r\n")) { + String line = lineBuf.substring(0, len - 2); + lineBuf.delete(0, len); + return line; + } + + } + return null; + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/proxy/HttpProxyRemoteHandler.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/proxy/HttpProxyRemoteHandler.java new file mode 100644 index 0000000000000..7424ae3c25a2a --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/proxy/HttpProxyRemoteHandler.java @@ -0,0 +1,74 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.rx.proxy; + +import io.netty.buffer.Unpooled; +import io.netty.channel.Channel; +import io.netty.channel.ChannelFutureListener; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInboundHandlerAdapter; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Handle data from remote. 
+ * + */ +public class HttpProxyRemoteHandler extends ChannelInboundHandlerAdapter { + private final Logger logger = LoggerFactory.getLogger(HttpProxyRemoteHandler.class); + private final String id; + private Channel clientChannel; + private Channel remoteChannel; + + public HttpProxyRemoteHandler(String id, Channel clientChannel) { + this.id = id; + this.clientChannel = clientChannel; + } + + @Override + public void channelActive(ChannelHandlerContext ctx) { + this.remoteChannel = ctx.channel(); + } + + @Override + public void channelRead(ChannelHandlerContext ctx, Object msg) { + clientChannel.writeAndFlush(msg); // just forward + } + + @Override + public void channelInactive(ChannelHandlerContext ctx) { + flushAndClose(clientChannel); + } + + @Override + public void exceptionCaught(ChannelHandlerContext ctx, Throwable e) { + logger.error(id + " error occurred", e); + flushAndClose(remoteChannel); + } + + private void flushAndClose(Channel ch) { + if (ch != null && ch.isActive()) { + ch.writeAndFlush(Unpooled.EMPTY_BUFFER).addListener(ChannelFutureListener.CLOSE); + } + } +} diff --git a/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/proxy/HttpProxyServer.java b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/proxy/HttpProxyServer.java new file mode 100644 index 0000000000000..be33ef88ffce4 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/java/com/azure/data/cosmos/rx/proxy/HttpProxyServer.java @@ -0,0 +1,71 @@ +/* + * The MIT License (MIT) + * Copyright (c) 2018 Microsoft Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +package com.azure.data.cosmos.rx.proxy; + +import io.netty.bootstrap.ServerBootstrap; +import io.netty.channel.EventLoopGroup; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.nio.NioServerSocketChannel; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * An HTTP proxy server. 
+ * + */ +public class HttpProxyServer { + private final Logger logger = LoggerFactory.getLogger(HttpProxyServer.class); + private HttpProxyChannelInitializer httpProxyChannelInitializer; + private int port = 8080; + EventLoopGroup bossGroup; + EventLoopGroup workerGroup; + public HttpProxyServer() { + bossGroup = new NioEventLoopGroup(1); + workerGroup = new NioEventLoopGroup(); + } + + public void start() { + new Thread(() -> { + logger.info("HttpProxyServer started on port: {}", port); + httpProxyChannelInitializer = new HttpProxyChannelInitializer(); + try { + ServerBootstrap bootstrap = new ServerBootstrap(); + bootstrap.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class) + .childHandler(httpProxyChannelInitializer) + .bind(port).sync().channel().closeFuture().sync(); + } catch (InterruptedException e) { + logger.error("Error occurred", e); + } + }).start(); + } + + public void shutDown() { + if(!workerGroup.isShutdown()) { + workerGroup.shutdownGracefully(); + } + + if(!bossGroup.isShutdown()) { + bossGroup.shutdownGracefully(); + } + } +} diff --git a/sdk/cosmos/sdk/src/test/resources/Microsoft.jpg b/sdk/cosmos/sdk/src/test/resources/Microsoft.jpg new file mode 100644 index 0000000000000..cf6c76bba4981 Binary files /dev/null and b/sdk/cosmos/sdk/src/test/resources/Microsoft.jpg differ diff --git a/sdk/cosmos/sdk/src/test/resources/cosmosdb-1.png b/sdk/cosmos/sdk/src/test/resources/cosmosdb-1.png new file mode 100644 index 0000000000000..60d23806107c5 Binary files /dev/null and b/sdk/cosmos/sdk/src/test/resources/cosmosdb-1.png differ diff --git a/sdk/cosmos/sdk/src/test/resources/databaseAccount.json b/sdk/cosmos/sdk/src/test/resources/databaseAccount.json new file mode 100644 index 0000000000000..b514a0dda813a --- /dev/null +++ b/sdk/cosmos/sdk/src/test/resources/databaseAccount.json @@ -0,0 +1,38 @@ +{ + "_self": "", + "id": "localhost", + "_rid": "localhost", + "media": "//media/", + "addresses": "//addresses/", + "_dbs": "//dbs/", + "writableLocations": [ + { + "name": "South Central US", + "databaseAccountEndpoint": "https://127.0.0.1:8081/" + } + ], + "readableLocations": [ + { + "name": "South Central US", + "databaseAccountEndpoint": "https://127.0.0.1:8081/" + } + ], + "enableMultipleWriteLocations": false, + "userReplicationPolicy": { + "asyncReplication": false, + "minReplicaSetSize": 1, + "maxReplicasetSize": 4 + }, + "userConsistencyPolicy": { + "defaultConsistencyLevel": "Session" + }, + "systemReplicationPolicy": { + "minReplicaSetSize": 1, + "maxReplicasetSize": 4 + }, + "readPolicy": { + "primaryReadCoefficient": 1, + "secondaryReadCoefficient": 1 + }, + "queryEngineConfiguration": "{\"maxSqlQueryInputLength\":262144,\"maxJoinsPerSqlQuery\":5,\"maxLogicalAndPerSqlQuery\":500,\"maxLogicalOrPerSqlQuery\":500,\"maxUdfRefPerSqlQuery\":10,\"maxInExpressionItemsCount\":16000,\"queryMaxInMemorySortDocumentCount\":500,\"maxQueryRequestTimeoutFraction\":0.9,\"sqlAllowNonFiniteNumbers\":false,\"sqlAllowAggregateFunctions\":true,\"sqlAllowSubQuery\":true,\"sqlAllowScalarSubQuery\":true,\"allowNewKeywords\":true,\"sqlAllowLike\":false,\"maxSpatialQueryCells\":12,\"spatialMaxGeometryPointCount\":256,\"sqlAllowTop\":true,\"enableSpatialIndexing\":true}" +} \ No newline at end of file diff --git a/sdk/cosmos/sdk/src/test/resources/emulator-testng.xml b/sdk/cosmos/sdk/src/test/resources/emulator-testng.xml new file mode 100644 index 0000000000000..16bec3b3b396c --- /dev/null +++ b/sdk/cosmos/sdk/src/test/resources/emulator-testng.xml @@ -0,0 +1,35 @@ + + + + + + + + 
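For context on the proxy test harness added above: `HttpProxyClientHeader.readLine` copies each byte into an accumulating `ByteBuf` while splitting the stream on `\r\n`, and `digest` takes the HTTP method from the first request line (treating `CONNECT` as HTTPS), pulls host and port out of the `Host` header, and marks the header complete when it reaches the blank line. The sketch below is a minimal, editorial illustration of how a test could drive that parser directly; the class name, the `contoso.example.com` host, and the `main` wrapper are made up for illustration, and only methods visible in the diff are used.

```java
package com.azure.data.cosmos.rx.proxy;

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

import java.nio.charset.StandardCharsets;

// Hypothetical sketch class, not part of the change set.
public class HttpProxyClientHeaderSketch {
    public static void main(String[] args) {
        // A CONNECT request head as a TLS client would send it to the proxy;
        // the host name is purely illustrative.
        String requestHead = "CONNECT contoso.example.com:443 HTTP/1.1\r\n"
                + "Host: contoso.example.com:443\r\n"
                + "\r\n";
        ByteBuf in = Unpooled.copiedBuffer(requestHead, StandardCharsets.US_ASCII);

        HttpProxyClientHeader header = new HttpProxyClientHeader();
        header.digest(in); // consumes the bytes and fills in method/host/port

        System.out.println(header.getMethod());  // CONNECT
        System.out.println(header.isHttps());    // true
        System.out.println(header.getHost());    // contoso.example.com
        System.out.println(header.getPort());    // 443
        System.out.println(header.isComplete()); // true
    }
}
```

`HttpProxyServer.start()` and `shutDown()` from the same package bracket tests that need a live proxy on port 8080, with `HttpProxyRemoteHandler` forwarding the upstream bytes back to the client channel.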
+ + + + + + diff --git a/sdk/cosmos/sdk/src/test/resources/fast-testng.xml b/sdk/cosmos/sdk/src/test/resources/fast-testng.xml new file mode 100644 index 0000000000000..b78fb1cab3254 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/resources/fast-testng.xml @@ -0,0 +1,35 @@ + + + + + + + + + + + + + + diff --git a/sdk/cosmos/sdk/src/test/resources/log4j.properties b/sdk/cosmos/sdk/src/test/resources/log4j.properties new file mode 100644 index 0000000000000..1f3287c67a55f --- /dev/null +++ b/sdk/cosmos/sdk/src/test/resources/log4j.properties @@ -0,0 +1,15 @@ +# this is the log4j configuration for tests + +# Set root logger level to DEBUG and its only appender to A1. +log4j.rootLogger=INFO, A1 + +# Set HTTP components' logger to INFO + +log4j.category.io.netty=INFO +log4j.category.io.reactivex=INFO +# A1 is set to be a ConsoleAppender. +log4j.appender.A1=org.apache.log4j.ConsoleAppender + +# A1 uses PatternLayout. +log4j.appender.A1.layout=org.apache.log4j.PatternLayout +log4j.appender.A1.layout.ConversionPattern=%d %5X{pid} [%t] %-5p %c - %m%n diff --git a/sdk/cosmos/sdk/src/test/resources/long-testng.xml b/sdk/cosmos/sdk/src/test/resources/long-testng.xml new file mode 100644 index 0000000000000..debd90ba2136e --- /dev/null +++ b/sdk/cosmos/sdk/src/test/resources/long-testng.xml @@ -0,0 +1,35 @@ + + + + + + + + + + + + + + diff --git a/sdk/cosmos/sdk/src/test/resources/sampleConflict.json b/sdk/cosmos/sdk/src/test/resources/sampleConflict.json new file mode 100644 index 0000000000000..12f2a507b7a39 --- /dev/null +++ b/sdk/cosmos/sdk/src/test/resources/sampleConflict.json @@ -0,0 +1,11 @@ +{ + "id": "k6d9ALgBmD8BAAAAAAAAQA==", + "_rid": "k6d9ALgBmD8BAAAAAAAAQA==", + "_self": "dbs/k6d9AA==/colls/k6d9ALgBmD8=/conflicts/k6d9ALgBmD8BAAAAAAAAQA==/", + "_etag": "\"00004a0f-0000-0000-0000-5b6e214b0000\"", + "resourceType": "document", + "operationType": "create", + "resourceId": "k6d9ALgBmD+ChB4AAAAAAA==", + "content": "{\"id\":\"0007312a-a1c5-4b54-9e39-35de2367fa33\",\"regionId\":2,\"regionEndpoint\":\"https://test-southeastasia.documents.azure.com:443/\",\"_rid\":\"k6d9ALgBmD+ChB4AAAAAAA==\",\"_self\":\"dbs\\/k6d9AA==\\/colls\\/k6d9ALgBmD8=\\/docs\\/k6d9ALgBmD+ChB4AAAAAAA==\\/\",\"_etag\":\"\\\"00000200-0000-0000-0000-5b6e214b0000\\\"\",\"_attachments\":\"attachments\\/\",\"_ts\":1533944139}", + "_ts": 1533944139 +} diff --git a/eventgrid/data-plane/README.md b/sdk/eventgrid/microsoft-azure-eventgrid/README.md old mode 100755 new mode 100644 similarity index 100% rename from eventgrid/data-plane/README.md rename to sdk/eventgrid/microsoft-azure-eventgrid/README.md diff --git a/eventgrid/data-plane/pom.xml b/sdk/eventgrid/microsoft-azure-eventgrid/pom.xml old mode 100755 new mode 100644 similarity index 96% rename from eventgrid/data-plane/pom.xml rename to sdk/eventgrid/microsoft-azure-eventgrid/pom.xml index fbef3bf157f7a..674f7f407d618 --- a/eventgrid/data-plane/pom.xml +++ b/sdk/eventgrid/microsoft-azure-eventgrid/pom.xml @@ -14,7 +14,7 @@ Microsoft Azure SDK for eventgrid This package contains Microsoft Azure EventGrid SDK. 
https://github.com/Azure/azure-sdk-for-java - + The MIT License (MIT) http://opensource.org/licenses/MIT @@ -54,6 +54,11 @@ 1.5.0 test + + commons-io + commons-io + 2.6 + diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/DomainCredentials.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/DomainCredentials.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/DomainCredentials.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/DomainCredentials.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/EventGridClient.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/EventGridClient.java old mode 100755 new mode 100644 similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/EventGridClient.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/EventGridClient.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/TopicCredentials.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/TopicCredentials.java old mode 100755 new mode 100644 similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/TopicCredentials.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/TopicCredentials.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/customization/EventGridSubscriber.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/customization/EventGridSubscriber.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/customization/EventGridSubscriber.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/customization/EventGridSubscriber.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/customization/SystemEventTypeMappings.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/customization/SystemEventTypeMappings.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/customization/SystemEventTypeMappings.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/customization/SystemEventTypeMappings.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/customization/SystemEventTypes.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/customization/SystemEventTypes.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/customization/SystemEventTypes.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/customization/SystemEventTypes.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/implementation/EventGridClientImpl.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/implementation/EventGridClientImpl.java old mode 100755 new mode 100644 similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/implementation/EventGridClientImpl.java rename to 
sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/implementation/EventGridClientImpl.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/implementation/package-info.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/implementation/package-info.java old mode 100755 new mode 100644 similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/implementation/package-info.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/implementation/package-info.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/AppConfigurationKeyValueDeletedEventData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/AppConfigurationKeyValueDeletedEventData.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/AppConfigurationKeyValueDeletedEventData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/AppConfigurationKeyValueDeletedEventData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/AppConfigurationKeyValueModifiedEventData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/AppConfigurationKeyValueModifiedEventData.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/AppConfigurationKeyValueModifiedEventData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/AppConfigurationKeyValueModifiedEventData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/ContainerRegistryArtifactEventData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/ContainerRegistryArtifactEventData.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/ContainerRegistryArtifactEventData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/ContainerRegistryArtifactEventData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/ContainerRegistryArtifactEventTarget.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/ContainerRegistryArtifactEventTarget.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/ContainerRegistryArtifactEventTarget.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/ContainerRegistryArtifactEventTarget.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/ContainerRegistryChartDeletedEventData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/ContainerRegistryChartDeletedEventData.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/ContainerRegistryChartDeletedEventData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/ContainerRegistryChartDeletedEventData.java diff --git 
a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/ContainerRegistryChartPushedEventData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/ContainerRegistryChartPushedEventData.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/ContainerRegistryChartPushedEventData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/ContainerRegistryChartPushedEventData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/ContainerRegistryEventActor.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/ContainerRegistryEventActor.java old mode 100755 new mode 100644 similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/ContainerRegistryEventActor.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/ContainerRegistryEventActor.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/ContainerRegistryEventData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/ContainerRegistryEventData.java old mode 100755 new mode 100644 similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/ContainerRegistryEventData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/ContainerRegistryEventData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/ContainerRegistryEventRequest.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/ContainerRegistryEventRequest.java old mode 100755 new mode 100644 similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/ContainerRegistryEventRequest.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/ContainerRegistryEventRequest.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/ContainerRegistryEventSource.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/ContainerRegistryEventSource.java old mode 100755 new mode 100644 similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/ContainerRegistryEventSource.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/ContainerRegistryEventSource.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/ContainerRegistryEventTarget.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/ContainerRegistryEventTarget.java old mode 100755 new mode 100644 similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/ContainerRegistryEventTarget.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/ContainerRegistryEventTarget.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/ContainerRegistryImageDeletedEventData.java 
b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/ContainerRegistryImageDeletedEventData.java old mode 100755 new mode 100644 similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/ContainerRegistryImageDeletedEventData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/ContainerRegistryImageDeletedEventData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/ContainerRegistryImagePushedEventData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/ContainerRegistryImagePushedEventData.java old mode 100755 new mode 100644 similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/ContainerRegistryImagePushedEventData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/ContainerRegistryImagePushedEventData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/DeviceConnectionStateEventInfo.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/DeviceConnectionStateEventInfo.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/DeviceConnectionStateEventInfo.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/DeviceConnectionStateEventInfo.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/DeviceConnectionStateEventProperties.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/DeviceConnectionStateEventProperties.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/DeviceConnectionStateEventProperties.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/DeviceConnectionStateEventProperties.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/DeviceLifeCycleEventProperties.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/DeviceLifeCycleEventProperties.java old mode 100755 new mode 100644 similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/DeviceLifeCycleEventProperties.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/DeviceLifeCycleEventProperties.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/DeviceTelemetryEventProperties.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/DeviceTelemetryEventProperties.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/DeviceTelemetryEventProperties.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/DeviceTelemetryEventProperties.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/DeviceTwinInfo.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/DeviceTwinInfo.java old mode 100755 new mode 100644 similarity index 100% rename from 
eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/DeviceTwinInfo.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/DeviceTwinInfo.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/DeviceTwinInfoProperties.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/DeviceTwinInfoProperties.java old mode 100755 new mode 100644 similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/DeviceTwinInfoProperties.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/DeviceTwinInfoProperties.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/DeviceTwinInfoX509Thumbprint.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/DeviceTwinInfoX509Thumbprint.java old mode 100755 new mode 100644 similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/DeviceTwinInfoX509Thumbprint.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/DeviceTwinInfoX509Thumbprint.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/DeviceTwinMetadata.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/DeviceTwinMetadata.java old mode 100755 new mode 100644 similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/DeviceTwinMetadata.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/DeviceTwinMetadata.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/DeviceTwinProperties.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/DeviceTwinProperties.java old mode 100755 new mode 100644 similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/DeviceTwinProperties.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/DeviceTwinProperties.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/EventGridEvent.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/EventGridEvent.java old mode 100755 new mode 100644 similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/EventGridEvent.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/EventGridEvent.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/EventHubCaptureFileCreatedEventData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/EventHubCaptureFileCreatedEventData.java old mode 100755 new mode 100644 similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/EventHubCaptureFileCreatedEventData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/EventHubCaptureFileCreatedEventData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/IotHubDeviceConnectedEventData.java 
b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/IotHubDeviceConnectedEventData.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/IotHubDeviceConnectedEventData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/IotHubDeviceConnectedEventData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/IotHubDeviceCreatedEventData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/IotHubDeviceCreatedEventData.java old mode 100755 new mode 100644 similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/IotHubDeviceCreatedEventData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/IotHubDeviceCreatedEventData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/IotHubDeviceDeletedEventData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/IotHubDeviceDeletedEventData.java old mode 100755 new mode 100644 similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/IotHubDeviceDeletedEventData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/IotHubDeviceDeletedEventData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/IotHubDeviceDisconnectedEventData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/IotHubDeviceDisconnectedEventData.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/IotHubDeviceDisconnectedEventData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/IotHubDeviceDisconnectedEventData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/IotHubDeviceTelemetryEventData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/IotHubDeviceTelemetryEventData.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/IotHubDeviceTelemetryEventData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/IotHubDeviceTelemetryEventData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/JobState.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/JobState.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/JobState.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/JobState.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MapsGeofenceEnteredEventData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MapsGeofenceEnteredEventData.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MapsGeofenceEnteredEventData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MapsGeofenceEnteredEventData.java diff 
--git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MapsGeofenceEventProperties.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MapsGeofenceEventProperties.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MapsGeofenceEventProperties.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MapsGeofenceEventProperties.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MapsGeofenceExitedEventData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MapsGeofenceExitedEventData.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MapsGeofenceExitedEventData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MapsGeofenceExitedEventData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MapsGeofenceGeometry.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MapsGeofenceGeometry.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MapsGeofenceGeometry.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MapsGeofenceGeometry.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MapsGeofenceResultEventData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MapsGeofenceResultEventData.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MapsGeofenceResultEventData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MapsGeofenceResultEventData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobCanceledEventData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobCanceledEventData.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobCanceledEventData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobCanceledEventData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobCancelingEventData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobCancelingEventData.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobCancelingEventData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobCancelingEventData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobError.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobError.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobError.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobError.java diff --git 
a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobErrorCategory.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobErrorCategory.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobErrorCategory.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobErrorCategory.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobErrorCode.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobErrorCode.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobErrorCode.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobErrorCode.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobErrorDetail.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobErrorDetail.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobErrorDetail.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobErrorDetail.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobErroredEventData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobErroredEventData.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobErroredEventData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobErroredEventData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobFinishedEventData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobFinishedEventData.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobFinishedEventData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobFinishedEventData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobOutput.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobOutput.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobOutput.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobOutput.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobOutputAsset.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobOutputAsset.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobOutputAsset.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobOutputAsset.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobOutputCanceledEventData.java 
b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobOutputCanceledEventData.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobOutputCanceledEventData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobOutputCanceledEventData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobOutputCancelingEventData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobOutputCancelingEventData.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobOutputCancelingEventData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobOutputCancelingEventData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobOutputErroredEventData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobOutputErroredEventData.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobOutputErroredEventData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobOutputErroredEventData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobOutputFinishedEventData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobOutputFinishedEventData.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobOutputFinishedEventData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobOutputFinishedEventData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobOutputProcessingEventData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobOutputProcessingEventData.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobOutputProcessingEventData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobOutputProcessingEventData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobOutputProgressEventData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobOutputProgressEventData.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobOutputProgressEventData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobOutputProgressEventData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobOutputScheduledEventData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobOutputScheduledEventData.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobOutputScheduledEventData.java rename to 
sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobOutputScheduledEventData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobOutputStateChangeEventData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobOutputStateChangeEventData.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobOutputStateChangeEventData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobOutputStateChangeEventData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobProcessingEventData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobProcessingEventData.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobProcessingEventData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobProcessingEventData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobRetry.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobRetry.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobRetry.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobRetry.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobScheduledEventData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobScheduledEventData.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobScheduledEventData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobScheduledEventData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobState.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobState.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobState.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobState.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobStateChangeEventData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobStateChangeEventData.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobStateChangeEventData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaJobStateChangeEventData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaLiveEventConnectionRejectedEventData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaLiveEventConnectionRejectedEventData.java similarity index 100% rename from 
eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaLiveEventConnectionRejectedEventData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaLiveEventConnectionRejectedEventData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaLiveEventEncoderConnectedEventData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaLiveEventEncoderConnectedEventData.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaLiveEventEncoderConnectedEventData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaLiveEventEncoderConnectedEventData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaLiveEventEncoderDisconnectedEventData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaLiveEventEncoderDisconnectedEventData.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaLiveEventEncoderDisconnectedEventData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaLiveEventEncoderDisconnectedEventData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaLiveEventIncomingDataChunkDroppedEventData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaLiveEventIncomingDataChunkDroppedEventData.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaLiveEventIncomingDataChunkDroppedEventData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaLiveEventIncomingDataChunkDroppedEventData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaLiveEventIncomingStreamReceivedEventData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaLiveEventIncomingStreamReceivedEventData.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaLiveEventIncomingStreamReceivedEventData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaLiveEventIncomingStreamReceivedEventData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaLiveEventIncomingStreamsOutOfSyncEventData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaLiveEventIncomingStreamsOutOfSyncEventData.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaLiveEventIncomingStreamsOutOfSyncEventData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaLiveEventIncomingStreamsOutOfSyncEventData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaLiveEventIncomingVideoStreamsOutOfSyncEventData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaLiveEventIncomingVideoStreamsOutOfSyncEventData.java similarity index 100% rename from 
eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaLiveEventIncomingVideoStreamsOutOfSyncEventData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaLiveEventIncomingVideoStreamsOutOfSyncEventData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaLiveEventIngestHeartbeatEventData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaLiveEventIngestHeartbeatEventData.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaLiveEventIngestHeartbeatEventData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaLiveEventIngestHeartbeatEventData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaLiveEventTrackDiscontinuityDetectedEventData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaLiveEventTrackDiscontinuityDetectedEventData.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/MediaLiveEventTrackDiscontinuityDetectedEventData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/MediaLiveEventTrackDiscontinuityDetectedEventData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/ResourceActionCancelData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/ResourceActionCancelData.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/ResourceActionCancelData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/ResourceActionCancelData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/ResourceActionFailureData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/ResourceActionFailureData.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/ResourceActionFailureData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/ResourceActionFailureData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/ResourceActionSuccessData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/ResourceActionSuccessData.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/ResourceActionSuccessData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/ResourceActionSuccessData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/ResourceDeleteCancelData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/ResourceDeleteCancelData.java old mode 100755 new mode 100644 similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/ResourceDeleteCancelData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/ResourceDeleteCancelData.java diff --git 
a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/ResourceDeleteFailureData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/ResourceDeleteFailureData.java old mode 100755 new mode 100644 similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/ResourceDeleteFailureData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/ResourceDeleteFailureData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/ResourceDeleteSuccessData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/ResourceDeleteSuccessData.java old mode 100755 new mode 100644 similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/ResourceDeleteSuccessData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/ResourceDeleteSuccessData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/ResourceWriteCancelData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/ResourceWriteCancelData.java old mode 100755 new mode 100644 similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/ResourceWriteCancelData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/ResourceWriteCancelData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/ResourceWriteFailureData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/ResourceWriteFailureData.java old mode 100755 new mode 100644 similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/ResourceWriteFailureData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/ResourceWriteFailureData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/ResourceWriteSuccessData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/ResourceWriteSuccessData.java old mode 100755 new mode 100644 similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/ResourceWriteSuccessData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/ResourceWriteSuccessData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/ServiceBusActiveMessagesAvailableWithNoListenersEventData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/ServiceBusActiveMessagesAvailableWithNoListenersEventData.java old mode 100755 new mode 100644 similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/ServiceBusActiveMessagesAvailableWithNoListenersEventData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/ServiceBusActiveMessagesAvailableWithNoListenersEventData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/ServiceBusDeadletterMessagesAvailableWithNoListenersEventData.java 
b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/ServiceBusDeadletterMessagesAvailableWithNoListenersEventData.java old mode 100755 new mode 100644 similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/ServiceBusDeadletterMessagesAvailableWithNoListenersEventData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/ServiceBusDeadletterMessagesAvailableWithNoListenersEventData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/StorageBlobCreatedEventData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/StorageBlobCreatedEventData.java old mode 100755 new mode 100644 similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/StorageBlobCreatedEventData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/StorageBlobCreatedEventData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/StorageBlobDeletedEventData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/StorageBlobDeletedEventData.java old mode 100755 new mode 100644 similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/StorageBlobDeletedEventData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/StorageBlobDeletedEventData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/SubscriptionDeletedEventData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/SubscriptionDeletedEventData.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/SubscriptionDeletedEventData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/SubscriptionDeletedEventData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/SubscriptionValidationEventData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/SubscriptionValidationEventData.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/SubscriptionValidationEventData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/SubscriptionValidationEventData.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/SubscriptionValidationResponse.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/SubscriptionValidationResponse.java similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/SubscriptionValidationResponse.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/SubscriptionValidationResponse.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/package-info.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/package-info.java old mode 100755 new mode 100644 similarity index 100% rename from 
eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/models/package-info.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/models/package-info.java diff --git a/eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/package-info.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/package-info.java old mode 100755 new mode 100644 similarity index 100% rename from eventgrid/data-plane/src/main/java/com/microsoft/azure/eventgrid/package-info.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/main/java/com/microsoft/azure/eventgrid/package-info.java diff --git a/eventgrid/data-plane/src/test/java/com/microsoft/azure/eventgrid/EventGridTests.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/java/com/microsoft/azure/eventgrid/EventGridTests.java old mode 100755 new mode 100644 similarity index 98% rename from eventgrid/data-plane/src/test/java/com/microsoft/azure/eventgrid/EventGridTests.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/java/com/microsoft/azure/eventgrid/EventGridTests.java index 05473791aa670..962ad6805904d --- a/eventgrid/data-plane/src/test/java/com/microsoft/azure/eventgrid/EventGridTests.java +++ b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/java/com/microsoft/azure/eventgrid/EventGridTests.java @@ -10,6 +10,7 @@ import org.joda.time.DateTime; import org.junit.Test; +import org.junit.Ignore; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertEquals; @@ -21,6 +22,7 @@ public class EventGridTests { @Test + @Ignore public void canPublishEvent() throws Exception { String endpoint = System.getenv("EG_ENDPOINT"); String key = System.getenv("EG_KEY"); diff --git a/eventgrid/data-plane/src/test/java/com/microsoft/azure/eventgrid/customization/CustomizationTests.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/java/com/microsoft/azure/eventgrid/customization/CustomizationTests.java similarity index 99% rename from eventgrid/data-plane/src/test/java/com/microsoft/azure/eventgrid/customization/CustomizationTests.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/java/com/microsoft/azure/eventgrid/customization/CustomizationTests.java index d2dc11318afb7..c9c64040e7781 100644 --- a/eventgrid/data-plane/src/test/java/com/microsoft/azure/eventgrid/customization/CustomizationTests.java +++ b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/java/com/microsoft/azure/eventgrid/customization/CustomizationTests.java @@ -70,7 +70,7 @@ import com.microsoft.azure.eventgrid.models.SubscriptionValidationEventData; import org.junit.Assert; import org.junit.Test; -import sun.misc.IOUtils; +import org.apache.commons.io.IOUtils; import java.io.IOException; import java.lang.reflect.Type; @@ -1038,7 +1038,7 @@ public void consumeResourceWriteSuccessEvent() throws IOException { private String getTestPayloadFromFile(String fileName) { ClassLoader classLoader = getClass().getClassLoader(); try { - byte[] bytes = IOUtils.readFully(classLoader.getResourceAsStream("customization\\" + fileName), -1, true); + byte[] bytes = IOUtils.toByteArray(classLoader.getResourceAsStream("customization/" + fileName)); return new String(bytes); } catch (IOException e) { throw new RuntimeException(e); diff --git a/eventgrid/data-plane/src/test/java/com/microsoft/azure/eventgrid/customization/models/ContosoItemReceivedEventData.java 
b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/java/com/microsoft/azure/eventgrid/customization/models/ContosoItemReceivedEventData.java similarity index 100% rename from eventgrid/data-plane/src/test/java/com/microsoft/azure/eventgrid/customization/models/ContosoItemReceivedEventData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/java/com/microsoft/azure/eventgrid/customization/models/ContosoItemReceivedEventData.java diff --git a/eventgrid/data-plane/src/test/java/com/microsoft/azure/eventgrid/customization/models/ContosoItemSentEventData.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/java/com/microsoft/azure/eventgrid/customization/models/ContosoItemSentEventData.java similarity index 100% rename from eventgrid/data-plane/src/test/java/com/microsoft/azure/eventgrid/customization/models/ContosoItemSentEventData.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/java/com/microsoft/azure/eventgrid/customization/models/ContosoItemSentEventData.java diff --git a/eventgrid/data-plane/src/test/java/com/microsoft/azure/eventgrid/customization/models/DroneShippingInfo.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/java/com/microsoft/azure/eventgrid/customization/models/DroneShippingInfo.java similarity index 100% rename from eventgrid/data-plane/src/test/java/com/microsoft/azure/eventgrid/customization/models/DroneShippingInfo.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/java/com/microsoft/azure/eventgrid/customization/models/DroneShippingInfo.java diff --git a/eventgrid/data-plane/src/test/java/com/microsoft/azure/eventgrid/customization/models/RocketShippingInfo.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/java/com/microsoft/azure/eventgrid/customization/models/RocketShippingInfo.java similarity index 100% rename from eventgrid/data-plane/src/test/java/com/microsoft/azure/eventgrid/customization/models/RocketShippingInfo.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/java/com/microsoft/azure/eventgrid/customization/models/RocketShippingInfo.java diff --git a/eventgrid/data-plane/src/test/java/com/microsoft/azure/eventgrid/customization/models/ShippingInfo.java b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/java/com/microsoft/azure/eventgrid/customization/models/ShippingInfo.java similarity index 100% rename from eventgrid/data-plane/src/test/java/com/microsoft/azure/eventgrid/customization/models/ShippingInfo.java rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/java/com/microsoft/azure/eventgrid/customization/models/ShippingInfo.java diff --git a/eventgrid/data-plane/src/test/resources/customization/AppConfigurationKeyValueDeleted.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/AppConfigurationKeyValueDeleted.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/AppConfigurationKeyValueDeleted.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/AppConfigurationKeyValueDeleted.json diff --git a/eventgrid/data-plane/src/test/resources/customization/AppConfigurationKeyValueModified.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/AppConfigurationKeyValueModified.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/AppConfigurationKeyValueModified.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/AppConfigurationKeyValueModified.json diff --git 
a/eventgrid/data-plane/src/test/resources/customization/ContainerRegistryChartDeletedEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/ContainerRegistryChartDeletedEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/ContainerRegistryChartDeletedEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/ContainerRegistryChartDeletedEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/ContainerRegistryChartPushedEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/ContainerRegistryChartPushedEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/ContainerRegistryChartPushedEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/ContainerRegistryChartPushedEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/ContainerRegistryImageDeletedEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/ContainerRegistryImageDeletedEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/ContainerRegistryImageDeletedEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/ContainerRegistryImageDeletedEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/ContainerRegistryImagePushedEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/ContainerRegistryImagePushedEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/ContainerRegistryImagePushedEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/ContainerRegistryImagePushedEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/CustomEventWithArrayData.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/CustomEventWithArrayData.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/CustomEventWithArrayData.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/CustomEventWithArrayData.json diff --git a/eventgrid/data-plane/src/test/resources/customization/CustomEventWithBooleanData.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/CustomEventWithBooleanData.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/CustomEventWithBooleanData.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/CustomEventWithBooleanData.json diff --git a/eventgrid/data-plane/src/test/resources/customization/CustomEventWithPolymorphicData.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/CustomEventWithPolymorphicData.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/CustomEventWithPolymorphicData.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/CustomEventWithPolymorphicData.json diff --git a/eventgrid/data-plane/src/test/resources/customization/CustomEventWithStringData.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/CustomEventWithStringData.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/CustomEventWithStringData.json rename to 
sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/CustomEventWithStringData.json diff --git a/eventgrid/data-plane/src/test/resources/customization/CustomEvents.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/CustomEvents.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/CustomEvents.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/CustomEvents.json diff --git a/eventgrid/data-plane/src/test/resources/customization/EventGridSubscriptionDeletedEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/EventGridSubscriptionDeletedEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/EventGridSubscriptionDeletedEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/EventGridSubscriptionDeletedEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/EventGridSubscriptionValidationEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/EventGridSubscriptionValidationEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/EventGridSubscriptionValidationEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/EventGridSubscriptionValidationEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/EventHubCaptureFileCreatedEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/EventHubCaptureFileCreatedEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/EventHubCaptureFileCreatedEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/EventHubCaptureFileCreatedEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/IoTHubDeviceConnectedEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/IoTHubDeviceConnectedEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/IoTHubDeviceConnectedEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/IoTHubDeviceConnectedEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/IoTHubDeviceCreatedEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/IoTHubDeviceCreatedEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/IoTHubDeviceCreatedEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/IoTHubDeviceCreatedEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/IoTHubDeviceDeletedEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/IoTHubDeviceDeletedEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/IoTHubDeviceDeletedEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/IoTHubDeviceDeletedEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/IoTHubDeviceDisconnectedEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/IoTHubDeviceDisconnectedEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/IoTHubDeviceDisconnectedEvent.json rename to 
sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/IoTHubDeviceDisconnectedEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/IoTHubDeviceTelemetryEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/IoTHubDeviceTelemetryEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/IoTHubDeviceTelemetryEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/IoTHubDeviceTelemetryEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/MapsGeofenceEnteredEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MapsGeofenceEnteredEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/MapsGeofenceEnteredEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MapsGeofenceEnteredEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/MapsGeofenceExitedEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MapsGeofenceExitedEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/MapsGeofenceExitedEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MapsGeofenceExitedEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/MapsGeofenceResultEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MapsGeofenceResultEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/MapsGeofenceResultEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MapsGeofenceResultEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/MediaJobCanceledEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaJobCanceledEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/MediaJobCanceledEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaJobCanceledEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/MediaJobCancelingEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaJobCancelingEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/MediaJobCancelingEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaJobCancelingEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/MediaJobErroredEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaJobErroredEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/MediaJobErroredEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaJobErroredEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/MediaJobFinishedEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaJobFinishedEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/MediaJobFinishedEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaJobFinishedEvent.json diff --git 
a/eventgrid/data-plane/src/test/resources/customization/MediaJobOutputCanceledEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaJobOutputCanceledEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/MediaJobOutputCanceledEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaJobOutputCanceledEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/MediaJobOutputCancelingEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaJobOutputCancelingEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/MediaJobOutputCancelingEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaJobOutputCancelingEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/MediaJobOutputErroredEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaJobOutputErroredEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/MediaJobOutputErroredEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaJobOutputErroredEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/MediaJobOutputFinishedEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaJobOutputFinishedEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/MediaJobOutputFinishedEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaJobOutputFinishedEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/MediaJobOutputProcessingEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaJobOutputProcessingEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/MediaJobOutputProcessingEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaJobOutputProcessingEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/MediaJobOutputProgressEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaJobOutputProgressEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/MediaJobOutputProgressEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaJobOutputProgressEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/MediaJobOutputScheduledEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaJobOutputScheduledEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/MediaJobOutputScheduledEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaJobOutputScheduledEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/MediaJobOutputStateChangeEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaJobOutputStateChangeEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/MediaJobOutputStateChangeEvent.json rename to 
sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaJobOutputStateChangeEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/MediaJobProcessingEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaJobProcessingEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/MediaJobProcessingEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaJobProcessingEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/MediaJobScheduledEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaJobScheduledEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/MediaJobScheduledEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaJobScheduledEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/MediaJobStateChangeEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaJobStateChangeEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/MediaJobStateChangeEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaJobStateChangeEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/MediaLiveEventConnectionRejectedEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaLiveEventConnectionRejectedEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/MediaLiveEventConnectionRejectedEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaLiveEventConnectionRejectedEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/MediaLiveEventEncoderConnectedEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaLiveEventEncoderConnectedEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/MediaLiveEventEncoderConnectedEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaLiveEventEncoderConnectedEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/MediaLiveEventEncoderDisconnectedEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaLiveEventEncoderDisconnectedEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/MediaLiveEventEncoderDisconnectedEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaLiveEventEncoderDisconnectedEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/MediaLiveEventIncomingDataChunkDroppedEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaLiveEventIncomingDataChunkDroppedEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/MediaLiveEventIncomingDataChunkDroppedEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaLiveEventIncomingDataChunkDroppedEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/MediaLiveEventIncomingStreamReceivedEvent.json 
b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaLiveEventIncomingStreamReceivedEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/MediaLiveEventIncomingStreamReceivedEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaLiveEventIncomingStreamReceivedEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/MediaLiveEventIncomingStreamsOutOfSyncEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaLiveEventIncomingStreamsOutOfSyncEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/MediaLiveEventIncomingStreamsOutOfSyncEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaLiveEventIncomingStreamsOutOfSyncEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/MediaLiveEventIncomingVideoStreamsOutOfSyncEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaLiveEventIncomingVideoStreamsOutOfSyncEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/MediaLiveEventIncomingVideoStreamsOutOfSyncEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaLiveEventIncomingVideoStreamsOutOfSyncEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/MediaLiveEventIngestHeartbeatEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaLiveEventIngestHeartbeatEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/MediaLiveEventIngestHeartbeatEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaLiveEventIngestHeartbeatEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/MediaLiveEventTrackDiscontinuityDetectedEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaLiveEventTrackDiscontinuityDetectedEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/MediaLiveEventTrackDiscontinuityDetectedEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaLiveEventTrackDiscontinuityDetectedEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/MediaServicesJobStateChangedEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaServicesJobStateChangedEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/MediaServicesJobStateChangedEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MediaServicesJobStateChangedEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/MultipleEventsInSameBatch.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MultipleEventsInSameBatch.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/MultipleEventsInSameBatch.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/MultipleEventsInSameBatch.json diff --git a/eventgrid/data-plane/src/test/resources/customization/ResourceActionCancelEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/ResourceActionCancelEvent.json similarity index 100% rename 
from eventgrid/data-plane/src/test/resources/customization/ResourceActionCancelEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/ResourceActionCancelEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/ResourceActionFailureEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/ResourceActionFailureEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/ResourceActionFailureEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/ResourceActionFailureEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/ResourceActionSuccessEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/ResourceActionSuccessEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/ResourceActionSuccessEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/ResourceActionSuccessEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/ResourceDeleteCancelEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/ResourceDeleteCancelEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/ResourceDeleteCancelEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/ResourceDeleteCancelEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/ResourceDeleteFailureEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/ResourceDeleteFailureEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/ResourceDeleteFailureEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/ResourceDeleteFailureEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/ResourceDeleteSuccessEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/ResourceDeleteSuccessEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/ResourceDeleteSuccessEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/ResourceDeleteSuccessEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/ResourceWriteCancelEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/ResourceWriteCancelEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/ResourceWriteCancelEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/ResourceWriteCancelEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/ResourceWriteFailureEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/ResourceWriteFailureEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/ResourceWriteFailureEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/ResourceWriteFailureEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/ResourceWriteSuccessEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/ResourceWriteSuccessEvent.json similarity index 100% rename from 
eventgrid/data-plane/src/test/resources/customization/ResourceWriteSuccessEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/ResourceWriteSuccessEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/ServiceBusActiveMessagesAvailableWithNoListenersEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/ServiceBusActiveMessagesAvailableWithNoListenersEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/ServiceBusActiveMessagesAvailableWithNoListenersEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/ServiceBusActiveMessagesAvailableWithNoListenersEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/ServiceBusDeadletterMessagesAvailableWithNoListenersEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/ServiceBusDeadletterMessagesAvailableWithNoListenersEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/ServiceBusDeadletterMessagesAvailableWithNoListenersEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/ServiceBusDeadletterMessagesAvailableWithNoListenersEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/StorageBlobCreatedEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/StorageBlobCreatedEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/StorageBlobCreatedEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/StorageBlobCreatedEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/StorageBlobDeletedEvent.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/StorageBlobDeletedEvent.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/StorageBlobDeletedEvent.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/StorageBlobDeletedEvent.json diff --git a/eventgrid/data-plane/src/test/resources/customization/StorageBlobDeletedEventWithExtraProperty.json b/sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/StorageBlobDeletedEventWithExtraProperty.json similarity index 100% rename from eventgrid/data-plane/src/test/resources/customization/StorageBlobDeletedEventWithExtraProperty.json rename to sdk/eventgrid/microsoft-azure-eventgrid/src/test/resources/customization/StorageBlobDeletedEventWithExtraProperty.json diff --git a/sdk/eventgrid/pom.service.xml b/sdk/eventgrid/pom.service.xml new file mode 100644 index 0000000000000..74589c55c2717 --- /dev/null +++ b/sdk/eventgrid/pom.service.xml @@ -0,0 +1,14 @@ + + + 4.0.0 + com.azure + azure-eventgrid-service + pom + 1.0.0 + + microsoft-azure-eventgrid + + diff --git a/eventhubs/client/CHANGELOG.md b/sdk/eventhubs/azure-eventhubs/CHANGELOG.md similarity index 100% rename from eventhubs/client/CHANGELOG.md rename to sdk/eventhubs/azure-eventhubs/CHANGELOG.md diff --git a/eventhubs/client/CONTRIBUTING.md b/sdk/eventhubs/azure-eventhubs/CONTRIBUTING.md similarity index 100% rename from eventhubs/client/CONTRIBUTING.md rename to sdk/eventhubs/azure-eventhubs/CONTRIBUTING.md diff --git a/eventhubs/client/README.md b/sdk/eventhubs/azure-eventhubs/README.md similarity index 96% rename from eventhubs/client/README.md rename to 
sdk/eventhubs/azure-eventhubs/README.md index 68cddc513e8f5..adc20b3f2cf68 100644 --- a/eventhubs/client/README.md +++ b/sdk/eventhubs/azure-eventhubs/README.md @@ -51,12 +51,12 @@ The easiest means for doing so is to use a connection string, which is created a Event Hubs namespace. If you aren't familiar with shared access policies in Azure, you may wish to follow the step-by-step guide to [get an Event Hubs connection string][event_hubs_connection_string]. -Once the connection string is obtained, create an `EventHubClient` using the `EventHubClientBuilder`: +Once the connection string is obtained, create an `EventHubAsyncClient` using the `EventHubClientBuilder`: ```java String connectionString = "<< CONNECTION STRING FOR THE EVENT HUBS NAMESPACE >>"; String eventHubPath = "<< NAME OF THE EVENT HUB >>"; -EventHubClient client = new EventHubClientBuilder() +EventHubAsyncClient client = new EventHubClientBuilder() .connectionString(connectionString, eventHubPath) .buildAsyncClient(); ``` @@ -91,7 +91,7 @@ ClientSecretCredential credential = new ClientSecretCredential() // {your-namespace}.servicebus.windows.net String host = "<< EVENT HUBS HOST >>" String eventHubPath = "<< NAME OF THE EVENT HUB >>"; -EventHubClient client = new EventHubClientBuilder() +EventHubAsyncClient client = new EventHubClientBuilder() .credential(host, eventHubPath, credential) .buildAsyncClient(); ``` @@ -143,7 +143,7 @@ you can also use the send method to send multiple events using a single call. #### Producer creation -With an existing [EventHubClient][eventhubclient], developers can create a producer by calling `createProducer()` or +With an existing [EventHubAsyncClient][eventhubasyncclient], developers can create a producer by calling `createProducer()` or `createProducer(EventHubProducerOptions)`. Creates a producer sends events to any partition, allowing Event Hubs service to route the event to an available @@ -192,7 +192,7 @@ We are creating a consumer that receives events from `partitionID` and only list the partition. ```java -EventHubConsumer consumer = client.createConsumer(EventHubClient.DEFAULT_CONSUMER_GROUP_NAME, partitionID, +EventHubConsumer consumer = client.createConsumer(EventHubAsyncClient.DEFAULT_CONSUMER_GROUP_NAME, partitionID, EventPosition.latest()); ``` @@ -305,7 +305,7 @@ Guidelines](./CONTRIBUTING.md) for more information. 
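The README hunks above swap the entry-point type from `EventHubClient` to `EventHubAsyncClient` while the builder, `createConsumer(...)`, and `EventPosition.latest()` calls stay the same. Strung together, the updated snippets suggest roughly the flow below; this is a minimal sketch that assumes the preview surface shown in this diff (notably a reactive `receive()` on `EventHubConsumer`), and the connection string, Event Hub name, and partition id are placeholders.

```java
import com.azure.messaging.eventhubs.EventHubAsyncClient;
import com.azure.messaging.eventhubs.EventHubClientBuilder;
import com.azure.messaging.eventhubs.EventHubConsumer;
import com.azure.messaging.eventhubs.models.EventPosition;

public class ReceiveLatestEventsSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder values, as in the README samples.
        String connectionString = "<< CONNECTION STRING FOR THE EVENT HUBS NAMESPACE >>";
        String eventHubPath = "<< NAME OF THE EVENT HUB >>";
        String partitionId = "0"; // hypothetical partition identifier

        // Build the asynchronous client; after this rename the builder returns EventHubAsyncClient.
        EventHubAsyncClient client = new EventHubClientBuilder()
            .connectionString(connectionString, eventHubPath)
            .buildAsyncClient();

        // Receive only events enqueued after the consumer is created, from a single partition.
        EventHubConsumer consumer = client.createConsumer(
            EventHubAsyncClient.DEFAULT_CONSUMER_GROUP_NAME, partitionId, EventPosition.latest());

        // receive() is asynchronous in this preview; events are pushed to the subscriber as they arrive.
        consumer.receive().subscribe(event -> System.out.println("Received an event: " + event));

        // Keep the process alive briefly for the demo, then release the AMQP resources.
        Thread.sleep(30_000);
        consumer.close();
        client.close();
    }
}
```

In a real consumer the subscription would stay open for the application's lifetime instead of a fixed sleep, but the shape of the calls is the same.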
[event_hubs_messaging_exceptions]: https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-messaging-exceptions [event_hubs_product_docs]: https://docs.microsoft.com/en-us/azure/event-hubs/ [event_hubs_quotas]: https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-quotas -[eventhubclient]: ./azure-eventhubs/src/main/java/com/azure/messaging/eventhubs/EventHubClient.java +[eventhubasyncclient]: ./azure-eventhubs/src/main/java/com/azure/messaging/eventhubs/EventHubAsyncClient.java [eventhubconsumer]: ./azure-eventhubs/src/main/java/com/azure/messaging/eventhubs/EventHubProducer.java [eventhubproduceroptions]: ./azure-eventhubs/src/main/java/com/azure/messaging/eventhubs/EventHubProducerOptions.java [java_8_sdk_javadocs]: https://docs.oracle.com/javase/8/docs/api/java/util/logging/package-summary.html diff --git a/eventhubs/client/azure-eventhubs/pom.xml b/sdk/eventhubs/azure-eventhubs/pom.xml similarity index 100% rename from eventhubs/client/azure-eventhubs/pom.xml rename to sdk/eventhubs/azure-eventhubs/pom.xml diff --git a/eventhubs/client/azure-eventhubs/src/main/java/com/azure/messaging/eventhubs/EventData.java b/sdk/eventhubs/azure-eventhubs/src/main/java/com/azure/messaging/eventhubs/EventData.java similarity index 100% rename from eventhubs/client/azure-eventhubs/src/main/java/com/azure/messaging/eventhubs/EventData.java rename to sdk/eventhubs/azure-eventhubs/src/main/java/com/azure/messaging/eventhubs/EventData.java diff --git a/eventhubs/client/azure-eventhubs/src/main/java/com/azure/messaging/eventhubs/EventDataBatch.java b/sdk/eventhubs/azure-eventhubs/src/main/java/com/azure/messaging/eventhubs/EventDataBatch.java similarity index 100% rename from eventhubs/client/azure-eventhubs/src/main/java/com/azure/messaging/eventhubs/EventDataBatch.java rename to sdk/eventhubs/azure-eventhubs/src/main/java/com/azure/messaging/eventhubs/EventDataBatch.java diff --git a/eventhubs/client/azure-eventhubs/src/main/java/com/azure/messaging/eventhubs/EventHubClient.java b/sdk/eventhubs/azure-eventhubs/src/main/java/com/azure/messaging/eventhubs/EventHubAsyncClient.java similarity index 85% rename from eventhubs/client/azure-eventhubs/src/main/java/com/azure/messaging/eventhubs/EventHubClient.java rename to sdk/eventhubs/azure-eventhubs/src/main/java/com/azure/messaging/eventhubs/EventHubAsyncClient.java index a9cd13f889fd0..b8fbd025a1bc9 100644 --- a/eventhubs/client/azure-eventhubs/src/main/java/com/azure/messaging/eventhubs/EventHubClient.java +++ b/sdk/eventhubs/azure-eventhubs/src/main/java/com/azure/messaging/eventhubs/EventHubAsyncClient.java @@ -6,11 +6,15 @@ import com.azure.core.amqp.AmqpConnection; import com.azure.core.amqp.exception.AmqpException; import com.azure.core.amqp.exception.ErrorContext; +import com.azure.core.implementation.annotation.ReturnType; +import com.azure.core.implementation.annotation.ServiceClient; +import com.azure.core.implementation.annotation.ServiceMethod; import com.azure.core.implementation.util.ImplUtils; import com.azure.core.util.logging.ClientLogger; import com.azure.messaging.eventhubs.implementation.AmqpReceiveLink; import com.azure.messaging.eventhubs.implementation.AmqpResponseMapper; import com.azure.messaging.eventhubs.implementation.AmqpSendLink; +import com.azure.messaging.eventhubs.implementation.AmqpConstants; import com.azure.messaging.eventhubs.implementation.ConnectionOptions; import com.azure.messaging.eventhubs.implementation.EventHubConnection; import com.azure.messaging.eventhubs.implementation.EventHubManagementNode; @@ 
-20,6 +24,9 @@ import com.azure.messaging.eventhubs.implementation.ReactorHandlerProvider; import com.azure.messaging.eventhubs.implementation.ReactorProvider; import com.azure.messaging.eventhubs.implementation.StringUtil; +import com.azure.messaging.eventhubs.models.EventHubConsumerOptions; +import com.azure.messaging.eventhubs.models.EventHubProducerOptions; +import com.azure.messaging.eventhubs.models.EventPosition; import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; @@ -31,23 +38,28 @@ import java.util.Objects; import java.util.concurrent.atomic.AtomicBoolean; +import static com.azure.core.amqp.MessageConstant.ENQUEUED_TIME_UTC_ANNOTATION_NAME; +import static com.azure.core.amqp.MessageConstant.OFFSET_ANNOTATION_NAME; +import static com.azure.core.amqp.MessageConstant.SEQUENCE_NUMBER_ANNOTATION_NAME; + /** * The main point of interaction with Azure Event Hubs, the client offers a connection to a specific Event Hub within * the Event Hubs namespace and offers operations for sending event data, receiving events, and inspecting the connected * Event Hub. * - *

Creating an {@link EventHubClient} using Event Hubs namespace connection string + * Creating an {@link EventHubAsyncClient} using Event Hubs namespace connection string * * {@codesnippet com.azure.messaging.eventhubs.eventhubclientbuilder.connectionString#string-string} * - * Creating an {@link EventHubClient} using Event Hub instance connection string + * Creating an {@link EventHubAsyncClient} using Event Hub instance connection string
* * {@codesnippet com.azure.messaging.eventhubs.eventhubclientbuilder.connectionstring#string} * * @see EventHubClientBuilder * @see About Azure Event Hubs */ -public class EventHubClient implements Closeable { +@ServiceClient(builder = EventHubClientBuilder.class, isAsync = true) +public class EventHubAsyncClient implements Closeable { /** * The name of the default consumer group in the Event Hubs service. */ @@ -56,7 +68,7 @@ public class EventHubClient implements Closeable { private static final String RECEIVER_ENTITY_PATH_FORMAT = "%s/ConsumerGroups/%s/Partitions/%s"; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; - private final ClientLogger logger = new ClientLogger(EventHubClient.class); + private final ClientLogger logger = new ClientLogger(EventHubAsyncClient.class); private final String connectionId; private final Mono connectionMono; private final AtomicBoolean hasConnection = new AtomicBoolean(false); @@ -65,7 +77,7 @@ public class EventHubClient implements Closeable { private final EventHubProducerOptions defaultProducerOptions; private final EventHubConsumerOptions defaultConsumerOptions; - EventHubClient(ConnectionOptions connectionOptions, ReactorProvider provider, ReactorHandlerProvider handlerProvider) { + EventHubAsyncClient(ConnectionOptions connectionOptions, ReactorProvider provider, ReactorHandlerProvider handlerProvider) { Objects.requireNonNull(connectionOptions); Objects.requireNonNull(provider); Objects.requireNonNull(handlerProvider); @@ -91,6 +103,7 @@ public class EventHubClient implements Closeable { * * @return The set of information for the Event Hub that this client is associated with. */ + @ServiceMethod(returns = ReturnType.SINGLE) public Mono getProperties() { return connectionMono.flatMap(connection -> connection.getManagementNode().flatMap(EventHubManagementNode::getEventHubProperties)); } @@ -100,6 +113,7 @@ public Mono getProperties() { * * @return A Flux of identifiers for the partitions of an Event Hub. */ + @ServiceMethod(returns = ReturnType.COLLECTION) public Flux getPartitionIds() { return getProperties().flatMapMany(properties -> Flux.fromArray(properties.partitionIds())); } @@ -111,6 +125,7 @@ public Flux getPartitionIds() { * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. */ + @ServiceMethod(returns = ReturnType.SINGLE) public Mono getPartitionProperties(String partitionId) { return connectionMono.flatMap( connection -> connection.getManagementNode().flatMap(node -> { @@ -249,7 +264,7 @@ public EventHubConsumer createConsumer(String consumerGroup, String partitionId, return connection.createSession(entityPath).cast(EventHubSession.class); }).flatMap(session -> { logger.info("Creating consumer."); - return session.createConsumer(linkName, entityPath, eventPosition.getExpression(), connectionOptions.timeout(), + return session.createConsumer(linkName, entityPath, getExpression(eventPosition), connectionOptions.timeout(), clonedOptions.retry(), options.ownerLevel(), options.identifier()).cast(AmqpReceiveLink.class); }); @@ -275,6 +290,32 @@ public void close() { } } + private static String getExpression(EventPosition eventPosition) { + final String isInclusiveFlag = eventPosition.isInclusive() ? 
"=" : ""; + + // order of preference + if (eventPosition.offset() != null) { + return String.format(AmqpConstants.AMQP_ANNOTATION_FORMAT, OFFSET_ANNOTATION_NAME.getValue(), isInclusiveFlag, eventPosition.offset()); + } + + if (eventPosition.sequenceNumber() != null) { + return String.format(AmqpConstants.AMQP_ANNOTATION_FORMAT, SEQUENCE_NUMBER_ANNOTATION_NAME.getValue(), isInclusiveFlag, eventPosition.sequenceNumber()); + } + + if (eventPosition.enqueuedDateTime() != null) { + String ms; + try { + ms = Long.toString(eventPosition.enqueuedDateTime().toEpochMilli()); + } catch (ArithmeticException ex) { + ms = Long.toString(Long.MAX_VALUE); + } + + return String.format(AmqpConstants.AMQP_ANNOTATION_FORMAT, ENQUEUED_TIME_UTC_ANNOTATION_NAME.getValue(), isInclusiveFlag, ms); + } + + throw new IllegalArgumentException("No starting position was set."); + } + private static class ResponseMapper implements AmqpResponseMapper { @Override public EventHubProperties toEventHubProperties(Map amqpBody) { diff --git a/eventhubs/client/azure-eventhubs/src/main/java/com/azure/messaging/eventhubs/EventHubClientBuilder.java b/sdk/eventhubs/azure-eventhubs/src/main/java/com/azure/messaging/eventhubs/EventHubClientBuilder.java similarity index 90% rename from eventhubs/client/azure-eventhubs/src/main/java/com/azure/messaging/eventhubs/EventHubClientBuilder.java rename to sdk/eventhubs/azure-eventhubs/src/main/java/com/azure/messaging/eventhubs/EventHubClientBuilder.java index a336bd7774d43..a7152dd1e525a 100644 --- a/eventhubs/client/azure-eventhubs/src/main/java/com/azure/messaging/eventhubs/EventHubClientBuilder.java +++ b/sdk/eventhubs/azure-eventhubs/src/main/java/com/azure/messaging/eventhubs/EventHubClientBuilder.java @@ -7,6 +7,7 @@ import com.azure.core.amqp.TransportType; import com.azure.core.credentials.TokenCredential; import com.azure.core.exception.AzureException; +import com.azure.core.implementation.annotation.ServiceClientBuilder; import com.azure.core.implementation.util.ImplUtils; import com.azure.core.util.configuration.BaseConfigurations; import com.azure.core.util.configuration.Configuration; @@ -17,6 +18,8 @@ import com.azure.messaging.eventhubs.implementation.ConnectionStringProperties; import com.azure.messaging.eventhubs.implementation.ReactorHandlerProvider; import com.azure.messaging.eventhubs.implementation.ReactorProvider; +import com.azure.messaging.eventhubs.models.ProxyAuthenticationType; +import com.azure.messaging.eventhubs.models.ProxyConfiguration; import reactor.core.scheduler.Scheduler; import reactor.core.scheduler.Schedulers; @@ -30,29 +33,30 @@ /** * This class provides a fluent builder API to help aid the configuration and instantiation of the {@link - * EventHubClient}. Calling {@link #buildAsyncClient()} constructs an instant of the client. + * EventHubAsyncClient}. Calling {@link #buildAsyncClient()} constructs an instant of the client. * *

* The client requires credentials or a connection string to perform operations against Azure Event Hubs. Setting * credentials by using {@link #connectionString(String)}, {@link #connectionString(String, String)}, or {@link - * #credential(String, String, TokenCredential)}, is required in order to construct an {@link EventHubClient}. + * #credential(String, String, TokenCredential)}, is required in order to construct an {@link EventHubAsyncClient}. *

* - * Creating an {@link EventHubClient} using Event Hubs namespace connection string + * Creating an {@link EventHubAsyncClient} using Event Hubs namespace connection string * * {@codesnippet com.azure.messaging.eventhubs.eventhubclientbuilder.connectionString#string-string} * - * Creating an {@link EventHubClient} using Event Hub instance connection string + * Creating an {@link EventHubAsyncClient} using Event Hub instance connection string * * {@codesnippet com.azure.messaging.eventhubs.eventhubclientbuilder.connectionstring#string} * - * Creating an {@link EventHubClient} using Event Hub with no {@link Retry}, different timeout and new + * Creating an {@link EventHubAsyncClient} using Event Hub with no {@link Retry}, different timeout and new * Scheduler
* * {@codesnippet com.azure.messaging.eventhubs.eventhubclientbuilder.retry-timeout-scheduler} * - * @see EventHubClient + * @see EventHubAsyncClient */ +@ServiceClientBuilder(serviceClients = EventHubAsyncClient.class) public class EventHubClientBuilder { private static final String AZURE_EVENT_HUBS_CONNECTION_STRING = "AZURE_EVENT_HUBS_CONNECTION_STRING"; @@ -151,10 +155,10 @@ public EventHubClientBuilder connectionString(String connectionString, String ev /** * Sets the configuration store that is used during construction of the service client. * - * If not specified, the default configuration store is used to configure the {@link EventHubClient}. Use {@link + * If not specified, the default configuration store is used to configure the {@link EventHubAsyncClient}. Use {@link * Configuration#NONE} to bypass using configuration settings during construction. * - * @param configuration The configuration store used to configure the {@link EventHubClient}. + * @param configuration The configuration store used to configure the {@link EventHubAsyncClient}. * @return The updated {@link EventHubClientBuilder} object. */ public EventHubClientBuilder configuration(Configuration configuration) { @@ -191,7 +195,7 @@ public EventHubClientBuilder credential(String host, String eventHubPath, TokenC } /** - * Sets the proxy configuration to use for {@link EventHubClient}. When a proxy is configured, {@link + * Sets the proxy configuration to use for {@link EventHubAsyncClient}. When a proxy is configured, {@link * TransportType#AMQP_WEB_SOCKETS} must be used for the transport type. * * @param proxyConfiguration The proxy configuration to use. @@ -228,7 +232,7 @@ public EventHubClientBuilder transportType(TransportType transport) { } /** - * Sets the default operation timeout for operations performed using {@link EventHubClient} and {@link + * Sets the default operation timeout for operations performed using {@link EventHubAsyncClient} and {@link * EventHubConsumer} such as starting the communication link with the service and sending messages. * * @param timeout Duration for operation timeout. @@ -240,7 +244,7 @@ public EventHubClientBuilder timeout(Duration timeout) { } /** - * Sets the retry policy for {@link EventHubClient}. If not specified, {@link Retry#getDefaultRetry()} is used. + * Sets the retry policy for {@link EventHubAsyncClient}. If not specified, {@link Retry#getDefaultRetry()} is used. * * @param retry The retry policy to use. * @return The updated {@link EventHubClientBuilder} object. @@ -251,8 +255,8 @@ public EventHubClientBuilder retry(Retry retry) { } /** - * Creates a new {@link EventHubClient} based on options set on this builder. Every time {@code buildAsyncClient()} - * is invoked, a new instance of {@link EventHubClient} is created. + * Creates a new {@link EventHubAsyncClient} based on options set on this builder. Every time {@code buildAsyncClient()} + * is invoked, a new instance of {@link EventHubAsyncClient} is created. * *

* The following options are used if ones are not specified in the builder: @@ -269,12 +273,12 @@ public EventHubClientBuilder retry(Retry retry) { * • If no scheduler is specified, an {@link Schedulers#elastic() elastic scheduler} is used.
  • * * - * @return A new {@link EventHubClient} instance with all the configured options. + * @return A new {@link EventHubAsyncClient} instance with all the configured options. * @throws IllegalArgumentException if the credentials have not been set using either {@link * #connectionString(String)} or {@link #credential(String, String, TokenCredential)}. Or, if a proxy is * specified but the transport type is not {@link TransportType#AMQP_WEB_SOCKETS web sockets}. */ - public EventHubClient buildAsyncClient() { + public EventHubAsyncClient buildAsyncClient() { configuration = configuration == null ? ConfigurationManager.getConfiguration().clone() : configuration; if (credentials == null) { @@ -320,7 +324,7 @@ public EventHubClient buildAsyncClient() { final ConnectionOptions parameters = new ConnectionOptions(host, eventHubPath, credentials, authorizationType, timeout, transport, retry, proxyConfiguration, scheduler); - return new EventHubClient(parameters, provider, handlerProvider); + return new EventHubAsyncClient(parameters, provider, handlerProvider); } private ProxyConfiguration getDefaultProxyConfiguration(Configuration configuration) { diff --git a/eventhubs/client/azure-eventhubs/src/main/java/com/azure/messaging/eventhubs/EventHubConsumer.java b/sdk/eventhubs/azure-eventhubs/src/main/java/com/azure/messaging/eventhubs/EventHubConsumer.java similarity index 91% rename from eventhubs/client/azure-eventhubs/src/main/java/com/azure/messaging/eventhubs/EventHubConsumer.java rename to sdk/eventhubs/azure-eventhubs/src/main/java/com/azure/messaging/eventhubs/EventHubConsumer.java index e66e509a26449..a76b11d722a6a 100644 --- a/eventhubs/client/azure-eventhubs/src/main/java/com/azure/messaging/eventhubs/EventHubConsumer.java +++ b/sdk/eventhubs/azure-eventhubs/src/main/java/com/azure/messaging/eventhubs/EventHubConsumer.java @@ -3,8 +3,11 @@ package com.azure.messaging.eventhubs; +import com.azure.core.implementation.annotation.Immutable; import com.azure.core.util.logging.ClientLogger; import com.azure.messaging.eventhubs.implementation.AmqpReceiveLink; +import com.azure.messaging.eventhubs.models.EventHubConsumerOptions; +import com.azure.messaging.eventhubs.models.EventPosition; import reactor.core.publisher.BaseSubscriber; import reactor.core.publisher.EmitterProcessor; import reactor.core.publisher.Flux; @@ -41,9 +44,10 @@ * * {@codesnippet com.azure.messaging.eventhubs.eventhubconsumer.receiveBackpressure} * - * @see EventHubClient#createConsumer(String, String, EventPosition) - * @see EventHubClient#createConsumer(String, String, EventPosition, EventHubConsumerOptions) + * @see EventHubAsyncClient#createConsumer(String, String, EventPosition) + * @see EventHubAsyncClient#createConsumer(String, String, EventPosition, EventHubConsumerOptions) */ +@Immutable public class EventHubConsumer implements Closeable { private static final AtomicReferenceFieldUpdater RECEIVE_LINK_FIELD_UPDATER = AtomicReferenceFieldUpdater.newUpdater(EventHubConsumer.class, AmqpReceiveLink.class, "receiveLink"); @@ -80,8 +84,7 @@ public class EventHubConsumer implements Closeable { } return link.receive().map(EventData::new); - }).timeout(operationTimeout) - .subscribeWith(emitterProcessor) + }).subscribeWith(emitterProcessor) .doOnSubscribe(subscription -> { AmqpReceiveLink existingLink = RECEIVE_LINK_FIELD_UPDATER.get(this); if (existingLink == null) { @@ -127,8 +130,8 @@ public void close() throws IOException { } /** - * Begin consuming events until there are no longer any subscribers, or the parent {@link 
EventHubClient#close() - * EventHubClient.close()} is called. + * Begin consuming events until there are no longer any subscribers, or the parent {@link EventHubAsyncClient#close() + * EventHubAsyncClient.close()} is called. * *

    Consuming events from Event Hub
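A minimal consuming sketch for the renamed types, assuming the createConsumer(String, String, EventPosition) overload and the Flux-based receive() shown in this diff; the partition id "0" and the log message are illustrative:

import com.azure.messaging.eventhubs.EventHubAsyncClient;
import com.azure.messaging.eventhubs.EventHubClientBuilder;
import com.azure.messaging.eventhubs.EventHubConsumer;
import com.azure.messaging.eventhubs.models.EventPosition;
import reactor.core.Disposable;

import java.io.IOException;

public class EventHubConsumerSketch {
    public static void main(String[] args) throws IOException {
        EventHubAsyncClient client = new EventHubClientBuilder()
            .connectionString(System.getenv("AZURE_EVENT_HUBS_CONNECTION_STRING"))
            .buildAsyncClient();

        // Receive from partition "0" of the default consumer group, starting with the latest event.
        EventHubConsumer consumer = client.createConsumer(
            EventHubAsyncClient.DEFAULT_CONSUMER_GROUP_NAME, "0", EventPosition.latest());

        // receive() keeps emitting events until all subscribers are gone or the parent client is closed.
        Disposable subscription = consumer.receive()
            .subscribe(event -> System.out.println("Received an event from partition 0"));

        // Later: stop receiving and release the underlying AMQP links.
        subscription.dispose();
        consumer.close();
        client.close();
    }
}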

    * diff --git a/eventhubs/client/azure-eventhubs/src/main/java/com/azure/messaging/eventhubs/EventHubProducer.java b/sdk/eventhubs/azure-eventhubs/src/main/java/com/azure/messaging/eventhubs/EventHubProducer.java similarity index 97% rename from eventhubs/client/azure-eventhubs/src/main/java/com/azure/messaging/eventhubs/EventHubProducer.java rename to sdk/eventhubs/azure-eventhubs/src/main/java/com/azure/messaging/eventhubs/EventHubProducer.java index 03989de29869b..e15cbebd17675 100644 --- a/eventhubs/client/azure-eventhubs/src/main/java/com/azure/messaging/eventhubs/EventHubProducer.java +++ b/sdk/eventhubs/azure-eventhubs/src/main/java/com/azure/messaging/eventhubs/EventHubProducer.java @@ -5,11 +5,14 @@ import com.azure.core.amqp.exception.AmqpException; import com.azure.core.amqp.exception.ErrorCondition; +import com.azure.core.implementation.annotation.Immutable; import com.azure.core.implementation.util.ImplUtils; import com.azure.core.util.logging.ClientLogger; import com.azure.messaging.eventhubs.implementation.AmqpSendLink; import com.azure.messaging.eventhubs.implementation.ErrorContextProvider; import com.azure.messaging.eventhubs.implementation.EventDataUtil; +import com.azure.messaging.eventhubs.models.EventHubProducerOptions; +import com.azure.messaging.eventhubs.models.SendOptions; import org.apache.qpid.proton.message.Message; import org.reactivestreams.Publisher; import reactor.core.publisher.Flux; @@ -74,8 +77,9 @@ * * {@codesnippet com.azure.messaging.eventhubs.eventhubproducer.send#publisher-sendOptions} * - * @see EventHubClient#createProducer() + * @see EventHubAsyncClient#createProducer() */ +@Immutable public class EventHubProducer implements Closeable { /** * The default maximum allowable size, in bytes, for a batch to be sent. 
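Similarly, a hedged sketch of sending through EventHubAsyncClient#createProducer(), assuming the send(Publisher, SendOptions) overload referenced by the codesnippet above returns a blockable Mono, as producer.send(events).block(TIMEOUT) does in the tests below; the partition key and payloads are illustrative:

import com.azure.messaging.eventhubs.EventData;
import com.azure.messaging.eventhubs.EventHubAsyncClient;
import com.azure.messaging.eventhubs.EventHubClientBuilder;
import com.azure.messaging.eventhubs.EventHubProducer;
import com.azure.messaging.eventhubs.models.SendOptions;
import reactor.core.publisher.Flux;

import java.io.IOException;
import java.nio.charset.StandardCharsets;

public class EventHubProducerSketch {
    public static void main(String[] args) throws IOException {
        EventHubAsyncClient client = new EventHubClientBuilder()
            .connectionString(System.getenv("AZURE_EVENT_HUBS_CONNECTION_STRING"))
            .buildAsyncClient();

        // A producer without a fixed partition id; events sharing a partition key land on the same partition.
        EventHubProducer producer = client.createProducer();
        SendOptions options = new SendOptions().partitionKey("telemetry");

        Flux<EventData> events = Flux.range(0, 3)
            .map(number -> new EventData(("event " + number).getBytes(StandardCharsets.UTF_8)));

        // send(...) is asynchronous; block() only to keep the sketch linear.
        producer.send(events, options).block();

        producer.close();
        client.close();
    }
}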
@@ -276,11 +280,11 @@ private static class EventDataCollector implements Collector client.createConsumer(EventHubClient.DEFAULT_CONSUMER_GROUP_NAME, id, EventPosition.latest())); + .map(id -> client.createConsumer(EventHubAsyncClient.DEFAULT_CONSUMER_GROUP_NAME, id, EventPosition.latest())); final List consumerSubscriptions = consumers.map(consumer -> { return consumer.receive().subscribe(event -> { diff --git a/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventDataBatchTest.java b/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventDataBatchTest.java similarity index 100% rename from eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventDataBatchTest.java rename to sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventDataBatchTest.java diff --git a/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventDataTest.java b/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventDataTest.java similarity index 100% rename from eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventDataTest.java rename to sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventDataTest.java diff --git a/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubClientBuilderTest.java b/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubClientBuilderTest.java similarity index 93% rename from eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubClientBuilderTest.java rename to sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubClientBuilderTest.java index 3070ed065883b..b8ab75b9237b2 100644 --- a/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubClientBuilderTest.java +++ b/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubClientBuilderTest.java @@ -5,6 +5,8 @@ import com.azure.core.amqp.TransportType; import com.azure.messaging.eventhubs.implementation.ClientConstants; +import com.azure.messaging.eventhubs.models.ProxyAuthenticationType; +import com.azure.messaging.eventhubs.models.ProxyConfiguration; import org.junit.Assert; import org.junit.Test; @@ -39,7 +41,7 @@ public void missingConnectionString() { @Test public void defaultProxyConfigurationBuilder() { final EventHubClientBuilder builder = new EventHubClientBuilder(); - final EventHubClient client = builder.connectionString(CORRECT_CONNECTION_STRING).buildAsyncClient(); + final EventHubAsyncClient client = builder.connectionString(CORRECT_CONNECTION_STRING).buildAsyncClient(); Assert.assertNotNull(client); } diff --git a/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubClientIntegrationTest.java b/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubClientIntegrationTest.java similarity index 89% rename from eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubClientIntegrationTest.java rename to sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubClientIntegrationTest.java index 7d3deba988ecf..ba80afd99c381 100644 --- a/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubClientIntegrationTest.java +++ 
b/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubClientIntegrationTest.java @@ -6,6 +6,9 @@ import com.azure.core.util.logging.ClientLogger; import com.azure.messaging.eventhubs.implementation.ApiTestBase; import com.azure.messaging.eventhubs.implementation.ReactorHandlerProvider; +import com.azure.messaging.eventhubs.models.EventHubConsumerOptions; +import com.azure.messaging.eventhubs.models.EventHubProducerOptions; +import com.azure.messaging.eventhubs.models.EventPosition; import org.junit.Assert; import org.junit.Ignore; import org.junit.Rule; @@ -24,16 +27,16 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; -import static com.azure.messaging.eventhubs.EventHubClient.DEFAULT_CONSUMER_GROUP_NAME; +import static com.azure.messaging.eventhubs.EventHubAsyncClient.DEFAULT_CONSUMER_GROUP_NAME; import static java.nio.charset.StandardCharsets.UTF_8; /** - * Tests scenarios on {@link EventHubClient}. + * Tests scenarios on {@link EventHubAsyncClient}. */ public class EventHubClientIntegrationTest extends ApiTestBase { private static final String PARTITION_ID = "0"; - private EventHubClient client; + private EventHubAsyncClient client; @Rule public TestName testName = new TestName(); @@ -50,7 +53,7 @@ protected String testName() { @Override protected void beforeTest() { ReactorHandlerProvider handlerProvider = new ReactorHandlerProvider(getReactorProvider()); - client = new EventHubClient(getConnectionOptions(), getReactorProvider(), handlerProvider); + client = new EventHubAsyncClient(getConnectionOptions(), getReactorProvider(), handlerProvider); } @Override @@ -60,7 +63,7 @@ protected void afterTest() { @Test(expected = NullPointerException.class) public void nullConstructor() throws NullPointerException { - new EventHubClient(null, null, null); + new EventHubAsyncClient(null, null, null); } /** @@ -146,9 +149,9 @@ public void parallelEventHubClients() throws InterruptedException { }); final CountDownLatch countDownLatch = new CountDownLatch(numberOfClients); - final EventHubClient[] clients = new EventHubClient[numberOfClients]; + final EventHubAsyncClient[] clients = new EventHubAsyncClient[numberOfClients]; for (int i = 0; i < numberOfClients; i++) { - clients[i] = new EventHubClient(getConnectionOptions(), getReactorProvider(), new ReactorHandlerProvider(getReactorProvider())); + clients[i] = new EventHubAsyncClient(getConnectionOptions(), getReactorProvider(), new ReactorHandlerProvider(getReactorProvider())); } final EventHubProducer producer = clients[0].createProducer(new EventHubProducerOptions().partitionId(PARTITION_ID)); @@ -156,7 +159,7 @@ public void parallelEventHubClients() throws InterruptedException { final Disposable.Composite subscriptions = Disposables.composite(); try { - for (final EventHubClient hubClient : clients) { + for (final EventHubAsyncClient hubClient : clients) { final EventHubConsumer consumer = hubClient.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, PARTITION_ID, EventPosition.latest()); consumers.add(consumer); diff --git a/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubClientMetadataIntegrationTest.java b/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubClientMetadataIntegrationTest.java similarity index 94% rename from eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubClientMetadataIntegrationTest.java rename to 
sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubClientMetadataIntegrationTest.java index 26a8499b9fbee..7758f5d9c0aae 100644 --- a/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubClientMetadataIntegrationTest.java +++ b/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubClientMetadataIntegrationTest.java @@ -14,6 +14,7 @@ import com.azure.messaging.eventhubs.implementation.ConnectionOptions; import com.azure.messaging.eventhubs.implementation.ConnectionStringProperties; import com.azure.messaging.eventhubs.implementation.ReactorHandlerProvider; +import com.azure.messaging.eventhubs.models.ProxyConfiguration; import org.junit.Assert; import org.junit.Rule; import org.junit.Test; @@ -31,7 +32,7 @@ */ public class EventHubClientMetadataIntegrationTest extends ApiTestBase { private final String[] expectedPartitionIds = new String[]{"0", "1"}; - private EventHubClient client; + private EventHubAsyncClient client; private ReactorHandlerProvider handlerProvider; private String eventHubPath; @@ -53,7 +54,7 @@ protected void beforeTest() { eventHubPath = getConnectionOptions().eventHubPath(); handlerProvider = new ReactorHandlerProvider(getReactorProvider()); - client = new EventHubClient(getConnectionOptions(), getReactorProvider(), handlerProvider); + client = new EventHubAsyncClient(getConnectionOptions(), getReactorProvider(), handlerProvider); } @Override @@ -134,7 +135,7 @@ public void getPartitionPropertiesInvalidToken() throws InvalidKeyException, NoS final ConnectionOptions connectionOptions = new ConnectionOptions(original.endpoint().getHost(), original.eventHubPath(), badTokenProvider, getAuthorizationType(), TIMEOUT, TransportType.AMQP, Retry.getNoRetry(), ProxyConfiguration.SYSTEM_DEFAULTS, getConnectionOptions().scheduler()); - final EventHubClient client = new EventHubClient(connectionOptions, getReactorProvider(), handlerProvider); + final EventHubAsyncClient client = new EventHubAsyncClient(connectionOptions, getReactorProvider(), handlerProvider); // Act & Assert StepVerifier.create(client.getProperties()) @@ -159,7 +160,7 @@ public void getPartitionPropertiesNonExistentHub() { final ConnectionOptions connectionOptions = new ConnectionOptions(original.endpoint().getHost(), "invalid-event-hub", getTokenCredential(), getAuthorizationType(), TIMEOUT, TransportType.AMQP, Retry.getNoRetry(), ProxyConfiguration.SYSTEM_DEFAULTS, getConnectionOptions().scheduler()); - final EventHubClient client = new EventHubClient(connectionOptions, getReactorProvider(), handlerProvider); + final EventHubAsyncClient client = new EventHubAsyncClient(connectionOptions, getReactorProvider(), handlerProvider); // Act & Assert StepVerifier.create(client.getPartitionIds()) diff --git a/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubConsumerIntegrationTest.java b/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubConsumerIntegrationTest.java similarity index 92% rename from eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubConsumerIntegrationTest.java rename to sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubConsumerIntegrationTest.java index dd512717b464d..46c57aaca1c6b 100644 --- a/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubConsumerIntegrationTest.java +++ 
b/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubConsumerIntegrationTest.java @@ -11,6 +11,10 @@ import com.azure.messaging.eventhubs.implementation.ConnectionOptions; import com.azure.messaging.eventhubs.implementation.ConnectionStringProperties; import com.azure.messaging.eventhubs.implementation.ReactorHandlerProvider; +import com.azure.messaging.eventhubs.models.EventHubConsumerOptions; +import com.azure.messaging.eventhubs.models.EventHubProducerOptions; +import com.azure.messaging.eventhubs.models.EventPosition; +import com.azure.messaging.eventhubs.models.ProxyConfiguration; import org.junit.Assert; import org.junit.Ignore; import org.junit.Rule; @@ -31,7 +35,7 @@ import java.util.function.Consumer; import static com.azure.core.amqp.exception.ErrorCondition.RESOURCE_LIMIT_EXCEEDED; -import static com.azure.messaging.eventhubs.EventHubClient.DEFAULT_CONSUMER_GROUP_NAME; +import static com.azure.messaging.eventhubs.EventHubAsyncClient.DEFAULT_CONSUMER_GROUP_NAME; import static java.nio.charset.StandardCharsets.UTF_8; /** @@ -46,7 +50,7 @@ public class EventHubConsumerIntegrationTest extends ApiTestBase { // The maximum number of receivers on a partition + consumer group is 5. private static final int MAX_NUMBER_OF_CONSUMERS = 5; - private EventHubClient client; + private EventHubAsyncClient client; public EventHubConsumerIntegrationTest() { super(new ClientLogger(EventHubConsumerIntegrationTest.class)); @@ -68,7 +72,7 @@ protected void beforeTest() { properties.eventHubPath(), getTokenCredential(), getAuthorizationType(), TIMEOUT, TransportType.AMQP, Retry.getNoRetry(), ProxyConfiguration.SYSTEM_DEFAULTS, Schedulers.newSingle("single-threaded")); - client = new EventHubClient(connectionOptions, getReactorProvider(), handlerProvider); + client = new EventHubAsyncClient(connectionOptions, getReactorProvider(), handlerProvider); } @Override @@ -77,7 +81,7 @@ protected void afterTest() { } /** - * Tests that the same EventHubClient can create multiple EventHubConsumers listening to different partitions. + * Tests that the same EventHubAsyncClient can create multiple EventHubConsumers listening to different partitions. 
*/ @Test public void parallelCreationOfReceivers() { diff --git a/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubConsumerTest.java b/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubConsumerTest.java similarity index 99% rename from eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubConsumerTest.java rename to sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubConsumerTest.java index 8b98ced4b4fb1..3271864c39cea 100644 --- a/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubConsumerTest.java +++ b/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubConsumerTest.java @@ -7,6 +7,7 @@ import com.azure.core.amqp.Retry; import com.azure.core.util.logging.ClientLogger; import com.azure.messaging.eventhubs.implementation.AmqpReceiveLink; +import com.azure.messaging.eventhubs.models.EventHubConsumerOptions; import org.apache.qpid.proton.message.Message; import org.junit.After; import org.junit.Assert; diff --git a/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubProducerTest.java b/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubProducerTest.java similarity index 95% rename from eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubProducerTest.java rename to sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubProducerTest.java index 58bfeb2f82961..56e44f777736f 100644 --- a/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubProducerTest.java +++ b/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubProducerTest.java @@ -7,6 +7,8 @@ import com.azure.core.amqp.exception.AmqpException; import com.azure.core.amqp.exception.ErrorCondition; import com.azure.messaging.eventhubs.implementation.AmqpSendLink; +import com.azure.messaging.eventhubs.models.EventHubProducerOptions; +import com.azure.messaging.eventhubs.models.SendOptions; import org.apache.qpid.proton.amqp.messaging.Section; import org.apache.qpid.proton.message.Message; import org.junit.After; @@ -74,7 +76,7 @@ public void sendMultipleMessages() { when(sendLink.send(anyList())).thenReturn(Mono.empty()); final int maxMessageSize = 16 * 1024; - final SendOptions options = new SendOptions().maximumSizeInBytes(maxMessageSize); + final SendOptions options = new SendOptions(); final EventHubProducerOptions producerOptions = new EventHubProducerOptions().retry(Retry.getNoRetry()).timeout(Duration.ofSeconds(30)); final EventHubProducer producer = new EventHubProducer(Mono.just(sendLink), producerOptions); @@ -101,8 +103,7 @@ public void sendSingleMessage() { when(sendLink.send(any(Message.class))).thenReturn(Mono.empty()); - final int maxMessageSize = 16 * 1024; - final SendOptions options = new SendOptions().maximumSizeInBytes(maxMessageSize); + final SendOptions options = new SendOptions(); final EventHubProducerOptions producerOptions = new EventHubProducerOptions().retry(Retry.getNoRetry()).timeout(Duration.ofSeconds(30)); final EventHubProducer producer = new EventHubProducer(Mono.just(sendLink), producerOptions); @@ -154,14 +155,13 @@ public void partitionProducerCannotSendWithPartitionKey() { */ @Test public void sendTooManyMessages() { - final Flux testData = Flux.range(0, 20).flatMap(number -> { + final Flux testData = Flux.range(0, 500).flatMap(number 
-> { final EventData data = new EventData(CONTENTS.getBytes(UTF_8)); return Flux.just(data); }); final AmqpSendLink sendLink = mock(AmqpSendLink.class); - final int maxMessageSize = 16 * 1024; - final SendOptions options = new SendOptions().maximumSizeInBytes(maxMessageSize); + final SendOptions options = new SendOptions(); final EventHubProducerOptions producerOptions = new EventHubProducerOptions().retry(Retry.getNoRetry()).timeout(Duration.ofSeconds(30)); final EventHubProducer producer = new EventHubProducer(Mono.just(sendLink), producerOptions); diff --git a/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubPropertiesTest.java b/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubPropertiesTest.java similarity index 100% rename from eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubPropertiesTest.java rename to sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubPropertiesTest.java diff --git a/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubSharedAccessKeyCredentialTest.java b/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubSharedAccessKeyCredentialTest.java similarity index 100% rename from eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubSharedAccessKeyCredentialTest.java rename to sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubSharedAccessKeyCredentialTest.java diff --git a/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventPositionIntegrationTest.java b/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventPositionIntegrationTest.java similarity index 96% rename from eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventPositionIntegrationTest.java rename to sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventPositionIntegrationTest.java index d9337acf5435b..30921adf1ba5a 100644 --- a/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventPositionIntegrationTest.java +++ b/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventPositionIntegrationTest.java @@ -6,6 +6,8 @@ import com.azure.core.util.logging.ClientLogger; import com.azure.messaging.eventhubs.implementation.ApiTestBase; import com.azure.messaging.eventhubs.implementation.ReactorHandlerProvider; +import com.azure.messaging.eventhubs.models.EventHubProducerOptions; +import com.azure.messaging.eventhubs.models.EventPosition; import org.junit.Assert; import org.junit.Ignore; import org.junit.Rule; @@ -25,7 +27,8 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; -import static com.azure.messaging.eventhubs.EventHubClient.DEFAULT_CONSUMER_GROUP_NAME; +import static com.azure.messaging.eventhubs.EventHubAsyncClient.DEFAULT_CONSUMER_GROUP_NAME; +import static com.azure.messaging.eventhubs.TestUtils.MESSAGE_TRACKING_ID; import static com.azure.messaging.eventhubs.TestUtils.isMatchingEvent; import static java.nio.charset.StandardCharsets.UTF_8; @@ -41,11 +44,10 @@ public class EventPositionIntegrationTest extends ApiTestBase { private static final AtomicBoolean HAS_PUSHED_EVENTS = new AtomicBoolean(); private static final AtomicReference EVENTS_PUSHED = new AtomicReference<>(); private static final String MESSAGE_POSITION_ID = "message-position"; 
- private static final String MESSAGE_TRACKING_ID = "message-tracking-id"; private static final String MESSAGE_TRACKING_VALUE = UUID.randomUUID().toString(); private static final AtomicReference MESSAGES_PUSHED_INSTANT = new AtomicReference<>(); - private EventHubClient client; + private EventHubAsyncClient client; public EventPositionIntegrationTest() { super(new ClientLogger(EventPositionIntegrationTest.class)); @@ -64,7 +66,7 @@ protected void beforeTest() { skipIfNotRecordMode(); final ReactorHandlerProvider handlerProvider = new ReactorHandlerProvider(getReactorProvider()); - client = new EventHubClient(getConnectionOptions(), getReactorProvider(), handlerProvider); + client = new EventHubAsyncClient(getConnectionOptions(), getReactorProvider(), handlerProvider); setupEventTestData(client); } @@ -330,7 +332,7 @@ public void receiveMessageFromSequenceNumberNonInclusive() { * When we run this test, we check if there have been events already pushed to the partition, if not, we push some * events there. */ - private void setupEventTestData(EventHubClient client) { + private void setupEventTestData(EventHubAsyncClient client) { if (HAS_PUSHED_EVENTS.getAndSet(true)) { logger.info("Already pushed events to partition. Skipping."); return; diff --git a/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/InteropAmqpPropertiesTest.java b/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/InteropAmqpPropertiesTest.java similarity index 95% rename from eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/InteropAmqpPropertiesTest.java rename to sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/InteropAmqpPropertiesTest.java index d8d47b64accaf..c3aefcb3c4ebf 100644 --- a/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/InteropAmqpPropertiesTest.java +++ b/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/InteropAmqpPropertiesTest.java @@ -8,6 +8,8 @@ import com.azure.core.util.logging.ClientLogger; import com.azure.messaging.eventhubs.implementation.ApiTestBase; import com.azure.messaging.eventhubs.implementation.ReactorHandlerProvider; +import com.azure.messaging.eventhubs.models.EventHubProducerOptions; +import com.azure.messaging.eventhubs.models.EventPosition; import org.apache.qpid.proton.Proton; import org.apache.qpid.proton.amqp.Binary; import org.apache.qpid.proton.amqp.Symbol; @@ -36,7 +38,7 @@ public class InteropAmqpPropertiesTest extends ApiTestBase { private static final String PARTITION_ID = "0"; private static final String PAYLOAD = "test-message"; - private EventHubClient client; + private EventHubAsyncClient client; private EventHubProducer producer; private EventHubConsumer consumer; @@ -55,11 +57,11 @@ protected String testName() { @Override protected void beforeTest() { final ReactorHandlerProvider handlerProvider = new ReactorHandlerProvider(getReactorProvider()); - client = new EventHubClient(getConnectionOptions(), getReactorProvider(), handlerProvider); + client = new EventHubAsyncClient(getConnectionOptions(), getReactorProvider(), handlerProvider); final EventHubProducerOptions producerOptions = new EventHubProducerOptions().partitionId(PARTITION_ID).retry(Retry.getNoRetry()).timeout(Duration.ofSeconds(30)); producer = client.createProducer(producerOptions); - consumer = client.createConsumer(EventHubClient.DEFAULT_CONSUMER_GROUP_NAME, PARTITION_ID, EventPosition.latest()); + consumer = 
client.createConsumer(EventHubAsyncClient.DEFAULT_CONSUMER_GROUP_NAME, PARTITION_ID, EventPosition.latest()); } @Override diff --git a/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/PartitionPropertiesTest.java b/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/PartitionPropertiesTest.java similarity index 100% rename from eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/PartitionPropertiesTest.java rename to sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/PartitionPropertiesTest.java diff --git a/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/SendOptionsTest.java b/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/SendOptionsTest.java similarity index 68% rename from eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/SendOptionsTest.java rename to sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/SendOptionsTest.java index 141ee2e961fd6..3905cfb8fcc24 100644 --- a/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/SendOptionsTest.java +++ b/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/SendOptionsTest.java @@ -3,6 +3,7 @@ package com.azure.messaging.eventhubs; +import com.azure.messaging.eventhubs.models.SendOptions; import org.junit.Assert; import org.junit.Test; @@ -15,7 +16,7 @@ public class SendOptionsTest { public void createDefault() { SendOptions options = new SendOptions(); - Assert.assertEquals(EventHubProducer.MAX_MESSAGE_LENGTH_BYTES, options.maximumSizeInBytes()); + // Assert.assertEquals(EventHubProducer.MAX_MESSAGE_LENGTH_BYTES, options.maximumSizeInBytes()); Assert.assertNull(options.partitionKey()); } @@ -27,9 +28,9 @@ public void setMaximumSize() { int size = 1024; SendOptions options = new SendOptions(); - options.maximumSizeInBytes(size); + // options.maximumSizeInBytes(size); - Assert.assertEquals(size, options.maximumSizeInBytes()); + // Assert.assertEquals(size, options.maximumSizeInBytes()); Assert.assertNull(options.partitionKey()); } @@ -43,7 +44,7 @@ public void setPartitionKey() { options.partitionKey(partitionKey); - Assert.assertEquals(EventHubProducer.MAX_MESSAGE_LENGTH_BYTES, options.maximumSizeInBytes()); + // Assert.assertEquals(EventHubProducer.MAX_MESSAGE_LENGTH_BYTES, options.maximumSizeInBytes()); Assert.assertEquals(partitionKey, options.partitionKey()); } @@ -55,18 +56,19 @@ public void cloneIdentical() { // Arrange String partitionKey = "My partition key"; int size = 800; - SendOptions options = new SendOptions().partitionKey(partitionKey).maximumSizeInBytes(size); + // SendOptions options = new SendOptions().partitionKey(partitionKey).maximumSizeInBytes(size); + SendOptions options = new SendOptions().partitionKey(partitionKey); // Act SendOptions clone = options.clone(); // Assert Assert.assertNotSame(clone, options); - Assert.assertEquals(size, options.maximumSizeInBytes()); + // Assert.assertEquals(size, options.maximumSizeInBytes()); Assert.assertEquals(partitionKey, options.partitionKey()); Assert.assertEquals(partitionKey, clone.partitionKey()); - Assert.assertEquals(size, clone.maximumSizeInBytes()); + // Assert.assertEquals(size, clone.maximumSizeInBytes()); } @@ -82,18 +84,19 @@ public void cloneModifyContents() { String partitionKey = "A new partition key"; int size = 24; - SendOptions options = new 
SendOptions().partitionKey(originalPartitionKey).maximumSizeInBytes(originalSize); + // SendOptions options = new SendOptions().partitionKey(originalPartitionKey).maximumSizeInBytes(originalSize); + SendOptions options = new SendOptions().partitionKey(originalPartitionKey); SendOptions clone = options.clone(); // Act - clone.partitionKey(partitionKey) - .maximumSizeInBytes(size); + // clone.partitionKey(partitionKey).maximumSizeInBytes(size); + clone.partitionKey(partitionKey); // Assert Assert.assertEquals(partitionKey, clone.partitionKey()); - Assert.assertEquals(size, clone.maximumSizeInBytes()); + // Assert.assertEquals(size, clone.maximumSizeInBytes()); - Assert.assertEquals(originalSize, options.maximumSizeInBytes()); + // Assert.assertEquals(originalSize, options.maximumSizeInBytes()); Assert.assertEquals(originalPartitionKey, options.partitionKey()); } } diff --git a/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/SetPrefetchCountTest.java b/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/SetPrefetchCountTest.java new file mode 100644 index 0000000000000..be7a5dd4dd529 --- /dev/null +++ b/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/SetPrefetchCountTest.java @@ -0,0 +1,159 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.messaging.eventhubs; + +import com.azure.core.amqp.Retry; +import com.azure.core.util.logging.ClientLogger; +import com.azure.messaging.eventhubs.implementation.ApiTestBase; +import com.azure.messaging.eventhubs.implementation.ReactorHandlerProvider; +import com.azure.messaging.eventhubs.models.EventHubConsumerOptions; +import com.azure.messaging.eventhubs.models.EventHubProducerOptions; +import com.azure.messaging.eventhubs.models.EventPosition; +import org.junit.Assert; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TestName; +import reactor.core.Disposable; +import reactor.core.publisher.Flux; + +import java.time.Instant; +import java.util.UUID; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; + +import static com.azure.messaging.eventhubs.TestUtils.isMatchingEvent; + +/** + * Verifies we can use various prefetch options with {@link EventHubConsumer}. + */ +public class SetPrefetchCountTest extends ApiTestBase { + private static final String PARTITION_ID = "1"; + // Default number of events to fetch when creating the consumer. + private static final int DEFAULT_PREFETCH_COUNT = 500; + + // Set a large number of events to send to the service. + private static final int NUMBER_OF_EVENTS = DEFAULT_PREFETCH_COUNT * 3; + + // We use these values to keep track of the events we've pushed to the service and ensure the events we receive are + // our own. 
+ private static final AtomicBoolean HAS_PUSHED_EVENTS = new AtomicBoolean(); + private static final String MESSAGE_TRACKING_VALUE = UUID.randomUUID().toString(); + private static final AtomicReference MESSAGES_PUSHED_INSTANT = new AtomicReference<>(); + + private EventHubAsyncClient client; + private EventHubConsumer consumer; + + @Rule + public TestName testName = new TestName(); + + public SetPrefetchCountTest() { + super(new ClientLogger(SetPrefetchCountTest.class)); + } + + @Override + protected String testName() { + return testName.getMethodName(); + } + + @Override + protected void beforeTest() { + skipIfNotRecordMode(); + + final ReactorHandlerProvider handlerProvider = new ReactorHandlerProvider(getReactorProvider()); + client = new EventHubAsyncClient(getConnectionOptions(), getReactorProvider(), handlerProvider); + + setupEventTestData(client); + } + + @Override + protected void afterTest() { + dispose(consumer, client); + } + + /** + * Test that we can use a very large prefetch number with {@link EventHubConsumerOptions} + */ + @Test + public void setLargePrefetchCount() throws InterruptedException { + // Arrange + // Since we cannot test receiving very large prefetch like 10000 in a unit test, DefaultPrefetchCount * 3 was + // chosen + final int eventCount = NUMBER_OF_EVENTS; + final CountDownLatch countDownLatch = new CountDownLatch(eventCount); + final EventHubConsumerOptions options = new EventHubConsumerOptions() + .retry(Retry.getDefaultRetry()) + .prefetchCount(2000); + + consumer = client.createConsumer(EventHubAsyncClient.DEFAULT_CONSUMER_GROUP_NAME, PARTITION_ID, + EventPosition.fromEnqueuedTime(MESSAGES_PUSHED_INSTANT.get()), options); + + final Disposable subscription = consumer.receive() + .filter(x -> isMatchingEvent(x, MESSAGE_TRACKING_VALUE)) + .take(eventCount).subscribe(event -> countDownLatch.countDown()); + + // Act + try { + countDownLatch.await(1, TimeUnit.MINUTES); + + // Assert + Assert.assertEquals(0, countDownLatch.getCount()); + } finally { + subscription.dispose(); + } + } + + /** + * Test for small prefetch count on EventHubConsumer continues to get messages. + */ + @Test + public void setSmallPrefetchCount() throws InterruptedException { + // Arrange + final int eventCount = 30; + final CountDownLatch countDownLatch = new CountDownLatch(eventCount); + final EventHubConsumerOptions options = new EventHubConsumerOptions().prefetchCount(11); + + consumer = client.createConsumer(EventHubAsyncClient.DEFAULT_CONSUMER_GROUP_NAME, PARTITION_ID, + EventPosition.fromEnqueuedTime(MESSAGES_PUSHED_INSTANT.get()), options); + + final Disposable subscription = consumer.receive().filter(x -> isMatchingEvent(x, MESSAGE_TRACKING_VALUE)) + .take(eventCount).subscribe(event -> countDownLatch.countDown()); + + try { + // Act + countDownLatch.await(45, TimeUnit.SECONDS); + + // Assert + Assert.assertEquals(0, countDownLatch.getCount()); + } finally { + subscription.dispose(); + } + } + + /** + * When we run this test, we check if there have been events already pushed to the partition, if not, we push some + * events there. + */ + private void setupEventTestData(EventHubAsyncClient client) { + if (HAS_PUSHED_EVENTS.getAndSet(true)) { + logger.info("Already pushed events to partition. Skipping."); + return; + } + + logger.info("Pushing events to partition. 
Message tracking value: {}", MESSAGE_TRACKING_VALUE); + + final EventHubProducerOptions producerOptions = new EventHubProducerOptions() + .partitionId(PARTITION_ID); + final EventHubProducer producer = client.createProducer(producerOptions); + final Flux events = TestUtils.getEvents(NUMBER_OF_EVENTS, MESSAGE_TRACKING_VALUE); + + try { + MESSAGES_PUSHED_INSTANT.set(Instant.now()); + producer.send(events).block(TIMEOUT); + } finally { + dispose(producer); + } + } +} diff --git a/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/TestUtils.java b/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/TestUtils.java similarity index 100% rename from eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/TestUtils.java rename to sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/TestUtils.java diff --git a/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/ActiveClientTokenManagerTest.java b/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/ActiveClientTokenManagerTest.java similarity index 100% rename from eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/ActiveClientTokenManagerTest.java rename to sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/ActiveClientTokenManagerTest.java diff --git a/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/ApiTestBase.java b/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/ApiTestBase.java similarity index 98% rename from eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/ApiTestBase.java rename to sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/ApiTestBase.java index 544c105838d2e..c6dfeec5ff7e3 100644 --- a/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/ApiTestBase.java +++ b/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/ApiTestBase.java @@ -11,7 +11,7 @@ import com.azure.core.test.TestMode; import com.azure.core.util.logging.ClientLogger; import com.azure.messaging.eventhubs.EventHubSharedAccessKeyCredential; -import com.azure.messaging.eventhubs.ProxyConfiguration; +import com.azure.messaging.eventhubs.models.ProxyConfiguration; import org.apache.qpid.proton.reactor.Reactor; import org.apache.qpid.proton.reactor.Selectable; import org.junit.After; diff --git a/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/CBSChannelTest.java b/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/CBSChannelTest.java similarity index 100% rename from eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/CBSChannelTest.java rename to sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/CBSChannelTest.java diff --git a/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/ConnectionStringPropertiesTest.java b/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/ConnectionStringPropertiesTest.java similarity index 100% rename from eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/ConnectionStringPropertiesTest.java 
rename to sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/ConnectionStringPropertiesTest.java diff --git a/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/EndpointStateNotifierBaseTest.java b/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/EndpointStateNotifierBaseTest.java similarity index 100% rename from eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/EndpointStateNotifierBaseTest.java rename to sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/EndpointStateNotifierBaseTest.java diff --git a/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/MockReactorHandlerProvider.java b/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/MockReactorHandlerProvider.java similarity index 100% rename from eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/MockReactorHandlerProvider.java rename to sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/MockReactorHandlerProvider.java diff --git a/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/MockReactorProvider.java b/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/MockReactorProvider.java similarity index 100% rename from eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/MockReactorProvider.java rename to sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/MockReactorProvider.java diff --git a/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/ReactorConnectionIntegrationTest.java b/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/ReactorConnectionIntegrationTest.java similarity index 100% rename from eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/ReactorConnectionIntegrationTest.java rename to sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/ReactorConnectionIntegrationTest.java diff --git a/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/ReactorConnectionTest.java b/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/ReactorConnectionTest.java similarity index 99% rename from eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/ReactorConnectionTest.java rename to sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/ReactorConnectionTest.java index 449fe21e82b76..e631a76f3fb96 100644 --- a/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/ReactorConnectionTest.java +++ b/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/ReactorConnectionTest.java @@ -8,7 +8,7 @@ import com.azure.core.amqp.Retry; import com.azure.core.amqp.TransportType; import com.azure.core.credentials.TokenCredential; -import com.azure.messaging.eventhubs.ProxyConfiguration; +import com.azure.messaging.eventhubs.models.ProxyConfiguration; import com.azure.messaging.eventhubs.implementation.handler.ConnectionHandler; import 
com.azure.messaging.eventhubs.implementation.handler.SessionHandler; import org.apache.qpid.proton.amqp.Symbol; diff --git a/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/ReactorReceiverTest.java b/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/ReactorReceiverTest.java similarity index 100% rename from eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/ReactorReceiverTest.java rename to sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/ReactorReceiverTest.java diff --git a/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/ReactorSessionTest.java b/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/ReactorSessionTest.java similarity index 100% rename from eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/ReactorSessionTest.java rename to sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/ReactorSessionTest.java diff --git a/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/TokenResourceProviderTest.java b/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/TokenResourceProviderTest.java similarity index 100% rename from eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/TokenResourceProviderTest.java rename to sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/TokenResourceProviderTest.java diff --git a/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/handler/ConnectionHandlerTest.java b/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/handler/ConnectionHandlerTest.java similarity index 100% rename from eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/handler/ConnectionHandlerTest.java rename to sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/handler/ConnectionHandlerTest.java diff --git a/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/handler/DispatchHandlerTest.java b/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/handler/DispatchHandlerTest.java similarity index 100% rename from eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/handler/DispatchHandlerTest.java rename to sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/handler/DispatchHandlerTest.java diff --git a/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/handler/HandlerTest.java b/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/handler/HandlerTest.java similarity index 100% rename from eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/handler/HandlerTest.java rename to sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/implementation/handler/HandlerTest.java diff --git a/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubConsumerOptionsTest.java 
b/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/models/EventHubConsumerOptionsTest.java similarity index 98% rename from eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubConsumerOptionsTest.java rename to sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/models/EventHubConsumerOptionsTest.java index a7ee3620c1e50..c9b240487f2f9 100644 --- a/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubConsumerOptionsTest.java +++ b/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/models/EventHubConsumerOptionsTest.java @@ -1,7 +1,7 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -package com.azure.messaging.eventhubs; +package com.azure.messaging.eventhubs.models; import org.junit.Assert; import org.junit.Test; diff --git a/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubProducerOptionsTest.java b/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/models/EventHubProducerOptionsTest.java similarity index 96% rename from eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubProducerOptionsTest.java rename to sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/models/EventHubProducerOptionsTest.java index 12db5b6cc92da..eba44d5b987f0 100644 --- a/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/EventHubProducerOptionsTest.java +++ b/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/models/EventHubProducerOptionsTest.java @@ -1,7 +1,7 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -package com.azure.messaging.eventhubs; +package com.azure.messaging.eventhubs.models; import com.azure.core.amqp.ExponentialRetry; import com.azure.core.amqp.Retry; diff --git a/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/ProxyConfigurationTest.java b/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/models/ProxyConfigurationTest.java similarity index 97% rename from eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/ProxyConfigurationTest.java rename to sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/models/ProxyConfigurationTest.java index 5ab9fc2ca0029..f34722f9127b7 100644 --- a/eventhubs/client/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/ProxyConfigurationTest.java +++ b/sdk/eventhubs/azure-eventhubs/src/test/java/com/azure/messaging/eventhubs/models/ProxyConfigurationTest.java @@ -1,7 +1,7 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -package com.azure.messaging.eventhubs; +package com.azure.messaging.eventhubs.models; import org.junit.Assert; import org.junit.Test; @@ -14,7 +14,7 @@ import java.net.InetSocketAddress; import java.net.Proxy; -import static com.azure.messaging.eventhubs.ProxyConfiguration.SYSTEM_DEFAULTS; +import static com.azure.messaging.eventhubs.models.ProxyConfiguration.SYSTEM_DEFAULTS; @RunWith(Theories.class) public class ProxyConfigurationTest { diff --git a/sdk/eventhubs/ci.yml b/sdk/eventhubs/ci.yml new file mode 100644 index 0000000000000..545d53726cace --- /dev/null +++ b/sdk/eventhubs/ci.yml @@ -0,0 +1,23 @@ +# DO NOT EDIT THIS FILE +# This file is generated automatically and any changes will be lost. 
+ +trigger: + branches: + include: + - master + paths: + include: + - sdk/eventhubs/ + +pr: + branches: + include: + - master + paths: + include: + - sdk/eventhubs/ + +jobs: + - template: ../../eng/pipelines/templates/jobs/archetype-sdk-client.yml + parameters: + ServiceDirectory: eventhubs \ No newline at end of file diff --git a/eventhubs/data-plane/azure-eventhubs-eph/.gitignore b/sdk/eventhubs/microsoft-azure-eventhubs-eph/.gitignore similarity index 100% rename from eventhubs/data-plane/azure-eventhubs-eph/.gitignore rename to sdk/eventhubs/microsoft-azure-eventhubs-eph/.gitignore diff --git a/eventhubs/data-plane/azure-eventhubs-eph/Overview.md b/sdk/eventhubs/microsoft-azure-eventhubs-eph/Overview.md similarity index 100% rename from eventhubs/data-plane/azure-eventhubs-eph/Overview.md rename to sdk/eventhubs/microsoft-azure-eventhubs-eph/Overview.md diff --git a/eventhubs/data-plane/azure-eventhubs-eph/Readme.md b/sdk/eventhubs/microsoft-azure-eventhubs-eph/Readme.md similarity index 100% rename from eventhubs/data-plane/azure-eventhubs-eph/Readme.md rename to sdk/eventhubs/microsoft-azure-eventhubs-eph/Readme.md diff --git a/eventhubs/data-plane/azure-eventhubs-eph/pom.xml b/sdk/eventhubs/microsoft-azure-eventhubs-eph/pom.xml similarity index 97% rename from eventhubs/data-plane/azure-eventhubs-eph/pom.xml rename to sdk/eventhubs/microsoft-azure-eventhubs-eph/pom.xml index 0a3bd414a75e3..fad46c57eae75 100644 --- a/eventhubs/data-plane/azure-eventhubs-eph/pom.xml +++ b/sdk/eventhubs/microsoft-azure-eventhubs-eph/pom.xml @@ -8,7 +8,7 @@ com.microsoft.azure azure-eventhubs-clients 2.3.1 - ../pom.xml + ../pom.data.xml 4.0.0 diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/AzureBlobLease.java b/sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/AzureBlobLease.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/AzureBlobLease.java rename to sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/AzureBlobLease.java diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/AzureStorageCheckpointLeaseManager.java b/sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/AzureStorageCheckpointLeaseManager.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/AzureStorageCheckpointLeaseManager.java rename to sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/AzureStorageCheckpointLeaseManager.java diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/AzureStoragePartitionManagerOptions.java b/sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/AzureStoragePartitionManagerOptions.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/AzureStoragePartitionManagerOptions.java rename to sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/AzureStoragePartitionManagerOptions.java diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/BaseLease.java 
b/sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/BaseLease.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/BaseLease.java rename to sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/BaseLease.java diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/Checkpoint.java b/sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/Checkpoint.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/Checkpoint.java rename to sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/Checkpoint.java diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/Closable.java b/sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/Closable.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/Closable.java rename to sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/Closable.java diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/CloseReason.java b/sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/CloseReason.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/CloseReason.java rename to sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/CloseReason.java diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/CompleteLease.java b/sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/CompleteLease.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/CompleteLease.java rename to sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/CompleteLease.java diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/DefaultEventProcessorFactory.java b/sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/DefaultEventProcessorFactory.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/DefaultEventProcessorFactory.java rename to sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/DefaultEventProcessorFactory.java diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/EventProcessorHost.java b/sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/EventProcessorHost.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/EventProcessorHost.java rename to sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/EventProcessorHost.java diff --git 
a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/EventProcessorHostActionStrings.java b/sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/EventProcessorHostActionStrings.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/EventProcessorHostActionStrings.java rename to sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/EventProcessorHostActionStrings.java diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/EventProcessorOptions.java b/sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/EventProcessorOptions.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/EventProcessorOptions.java rename to sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/EventProcessorOptions.java diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/ExceptionReceivedEventArgs.java b/sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/ExceptionReceivedEventArgs.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/ExceptionReceivedEventArgs.java rename to sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/ExceptionReceivedEventArgs.java diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/ExceptionWithAction.java b/sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/ExceptionWithAction.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/ExceptionWithAction.java rename to sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/ExceptionWithAction.java diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/HostContext.java b/sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/HostContext.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/HostContext.java rename to sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/HostContext.java diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/ICheckpointManager.java b/sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/ICheckpointManager.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/ICheckpointManager.java rename to sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/ICheckpointManager.java diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/IEventProcessor.java b/sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/IEventProcessor.java similarity index 100% 
rename from eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/IEventProcessor.java rename to sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/IEventProcessor.java diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/IEventProcessorFactory.java b/sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/IEventProcessorFactory.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/IEventProcessorFactory.java rename to sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/IEventProcessorFactory.java diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/ILeaseManager.java b/sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/ILeaseManager.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/ILeaseManager.java rename to sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/ILeaseManager.java diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/InMemoryCheckpointManager.java b/sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/InMemoryCheckpointManager.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/InMemoryCheckpointManager.java rename to sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/InMemoryCheckpointManager.java diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/InMemoryLeaseManager.java b/sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/InMemoryLeaseManager.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/InMemoryLeaseManager.java rename to sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/InMemoryLeaseManager.java diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/LeaseLostException.java b/sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/LeaseLostException.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/LeaseLostException.java rename to sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/LeaseLostException.java diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/LoggingUtils.java b/sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/LoggingUtils.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/LoggingUtils.java rename to sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/LoggingUtils.java diff --git 
a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/PartitionContext.java b/sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/PartitionContext.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/PartitionContext.java rename to sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/PartitionContext.java diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/PartitionManager.java b/sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/PartitionManager.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/PartitionManager.java rename to sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/PartitionManager.java diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/PartitionManagerOptions.java b/sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/PartitionManagerOptions.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/PartitionManagerOptions.java rename to sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/PartitionManagerOptions.java diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/PartitionPump.java b/sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/PartitionPump.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/PartitionPump.java rename to sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/PartitionPump.java diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/PartitionScanner.java b/sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/PartitionScanner.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/PartitionScanner.java rename to sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/PartitionScanner.java diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/PumpManager.java b/sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/PumpManager.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/PumpManager.java rename to sdk/eventhubs/microsoft-azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/PumpManager.java diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/CheckpointManagerTest.java b/sdk/eventhubs/microsoft-azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/CheckpointManagerTest.java similarity index 100% rename from 
eventhubs/data-plane/azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/CheckpointManagerTest.java rename to sdk/eventhubs/microsoft-azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/CheckpointManagerTest.java diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/DummyPump.java b/sdk/eventhubs/microsoft-azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/DummyPump.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/DummyPump.java rename to sdk/eventhubs/microsoft-azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/DummyPump.java diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/EPHConstructorTests.java b/sdk/eventhubs/microsoft-azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/EPHConstructorTests.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/EPHConstructorTests.java rename to sdk/eventhubs/microsoft-azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/EPHConstructorTests.java diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/LeaseManagerTest.java b/sdk/eventhubs/microsoft-azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/LeaseManagerTest.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/LeaseManagerTest.java rename to sdk/eventhubs/microsoft-azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/LeaseManagerTest.java diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/PartitionManagerTest.java b/sdk/eventhubs/microsoft-azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/PartitionManagerTest.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/PartitionManagerTest.java rename to sdk/eventhubs/microsoft-azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/PartitionManagerTest.java diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/PerTestSettings.java b/sdk/eventhubs/microsoft-azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/PerTestSettings.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/PerTestSettings.java rename to sdk/eventhubs/microsoft-azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/PerTestSettings.java diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/PrefabEventProcessor.java b/sdk/eventhubs/microsoft-azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/PrefabEventProcessor.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/PrefabEventProcessor.java rename to sdk/eventhubs/microsoft-azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/PrefabEventProcessor.java diff --git 
a/eventhubs/data-plane/azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/PrefabGeneralErrorHandler.java b/sdk/eventhubs/microsoft-azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/PrefabGeneralErrorHandler.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/PrefabGeneralErrorHandler.java rename to sdk/eventhubs/microsoft-azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/PrefabGeneralErrorHandler.java diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/PrefabProcessorFactory.java b/sdk/eventhubs/microsoft-azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/PrefabProcessorFactory.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/PrefabProcessorFactory.java rename to sdk/eventhubs/microsoft-azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/PrefabProcessorFactory.java diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/RealEventHubUtilities.java b/sdk/eventhubs/microsoft-azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/RealEventHubUtilities.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/RealEventHubUtilities.java rename to sdk/eventhubs/microsoft-azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/RealEventHubUtilities.java diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/Repros.java b/sdk/eventhubs/microsoft-azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/Repros.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/Repros.java rename to sdk/eventhubs/microsoft-azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/Repros.java diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/SadPathTests.java b/sdk/eventhubs/microsoft-azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/SadPathTests.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/SadPathTests.java rename to sdk/eventhubs/microsoft-azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/SadPathTests.java diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/SmokeTest.java b/sdk/eventhubs/microsoft-azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/SmokeTest.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/SmokeTest.java rename to sdk/eventhubs/microsoft-azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/SmokeTest.java diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/TestBase.java b/sdk/eventhubs/microsoft-azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/TestBase.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/TestBase.java rename to 
sdk/eventhubs/microsoft-azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/TestBase.java diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/TestUtilities.java b/sdk/eventhubs/microsoft-azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/TestUtilities.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/TestUtilities.java rename to sdk/eventhubs/microsoft-azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/TestUtilities.java diff --git a/eventhubs/data-plane/azure-eventhubs-extensions/pom.xml b/sdk/eventhubs/microsoft-azure-eventhubs-extensions/pom.xml similarity index 97% rename from eventhubs/data-plane/azure-eventhubs-extensions/pom.xml rename to sdk/eventhubs/microsoft-azure-eventhubs-extensions/pom.xml index a2a85d70883b5..5faee62b351f4 100644 --- a/eventhubs/data-plane/azure-eventhubs-extensions/pom.xml +++ b/sdk/eventhubs/microsoft-azure-eventhubs-extensions/pom.xml @@ -8,7 +8,7 @@ com.microsoft.azure azure-eventhubs-clients 2.3.1 - ../pom.xml + ../pom.data.xml 4.0.0 diff --git a/eventhubs/data-plane/azure-eventhubs-extensions/src/main/java/com/microsoft/azure/eventhubs/extensions/appender/EventHubsAppender.java b/sdk/eventhubs/microsoft-azure-eventhubs-extensions/src/main/java/com/microsoft/azure/eventhubs/extensions/appender/EventHubsAppender.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs-extensions/src/main/java/com/microsoft/azure/eventhubs/extensions/appender/EventHubsAppender.java rename to sdk/eventhubs/microsoft-azure-eventhubs-extensions/src/main/java/com/microsoft/azure/eventhubs/extensions/appender/EventHubsAppender.java diff --git a/eventhubs/data-plane/azure-eventhubs-extensions/src/main/java/com/microsoft/azure/eventhubs/extensions/appender/EventHubsManager.java b/sdk/eventhubs/microsoft-azure-eventhubs-extensions/src/main/java/com/microsoft/azure/eventhubs/extensions/appender/EventHubsManager.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs-extensions/src/main/java/com/microsoft/azure/eventhubs/extensions/appender/EventHubsManager.java rename to sdk/eventhubs/microsoft-azure-eventhubs-extensions/src/main/java/com/microsoft/azure/eventhubs/extensions/appender/EventHubsManager.java diff --git a/eventhubs/data-plane/azure-eventhubs/pom.xml b/sdk/eventhubs/microsoft-azure-eventhubs/pom.xml similarity index 95% rename from eventhubs/data-plane/azure-eventhubs/pom.xml rename to sdk/eventhubs/microsoft-azure-eventhubs/pom.xml index 6f567e713080e..dc90cc557ca52 100644 --- a/eventhubs/data-plane/azure-eventhubs/pom.xml +++ b/sdk/eventhubs/microsoft-azure-eventhubs/pom.xml @@ -8,7 +8,7 @@ com.microsoft.azure azure-eventhubs-clients 2.3.1 - ../pom.xml + ../pom.data.xml 4.0.0 diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/AuthorizationFailedException.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/AuthorizationFailedException.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/AuthorizationFailedException.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/AuthorizationFailedException.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/BatchOptions.java 
b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/BatchOptions.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/BatchOptions.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/BatchOptions.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/CommunicationException.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/CommunicationException.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/CommunicationException.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/CommunicationException.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/ConnectionStringBuilder.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/ConnectionStringBuilder.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/ConnectionStringBuilder.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/ConnectionStringBuilder.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/ErrorContext.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/ErrorContext.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/ErrorContext.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/ErrorContext.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/EventData.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/EventData.java old mode 100755 new mode 100644 similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/EventData.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/EventData.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/EventDataBatch.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/EventDataBatch.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/EventDataBatch.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/EventDataBatch.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/EventHubClient.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/EventHubClient.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/EventHubClient.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/EventHubClient.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/EventHubException.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/EventHubException.java similarity index 100% rename from 
eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/EventHubException.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/EventHubException.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/EventHubRuntimeInformation.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/EventHubRuntimeInformation.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/EventHubRuntimeInformation.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/EventHubRuntimeInformation.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/EventPosition.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/EventPosition.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/EventPosition.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/EventPosition.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/IllegalConnectionStringFormatException.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/IllegalConnectionStringFormatException.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/IllegalConnectionStringFormatException.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/IllegalConnectionStringFormatException.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/IllegalEntityException.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/IllegalEntityException.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/IllegalEntityException.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/IllegalEntityException.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/OperationCancelledException.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/OperationCancelledException.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/OperationCancelledException.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/OperationCancelledException.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/PartitionReceiveHandler.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/PartitionReceiveHandler.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/PartitionReceiveHandler.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/PartitionReceiveHandler.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/PartitionReceiver.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/PartitionReceiver.java similarity index 100% rename from 
eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/PartitionReceiver.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/PartitionReceiver.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/PartitionRuntimeInformation.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/PartitionRuntimeInformation.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/PartitionRuntimeInformation.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/PartitionRuntimeInformation.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/PartitionSender.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/PartitionSender.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/PartitionSender.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/PartitionSender.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/PayloadSizeExceededException.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/PayloadSizeExceededException.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/PayloadSizeExceededException.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/PayloadSizeExceededException.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/QuotaExceededException.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/QuotaExceededException.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/QuotaExceededException.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/QuotaExceededException.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/ReceiverDisconnectedException.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/ReceiverDisconnectedException.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/ReceiverDisconnectedException.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/ReceiverDisconnectedException.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/ReceiverOptions.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/ReceiverOptions.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/ReceiverOptions.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/ReceiverOptions.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/ReceiverRuntimeInformation.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/ReceiverRuntimeInformation.java similarity index 100% rename from 
eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/ReceiverRuntimeInformation.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/ReceiverRuntimeInformation.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/RetryPolicy.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/RetryPolicy.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/RetryPolicy.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/RetryPolicy.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/ServerBusyException.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/ServerBusyException.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/ServerBusyException.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/ServerBusyException.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/TimeoutException.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/TimeoutException.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/TimeoutException.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/TimeoutException.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/TransportType.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/TransportType.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/TransportType.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/TransportType.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ActiveClientTokenManager.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ActiveClientTokenManager.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ActiveClientTokenManager.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ActiveClientTokenManager.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/AmqpConnection.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/AmqpConnection.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/AmqpConnection.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/AmqpConnection.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/AmqpConstants.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/AmqpConstants.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/AmqpConstants.java rename to 
sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/AmqpConstants.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/AmqpErrorCode.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/AmqpErrorCode.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/AmqpErrorCode.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/AmqpErrorCode.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/AmqpException.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/AmqpException.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/AmqpException.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/AmqpException.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/AmqpLink.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/AmqpLink.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/AmqpLink.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/AmqpLink.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/AmqpReceiver.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/AmqpReceiver.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/AmqpReceiver.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/AmqpReceiver.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/AmqpResponseCode.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/AmqpResponseCode.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/AmqpResponseCode.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/AmqpResponseCode.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/AmqpSender.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/AmqpSender.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/AmqpSender.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/AmqpSender.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/AmqpUtil.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/AmqpUtil.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/AmqpUtil.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/AmqpUtil.java diff --git 
a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/BaseLinkHandler.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/BaseLinkHandler.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/BaseLinkHandler.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/BaseLinkHandler.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/CBSChannel.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/CBSChannel.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/CBSChannel.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/CBSChannel.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ClientConstants.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ClientConstants.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ClientConstants.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ClientConstants.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ClientEntity.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ClientEntity.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ClientEntity.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ClientEntity.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ConnectionHandler.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ConnectionHandler.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ConnectionHandler.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ConnectionHandler.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/CustomIOHandler.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/CustomIOHandler.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/CustomIOHandler.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/CustomIOHandler.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/DispatchHandler.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/DispatchHandler.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/DispatchHandler.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/DispatchHandler.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ErrorContextProvider.java 
b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ErrorContextProvider.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ErrorContextProvider.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ErrorContextProvider.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/EventDataBatchImpl.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/EventDataBatchImpl.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/EventDataBatchImpl.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/EventDataBatchImpl.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/EventDataImpl.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/EventDataImpl.java old mode 100755 new mode 100644 similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/EventDataImpl.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/EventDataImpl.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/EventDataUtil.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/EventDataUtil.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/EventDataUtil.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/EventDataUtil.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/EventHubClientImpl.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/EventHubClientImpl.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/EventHubClientImpl.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/EventHubClientImpl.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/EventPositionImpl.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/EventPositionImpl.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/EventPositionImpl.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/EventPositionImpl.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ExceptionUtil.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ExceptionUtil.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ExceptionUtil.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ExceptionUtil.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/FaultTolerantObject.java 
b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/FaultTolerantObject.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/FaultTolerantObject.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/FaultTolerantObject.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/IOObject.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/IOObject.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/IOObject.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/IOObject.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/IteratorUtil.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/IteratorUtil.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/IteratorUtil.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/IteratorUtil.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ManagementChannel.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ManagementChannel.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ManagementChannel.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ManagementChannel.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/MessageOperationResult.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/MessageOperationResult.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/MessageOperationResult.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/MessageOperationResult.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/MessageReceiver.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/MessageReceiver.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/MessageReceiver.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/MessageReceiver.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/MessageSender.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/MessageSender.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/MessageSender.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/MessageSender.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/MessageWrapper.java 
b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/MessageWrapper.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/MessageWrapper.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/MessageWrapper.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/MessagingFactory.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/MessagingFactory.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/MessagingFactory.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/MessagingFactory.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/Operation.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/Operation.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/Operation.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/Operation.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/OperationResult.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/OperationResult.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/OperationResult.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/OperationResult.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/OperationResultBase.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/OperationResultBase.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/OperationResultBase.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/OperationResultBase.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/PartitionReceiverImpl.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/PartitionReceiverImpl.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/PartitionReceiverImpl.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/PartitionReceiverImpl.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/PartitionSenderImpl.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/PartitionSenderImpl.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/PartitionSenderImpl.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/PartitionSenderImpl.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/PassByRef.java 
b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/PassByRef.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/PassByRef.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/PassByRef.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ProtonUtil.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ProtonUtil.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ProtonUtil.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ProtonUtil.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ReactorDispatcher.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ReactorDispatcher.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ReactorDispatcher.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ReactorDispatcher.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ReactorHandler.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ReactorHandler.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ReactorHandler.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ReactorHandler.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ReceiveLinkHandler.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ReceiveLinkHandler.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ReceiveLinkHandler.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ReceiveLinkHandler.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ReceivePump.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ReceivePump.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ReceivePump.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ReceivePump.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ReceiverContext.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ReceiverContext.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ReceiverContext.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ReceiverContext.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ReceiverSettingsProvider.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ReceiverSettingsProvider.java similarity 
index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ReceiverSettingsProvider.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ReceiverSettingsProvider.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ReplayableWorkItem.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ReplayableWorkItem.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ReplayableWorkItem.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ReplayableWorkItem.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/RequestResponseChannel.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/RequestResponseChannel.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/RequestResponseChannel.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/RequestResponseChannel.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/RequestResponseCloser.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/RequestResponseCloser.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/RequestResponseCloser.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/RequestResponseCloser.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/RequestResponseOpener.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/RequestResponseOpener.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/RequestResponseOpener.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/RequestResponseOpener.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/RetryExponential.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/RetryExponential.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/RetryExponential.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/RetryExponential.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/SchedulerProvider.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/SchedulerProvider.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/SchedulerProvider.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/SchedulerProvider.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/SendLinkHandler.java 
b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/SendLinkHandler.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/SendLinkHandler.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/SendLinkHandler.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/SenderContext.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/SenderContext.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/SenderContext.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/SenderContext.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/SessionHandler.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/SessionHandler.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/SessionHandler.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/SessionHandler.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/SessionProvider.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/SessionProvider.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/SessionProvider.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/SessionProvider.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/SharedAccessSignatureTokenProvider.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/SharedAccessSignatureTokenProvider.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/SharedAccessSignatureTokenProvider.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/SharedAccessSignatureTokenProvider.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/StringUtil.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/StringUtil.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/StringUtil.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/StringUtil.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/TimeoutTracker.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/TimeoutTracker.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/TimeoutTracker.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/TimeoutTracker.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/Timer.java 
b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/Timer.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/Timer.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/Timer.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/TrackingUtil.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/TrackingUtil.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/TrackingUtil.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/TrackingUtil.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/WebSocketConnectionHandler.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/WebSocketConnectionHandler.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/WebSocketConnectionHandler.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/WebSocketConnectionHandler.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/WebSocketProxyConnectionHandler.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/WebSocketProxyConnectionHandler.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/WebSocketProxyConnectionHandler.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/WebSocketProxyConnectionHandler.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/WorkItem.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/WorkItem.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/WorkItem.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/WorkItem.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/concurrency/ConcurrentReceiversTest.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/concurrency/ConcurrentReceiversTest.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/concurrency/ConcurrentReceiversTest.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/concurrency/ConcurrentReceiversTest.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/concurrency/EventHubClientTest.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/concurrency/EventHubClientTest.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/concurrency/EventHubClientTest.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/concurrency/EventHubClientTest.java diff --git 
a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/connstrbuilder/ConnStrBuilderTest.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/connstrbuilder/ConnStrBuilderTest.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/connstrbuilder/ConnStrBuilderTest.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/connstrbuilder/ConnStrBuilderTest.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/connstrbuilder/TransportTypeTest.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/connstrbuilder/TransportTypeTest.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/connstrbuilder/TransportTypeTest.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/connstrbuilder/TransportTypeTest.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/eventdata/BackCompatTest.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/eventdata/BackCompatTest.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/eventdata/BackCompatTest.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/eventdata/BackCompatTest.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/eventdata/EventDataBatchTest.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/eventdata/EventDataBatchTest.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/eventdata/EventDataBatchTest.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/eventdata/EventDataBatchTest.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/eventdata/EventDataTest.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/eventdata/EventDataTest.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/eventdata/EventDataTest.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/eventdata/EventDataTest.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/eventdata/InteropAmqpPropertiesTest.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/eventdata/InteropAmqpPropertiesTest.java old mode 100755 new mode 100644 similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/eventdata/InteropAmqpPropertiesTest.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/eventdata/InteropAmqpPropertiesTest.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/eventdata/InteropEventBodyTest.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/eventdata/InteropEventBodyTest.java similarity index 100% rename from 
eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/eventdata/InteropEventBodyTest.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/eventdata/InteropEventBodyTest.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/ClientEntityCreateTest.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/ClientEntityCreateTest.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/ClientEntityCreateTest.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/ClientEntityCreateTest.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/MsgFactoryOpenCloseTest.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/MsgFactoryOpenCloseTest.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/MsgFactoryOpenCloseTest.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/MsgFactoryOpenCloseTest.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/ReactorFaultTest.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/ReactorFaultTest.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/ReactorFaultTest.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/ReactorFaultTest.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/ReceiverEpochTest.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/ReceiverEpochTest.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/ReceiverEpochTest.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/ReceiverEpochTest.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/RetryPolicyTest.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/RetryPolicyTest.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/RetryPolicyTest.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/RetryPolicyTest.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/SecurityExceptionsTest.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/SecurityExceptionsTest.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/SecurityExceptionsTest.java rename to 
sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/SecurityExceptionsTest.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/SendLargeMessageTest.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/SendLargeMessageTest.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/SendLargeMessageTest.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/SendLargeMessageTest.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/WebSocketsSendLargeMessageTest.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/WebSocketsSendLargeMessageTest.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/WebSocketsSendLargeMessageTest.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/WebSocketsSendLargeMessageTest.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/impl/EventDataOrderTest.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/impl/EventDataOrderTest.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/impl/EventDataOrderTest.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/impl/EventDataOrderTest.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/jproxy/ProxyNegotiationHandler.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/jproxy/ProxyNegotiationHandler.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/jproxy/ProxyNegotiationHandler.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/jproxy/ProxyNegotiationHandler.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/jproxy/ProxyServer.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/jproxy/ProxyServer.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/jproxy/ProxyServer.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/jproxy/ProxyServer.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/jproxy/SimpleProxy.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/jproxy/SimpleProxy.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/jproxy/SimpleProxy.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/jproxy/SimpleProxy.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/ApiTestBase.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/ApiTestBase.java similarity 
index 100% rename from eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/ApiTestBase.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/ApiTestBase.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/FaultInjectingReactorFactory.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/FaultInjectingReactorFactory.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/FaultInjectingReactorFactory.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/FaultInjectingReactorFactory.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/SasTokenTestBase.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/SasTokenTestBase.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/SasTokenTestBase.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/SasTokenTestBase.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/TestBase.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/TestBase.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/TestBase.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/TestBase.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/TestContext.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/TestContext.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/TestContext.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/TestContext.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/mock/MockServer.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/mock/MockServer.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/mock/MockServer.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/mock/MockServer.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/mock/Sender1MsgOnLinkFlowHandler.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/mock/Sender1MsgOnLinkFlowHandler.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/mock/Sender1MsgOnLinkFlowHandler.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/mock/Sender1MsgOnLinkFlowHandler.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/mock/ServerTraceHandler.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/mock/ServerTraceHandler.java similarity index 100% rename from 
eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/mock/ServerTraceHandler.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/mock/ServerTraceHandler.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/proxy/ProxyReceiveTest.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/proxy/ProxyReceiveTest.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/proxy/ProxyReceiveTest.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/proxy/ProxyReceiveTest.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/proxy/ProxySelectorTest.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/proxy/ProxySelectorTest.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/proxy/ProxySelectorTest.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/proxy/ProxySelectorTest.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/proxy/ProxySendLargeMessageTest.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/proxy/ProxySendLargeMessageTest.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/proxy/ProxySendLargeMessageTest.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/proxy/ProxySendLargeMessageTest.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/proxy/ProxySendTest.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/proxy/ProxySendTest.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/proxy/ProxySendTest.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/proxy/ProxySendTest.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/EventDataBatchAPITest.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/EventDataBatchAPITest.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/EventDataBatchAPITest.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/EventDataBatchAPITest.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/ReceiveParallelManualTest.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/ReceiveParallelManualTest.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/ReceiveParallelManualTest.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/ReceiveParallelManualTest.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/ReceivePumpEventHubTest.java 
b/sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/ReceivePumpEventHubTest.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/ReceivePumpEventHubTest.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/ReceivePumpEventHubTest.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/ReceivePumpTest.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/ReceivePumpTest.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/ReceivePumpTest.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/ReceivePumpTest.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/ReceiveTest.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/ReceiveTest.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/ReceiveTest.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/ReceiveTest.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/ReceiverIdentifierTest.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/ReceiverIdentifierTest.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/ReceiverIdentifierTest.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/ReceiverIdentifierTest.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/ReceiverRuntimeMetricsTest.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/ReceiverRuntimeMetricsTest.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/ReceiverRuntimeMetricsTest.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/ReceiverRuntimeMetricsTest.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/RequestResponseTest.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/RequestResponseTest.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/RequestResponseTest.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/RequestResponseTest.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/SasTokenReceiveTest.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/SasTokenReceiveTest.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/SasTokenReceiveTest.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/SasTokenReceiveTest.java 
diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/SasTokenSendTest.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/SasTokenSendTest.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/SasTokenSendTest.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/SasTokenSendTest.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/SendTest.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/SendTest.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/SendTest.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/SendTest.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/SetPrefetchCountTest.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/SetPrefetchCountTest.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/SetPrefetchCountTest.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/SetPrefetchCountTest.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/WebSocketsReceiveTest.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/WebSocketsReceiveTest.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/WebSocketsReceiveTest.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/WebSocketsReceiveTest.java diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/WebSocketsSendTest.java b/sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/WebSocketsSendTest.java similarity index 100% rename from eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/WebSocketsSendTest.java rename to sdk/eventhubs/microsoft-azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/WebSocketsSendTest.java diff --git a/eventhubs/data-plane/pom.xml b/sdk/eventhubs/pom.data.xml similarity index 100% rename from eventhubs/data-plane/pom.xml rename to sdk/eventhubs/pom.data.xml diff --git a/sdk/eventhubs/pom.service.xml b/sdk/eventhubs/pom.service.xml new file mode 100644 index 0000000000000..23edea1a17d6e --- /dev/null +++ b/sdk/eventhubs/pom.service.xml @@ -0,0 +1,18 @@ + + + 4.0.0 + com.azure + azure-eventhubs-service + pom + 1.0.0 + + microsoft-azure-eventhubs + microsoft-azure-eventhubs-eph + microsoft-azure-eventhubs-extensions + ../core + azure-eventhubs + + diff --git a/sdk/eventhubs/tests.yml b/sdk/eventhubs/tests.yml new file mode 100644 index 0000000000000..e6aa03d5f6a63 --- /dev/null +++ b/sdk/eventhubs/tests.yml @@ -0,0 +1,10 @@ +trigger: none + +jobs: + - template: ../../eng/pipelines/templates/jobs/archetype-sdk-tests.yml + parameters: + ServiceDirectory: eventhubs + EnvVars: + AZURE_EVENTHUBS_CONNECTION_STRING: $(java-event-hubs-test-connection-string) 
+ AZURE_TEST_MODE: RECORD + AZURE_LOG_LEVEL: 4 diff --git a/sdk/keyvault/azure-keyvault-keys/README.md b/sdk/keyvault/azure-keyvault-keys/README.md index 33419a0b7dd58..e0899f1f273f7 100644 --- a/sdk/keyvault/azure-keyvault-keys/README.md +++ b/sdk/keyvault/azure-keyvault-keys/README.md @@ -303,7 +303,7 @@ This project has adopted the [Microsoft Open Source Code of Conduct](https://ope [source_code]: https://github.com/Azure/azure-sdk-for-java/tree/master/sdk/keyvault/azure-keyvault-keys/src -[api_documentation]: not-valid-link +[api_documentation]: https://azure.github.io/azure-sdk-for-java/track2reports/index.html [azkeyvault_docs]: https://docs.microsoft.com/en-us/azure/key-vault/ [azure_identity]: https://github.com/Azure/azure-sdk-for-java/tree/master/identity/client [maven]: https://maven.apache.org/ diff --git a/sdk/keyvault/azure-keyvault-keys/src/main/java/com/azure/security/keyvault/keys/KeyAsyncClient.java b/sdk/keyvault/azure-keyvault-keys/src/main/java/com/azure/security/keyvault/keys/KeyAsyncClient.java index da44d45f04a26..b8b4ad5c4816c 100644 --- a/sdk/keyvault/azure-keyvault-keys/src/main/java/com/azure/security/keyvault/keys/KeyAsyncClient.java +++ b/sdk/keyvault/azure-keyvault-keys/src/main/java/com/azure/security/keyvault/keys/KeyAsyncClient.java @@ -7,17 +7,16 @@ import com.azure.core.exception.ResourceModifiedException; import com.azure.core.exception.ResourceNotFoundException; import com.azure.core.http.HttpPipeline; -import com.azure.core.http.rest.PagedResponse; import com.azure.core.http.rest.Response; -import com.azure.core.http.rest.SimpleResponse; import com.azure.core.http.rest.VoidResponse; +import com.azure.core.http.rest.SimpleResponse; +import com.azure.core.http.rest.PagedResponse; +import com.azure.core.http.rest.PagedFlux; import com.azure.core.implementation.RestProxy; +import com.azure.core.implementation.annotation.ReturnType; import com.azure.core.implementation.annotation.ServiceClient; -import com.azure.core.implementation.util.ImplUtils; -import com.azure.core.util.Context; +import com.azure.core.implementation.annotation.ServiceMethod; import com.azure.core.util.logging.ClientLogger; -import com.azure.security.keyvault.keys.implementation.DeletedKeyPage; -import com.azure.security.keyvault.keys.implementation.KeyBasePage; import com.azure.security.keyvault.keys.models.DeletedKey; import com.azure.security.keyvault.keys.models.EcKeyCreateOptions; import com.azure.security.keyvault.keys.models.Key; @@ -34,10 +33,11 @@ import java.util.Objects; import java.util.function.Function; -import org.reactivestreams.Publisher; import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; +import static com.azure.core.implementation.util.FluxUtil.monoContext; + /** * The KeyAsyncClient provides asynchronous methods to manage {@link Key keys} in the Azure Key Vault. The client * supports creating, retrieving, updating, deleting, purging, backing up, restoring and listing the {@link Key keys}. The client @@ -48,6 +48,7 @@ * {@codesnippet com.azure.security.keyvault.keys.async.keyclient.instantiation} * * @see KeyClientBuilder + * @see PagedFlux */ @ServiceClient(builder = KeyClientBuilder.class, isAsync = true, serviceInterfaces = KeyService.class) public final class KeyAsyncClient { @@ -94,12 +95,13 @@ public final class KeyAsyncClient { * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response#value() value} contains the {@link Key created key}. 
*/ + @ServiceMethod(returns = ReturnType.SINGLE) public Mono> createKey(String name, KeyType keyType) { KeyRequestParameters parameters = new KeyRequestParameters().kty(keyType); - return service.createKey(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE) + return monoContext(context -> service.createKey(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE) .doOnRequest(ignored -> logger.info("Creating key - {}", name)) .doOnSuccess(response -> logger.info("Created key - {}", response.value().name())) - .doOnError(error -> logger.warning("Failed to create key - {}", name, error)); + .doOnError(error -> logger.warning("Failed to create key - {}", name, error))); } /** @@ -130,16 +132,17 @@ public Mono> createKey(String name, KeyType keyType) { * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response#value() value} contains the {@link Key created key}. */ + @ServiceMethod(returns = ReturnType.SINGLE) public Mono> createKey(KeyCreateOptions keyCreateOptions) { Objects.requireNonNull(keyCreateOptions, "The key options parameter cannot be null."); KeyRequestParameters parameters = new KeyRequestParameters() .kty(keyCreateOptions.keyType()) .keyOps(keyCreateOptions.keyOperations()) .keyAttributes(new KeyRequestAttributes(keyCreateOptions)); - return service.createKey(endpoint, keyCreateOptions.name(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE) + return monoContext(context -> service.createKey(endpoint, keyCreateOptions.name(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE) .doOnRequest(ignored -> logger.info("Creating key - {}", keyCreateOptions.name())) .doOnSuccess(response -> logger.info("Created key - {}", response.value().name())) - .doOnError(error -> logger.warning("Failed to create key - {}", keyCreateOptions.name(), error)); + .doOnError(error -> logger.warning("Failed to create key - {}", keyCreateOptions.name(), error))); } /** @@ -172,6 +175,7 @@ public Mono> createKey(KeyCreateOptions keyCreateOptions) { * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response#value() value} contains the {@link Key created key}. 
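The hunk above wraps the createKey overloads in monoContext and annotates them with @ServiceMethod; the generic return types (Mono<Response<Key>>) are collapsed in this rendering of the diff. A minimal caller-side sketch of the async create path, assuming the client instance comes from KeyClientBuilder (as in the instantiation snippet referenced by the class Javadoc) and assuming KeyType lives under the keys model packages in this preview:

```java
import com.azure.core.http.rest.Response;
import com.azure.security.keyvault.keys.KeyAsyncClient;
import com.azure.security.keyvault.keys.models.Key;
import com.azure.security.keyvault.keys.models.webkey.KeyType; // package assumed for this preview

class CreateKeySketch {
    // Illustrative only: the client is assumed to be built via KeyClientBuilder.
    static void createAndLog(KeyAsyncClient client, KeyType keyType) {
        client.createKey("sample-key", keyType)
            .map(Response::value)
            .subscribe(key -> System.out.printf("Created key %s%n", key.name()));
    }
}
```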
*/ + @ServiceMethod(returns = ReturnType.SINGLE) public Mono> createRsaKey(RsaKeyCreateOptions rsaKeyCreateOptions) { Objects.requireNonNull(rsaKeyCreateOptions, "The Rsa key options parameter cannot be null."); KeyRequestParameters parameters = new KeyRequestParameters() @@ -179,10 +183,10 @@ public Mono> createRsaKey(RsaKeyCreateOptions rsaKeyCreateOptions) .keySize(rsaKeyCreateOptions.keySize()) .keyOps(rsaKeyCreateOptions.keyOperations()) .keyAttributes(new KeyRequestAttributes(rsaKeyCreateOptions)); - return service.createKey(endpoint, rsaKeyCreateOptions.name(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE) + return monoContext(context -> service.createKey(endpoint, rsaKeyCreateOptions.name(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE) .doOnRequest(ignored -> logger.info("Creating Rsa key - {}", rsaKeyCreateOptions.name())) .doOnSuccess(response -> logger.info("Created Rsa key - {}", response.value().name())) - .doOnError(error -> logger.warning("Failed to create Rsa key - {}", rsaKeyCreateOptions.name(), error)); + .doOnError(error -> logger.warning("Failed to create Rsa key - {}", rsaKeyCreateOptions.name(), error))); } /** @@ -215,6 +219,7 @@ public Mono> createRsaKey(RsaKeyCreateOptions rsaKeyCreateOptions) * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response#value() value} contains the {@link Key created key}. */ + @ServiceMethod(returns = ReturnType.SINGLE) public Mono> createEcKey(EcKeyCreateOptions ecKeyCreateOptions) { Objects.requireNonNull(ecKeyCreateOptions, "The Ec key options options cannot be null."); KeyRequestParameters parameters = new KeyRequestParameters() @@ -222,10 +227,10 @@ public Mono> createEcKey(EcKeyCreateOptions ecKeyCreateOptions) { .curve(ecKeyCreateOptions.curve()) .keyOps(ecKeyCreateOptions.keyOperations()) .keyAttributes(new KeyRequestAttributes(ecKeyCreateOptions)); - return service.createKey(endpoint, ecKeyCreateOptions.name(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE) + return monoContext(context -> service.createKey(endpoint, ecKeyCreateOptions.name(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE) .doOnRequest(ignored -> logger.info("Creating Ec key - {}", ecKeyCreateOptions.name())) .doOnSuccess(response -> logger.info("Created Ec key - {}", response.value().name())) - .doOnError(error -> logger.warning("Failed to create Ec key - {}", ecKeyCreateOptions.name(), error)); + .doOnError(error -> logger.warning("Failed to create Ec key - {}", ecKeyCreateOptions.name(), error))); } /** @@ -245,12 +250,13 @@ public Mono> createEcKey(EcKeyCreateOptions ecKeyCreateOptions) { * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response#value() value} contains the {@link Key imported key}. 
*/ + @ServiceMethod(returns = ReturnType.SINGLE) public Mono> importKey(String name, JsonWebKey keyMaterial) { KeyImportRequestParameters parameters = new KeyImportRequestParameters().key(keyMaterial); - return service.importKey(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE) + return monoContext(context -> service.importKey(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE) .doOnRequest(ignored -> logger.info("Importing key - {}", name)) .doOnSuccess(response -> logger.info("Imported key - {}", response.value().name())) - .doOnError(error -> logger.warning("Failed to import key - {}", name, error)); + .doOnError(error -> logger.warning("Failed to import key - {}", name, error))); } /** @@ -279,16 +285,17 @@ public Mono> importKey(String name, JsonWebKey keyMaterial) { * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response#value() value} contains the {@link Key imported key}. */ + @ServiceMethod(returns = ReturnType.SINGLE) public Mono> importKey(KeyImportOptions keyImportOptions) { Objects.requireNonNull(keyImportOptions, "The key import configuration parameter cannot be null."); KeyImportRequestParameters parameters = new KeyImportRequestParameters() .key(keyImportOptions.keyMaterial()) .hsm(keyImportOptions.hsm()) .keyAttributes(new KeyRequestAttributes(keyImportOptions)); - return service.importKey(endpoint, keyImportOptions.name(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE) + return monoContext(context -> service.importKey(endpoint, keyImportOptions.name(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE) .doOnRequest(ignored -> logger.info("Importing key - {}", keyImportOptions.name())) .doOnSuccess(response -> logger.info("Imported key - {}", response.value().name())) - .doOnError(error -> logger.warning("Failed to import key - {}", keyImportOptions.name(), error)); + .doOnError(error -> logger.warning("Failed to import key - {}", keyImportOptions.name(), error))); } /** @@ -310,15 +317,15 @@ public Mono> importKey(KeyImportOptions keyImportOptions) { * @throws HttpRequestException if {@code name} or {@code version} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response#value() value} contains the requested {@link Key key}. */ + @ServiceMethod(returns = ReturnType.SINGLE) public Mono> getKey(String name, String version) { - String keyVersion = ""; - if (version != null) { - keyVersion = version; + if (version == null) { + return getKey(name); } - return service.getKey(endpoint, name, keyVersion, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) + return monoContext(context -> service.getKey(endpoint, name, version, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) .doOnRequest(ignored -> logger.info("Retrieving key - {}", name)) .doOnSuccess(response -> logger.info("Retrieved key - {}", response.value().name())) - .doOnError(error -> logger.warning("Failed to get key - {}", name, error)); + .doOnError(error -> logger.warning("Failed to get key - {}", name, error))); } /** @@ -339,11 +346,12 @@ public Mono> getKey(String name, String version) { * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response#value() value} contains the requested {@link Key key}. 
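getKey(String, String) above now short-circuits to getKey(name) when the version is null, instead of substituting an empty string. A small sketch of the caller-side effect, using only types already shown in this diff (Key, Response, Mono):

```java
import com.azure.core.http.rest.Response;
import com.azure.security.keyvault.keys.KeyAsyncClient;
import com.azure.security.keyvault.keys.models.Key;
import reactor.core.publisher.Mono;

class GetKeySketch {
    // A null version now resolves to the latest key via getKey(name);
    // a non-null version fetches that specific key version.
    static Mono<Key> fetch(KeyAsyncClient client, String name, String version) {
        return client.getKey(name, version).map(Response::value);
    }
}
```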
*/ + @ServiceMethod(returns = ReturnType.SINGLE) public Mono> getKey(String name) { - return getKey(name, "") + return monoContext(context -> getKey(name, "") .doOnRequest(ignored -> logger.info("Retrieving key - {}", name)) .doOnSuccess(response -> logger.info("Retrieved key - {}", response.value().name())) - .doOnError(error -> logger.warning("Failed to get key - {}", name, error)); + .doOnError(error -> logger.warning("Failed to get key - {}", name, error))); } @@ -365,16 +373,14 @@ public Mono> getKey(String name) { * @throws HttpRequestException if {@link KeyBase#name()} name} or {@link KeyBase#version() version} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response#value() value} contains the requested {@link Key key}. */ + @ServiceMethod(returns = ReturnType.SINGLE) public Mono> getKey(KeyBase keyBase) { Objects.requireNonNull(keyBase, "The Key Base parameter cannot be null."); - String keyVersion = ""; - if (keyBase.version() != null) { - keyVersion = keyBase.version(); + if (keyBase.version() == null) { + return getKey(keyBase.name()); } - return service.getKey(endpoint, keyBase.name(), keyVersion, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) - .doOnRequest(ignored -> logger.info("Retrieving key - {}", keyBase.name())) - .doOnSuccess(response -> logger.info("Retrieved key - {}", response.value().name())) - .doOnError(error -> logger.warning("Failed to get key - {}", keyBase.name(), error)); + + return getKey(keyBase.name(), keyBase.version()); } /** @@ -401,16 +407,17 @@ public Mono> getKey(KeyBase keyBase) { * @throws HttpRequestException if {@link KeyBase#name() name} or {@link KeyBase#version() version} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response#value() value} contains the {@link KeyBase updated key}. */ + @ServiceMethod(returns = ReturnType.SINGLE) public Mono> updateKey(KeyBase key) { Objects.requireNonNull(key, "The key input parameter cannot be null."); KeyRequestParameters parameters = new KeyRequestParameters() .tags(key.tags()) .keyAttributes(new KeyRequestAttributes(key)); - return service.updateKey(endpoint, key.name(), key.version(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE) + return monoContext(context -> service.updateKey(endpoint, key.name(), key.version(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE) .doOnRequest(ignored -> logger.info("Updating key - {}", key.name())) .doOnSuccess(response -> logger.info("Updated key - {}", response.value().name())) - .doOnError(error -> logger.warning("Failed to update key - {}", key.name(), error)); + .doOnError(error -> logger.warning("Failed to update key - {}", key.name(), error))); } /** @@ -438,6 +445,7 @@ public Mono> updateKey(KeyBase key) { * @throws HttpRequestException if {@link KeyBase#name() name} or {@link KeyBase#version() version} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response#value() value} contains the {@link KeyBase updated key}. */ + @ServiceMethod(returns = ReturnType.SINGLE) public Mono> updateKey(KeyBase key, KeyOperation... keyOperations) { Objects.requireNonNull(key, "The key input parameter cannot be null."); KeyRequestParameters parameters = new KeyRequestParameters() @@ -445,10 +453,10 @@ public Mono> updateKey(KeyBase key, KeyOperation... 
keyOperations) .keyOps(Arrays.asList(keyOperations)) .keyAttributes(new KeyRequestAttributes(key)); - return service.updateKey(endpoint, key.name(), key.version(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE) + return monoContext(context -> service.updateKey(endpoint, key.name(), key.version(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE) .doOnRequest(ignored -> logger.info("Updating key - {}", key.name())) .doOnSuccess(response -> logger.info("Updated key - {}", response.value().name())) - .doOnError(error -> logger.warning("Failed to update key - {}", key.name(), error)); + .doOnError(error -> logger.warning("Failed to update key - {}", key.name(), error))); } /** @@ -471,11 +479,12 @@ public Mono> updateKey(KeyBase key, KeyOperation... keyOperations) * @throws HttpRequestException when a key with {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response#value() value} contains the {@link DeletedKey deleted key}. */ + @ServiceMethod(returns = ReturnType.SINGLE) public Mono> deleteKey(String name) { - return service.deleteKey(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) + return monoContext(context -> service.deleteKey(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) .doOnRequest(ignored -> logger.info("Deleting key - {}", name)) .doOnSuccess(response -> logger.info("Deleted key - {}", response.value().name())) - .doOnError(error -> logger.warning("Failed to delete key - {}", name, error)); + .doOnError(error -> logger.warning("Failed to delete key - {}", name, error))); } /** @@ -496,11 +505,12 @@ public Mono> deleteKey(String name) { * @throws HttpRequestException when a key with {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response#value() value} contains the {@link DeletedKey deleted key}. */ + @ServiceMethod(returns = ReturnType.SINGLE) public Mono> getDeletedKey(String name) { - return service.getDeletedKey(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) + return monoContext(context -> service.getDeletedKey(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) .doOnRequest(ignored -> logger.info("Retrieving deleted key - {}", name)) .doOnSuccess(response -> logger.info("Retrieved deleted key - {}", response.value().name())) - .doOnError(error -> logger.warning("Failed to get key - {}", name, error)); + .doOnError(error -> logger.warning("Failed to get key - {}", name, error))); } /** @@ -521,11 +531,12 @@ public Mono> getDeletedKey(String name) { * @throws HttpRequestException when a key with {@code name} is empty string. * @return A {@link Mono} containing a {@link VoidResponse}. 
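deleteKey and getDeletedKey above follow the same monoContext pattern. A hedged sketch of the soft-delete round trip they enable; deletion completes asynchronously on the service side, so production code would poll for the deleted key before reading it, much like the test changes at the end of this diff do:

```java
import com.azure.security.keyvault.keys.KeyAsyncClient;
import com.azure.security.keyvault.keys.models.DeletedKey;
import reactor.core.publisher.Mono;

class SoftDeleteSketch {
    // Requests deletion, then reads the deleted-key record (recovery id, etc.).
    // Real code should wait/poll between the two calls, since server-side
    // deletion is not instantaneous in a soft-delete enabled vault.
    static Mono<DeletedKey> deleteThenFetch(KeyAsyncClient client, String name) {
        return client.deleteKey(name)
            .then(client.getDeletedKey(name))
            .map(response -> response.value());
    }
}
```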
*/ + @ServiceMethod(returns = ReturnType.SINGLE) public Mono purgeDeletedKey(String name) { - return service.purgeDeletedKey(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) + return monoContext(context -> service.purgeDeletedKey(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) .doOnRequest(ignored -> logger.info("Purging deleted key - {}", name)) .doOnSuccess(response -> logger.info("Purged deleted key - {}", name)) - .doOnError(error -> logger.warning("Failed to purge deleted key - {}", name, error)); + .doOnError(error -> logger.warning("Failed to purge deleted key - {}", name, error))); } /** @@ -547,11 +558,12 @@ public Mono purgeDeletedKey(String name) { * @throws HttpRequestException when a key with {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response#value() value} contains the {@link Key recovered key}. */ + @ServiceMethod(returns = ReturnType.SINGLE) public Mono> recoverDeletedKey(String name) { - return service.recoverDeletedKey(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) + return monoContext(context -> service.recoverDeletedKey(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) .doOnRequest(ignored -> logger.info("Recovering deleted key - {}", name)) .doOnSuccess(response -> logger.info("Recovered deleted key - {}", response.value().name())) - .doOnError(error -> logger.warning("Failed to recover deleted key - {}", name, error)); + .doOnError(error -> logger.warning("Failed to recover deleted key - {}", name, error))); } /** @@ -577,13 +589,14 @@ public Mono> recoverDeletedKey(String name) { * @throws HttpRequestException when a key with {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response#value() value} contains the backed up key blob. */ + @ServiceMethod(returns = ReturnType.SINGLE) public Mono> backupKey(String name) { - return service.backupKey(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) + return monoContext(context -> service.backupKey(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) .doOnRequest(ignored -> logger.info("Backing up key - {}", name)) .doOnSuccess(response -> logger.info("Backed up key - {}", name)) .doOnError(error -> logger.warning("Failed to backup key - {}", name, error)) .flatMap(base64URLResponse -> Mono.just(new SimpleResponse(base64URLResponse.request(), - base64URLResponse.statusCode(), base64URLResponse.headers(), base64URLResponse.value().value()))); + base64URLResponse.statusCode(), base64URLResponse.headers(), base64URLResponse.value().value())))); } /** @@ -608,12 +621,13 @@ public Mono> backupKey(String name) { * @throws ResourceModifiedException when {@code backup} blob is malformed. * @return A {@link Mono} containing a {@link Response} whose {@link Response#value() value} contains the {@link Key restored key}. 
*/ + @ServiceMethod(returns = ReturnType.SINGLE) public Mono> restoreKey(byte[] backup) { KeyRestoreRequestParameters parameters = new KeyRestoreRequestParameters().keyBackup(backup); - return service.restoreKey(endpoint, API_VERSION, parameters, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) + return monoContext(context -> service.restoreKey(endpoint, API_VERSION, parameters, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) .doOnRequest(ignored -> logger.info("Attempting to restore key")) .doOnSuccess(response -> logger.info("Restored Key - {}", response.value().name())) - .doOnError(error -> logger.warning("Failed to restore key - {}", error)); + .doOnError(error -> logger.warning("Failed to restore key - {}", error))); } /** @@ -629,14 +643,37 @@ public Mono> restoreKey(byte[] backup) { * .map(Response::value); * * - * @return A {@link Flux} containing {@link KeyBase key} of all the keys in the vault. + * @return A {@link PagedFlux} containing {@link KeyBase key} of all the keys in the vault. */ - public Flux listKeys() { + @ServiceMethod(returns = ReturnType.COLLECTION) + public PagedFlux listKeys() { + return new PagedFlux<>( + () -> monoContext(context -> listKeysFirstPage()), + continuationToken -> monoContext(context -> listKeysNextPage(continuationToken))); + } + + /* + * Gets attributes of all the keys given by the {@code nextPageLink} that was retrieved from a call to + * {@link KeyAsyncClient#listKeys()}. + * + * @param continuationToken The {@link PagedResponse#nextLink()} from a previous, successful call to one of the listKeys operations. + * @return A {@link Mono} of {@link PagedResponse} from the next page of results. + */ + private Mono> listKeysNextPage(String continuationToken) { + return service.getKeys(endpoint, continuationToken, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) + .doOnRequest(ignored -> logger.info("Listing next keys page - Page {} ", continuationToken)) + .doOnSuccess(response -> logger.info("Listed next keys page - Page {} ", continuationToken)) + .doOnError(error -> logger.warning("Failed to list next keys page - Page {} ", continuationToken, error)); + } + + /* + * Calls the service and retrieve first page result. It makes one call and retrieve {@code DEFAULT_MAX_PAGE_RESULTS} values. + */ + private Mono> listKeysFirstPage() { return service.getKeys(endpoint, DEFAULT_MAX_PAGE_RESULTS, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) - .doOnRequest(ignored -> logger.info("Listing keys")) - .doOnSuccess(response -> logger.info("Listed keys")) - .doOnError(error -> logger.warning("Failed to list keys", error)) - .flatMapMany(r -> extractAndFetchKeys(r, Context.NONE)); + .doOnRequest(ignored -> logger.info("Listing keys")) + .doOnSuccess(response -> logger.info("Listed keys")) + .doOnError(error -> logger.warning("Failed to list keys", error)); } /** @@ -652,14 +689,37 @@ public Flux listKeys() { * System.out.printf("Deleted key's recovery Id %s \n", deletedKey.recoveryId())); * * - * @return A {@link Flux} containing all of the {@link DeletedKey deleted keys} in the vault. + * @return A {@link PagedFlux} containing all of the {@link DeletedKey deleted keys} in the vault. 
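listKeys() above now returns a PagedFlux built from a first-page supplier and a continuation-token function, rather than a flattened Flux. A sketch of the two ways a caller can consume it, assuming the PagedFlux/PagedResponse types from this azure-core preview expose byPage() and items():

```java
import com.azure.core.http.rest.PagedFlux;
import com.azure.security.keyvault.keys.KeyAsyncClient;
import com.azure.security.keyvault.keys.models.KeyBase;

class ListKeysSketch {
    // Item-by-item: PagedFlux is a Flux, so it can be subscribed to directly;
    // continuation pages are fetched transparently as the stream is consumed.
    static void listByItem(KeyAsyncClient client) {
        client.listKeys().subscribe(keyBase ->
            System.out.printf("Key: %s%n", keyBase.name()));
    }

    // Page-by-page: byPage() surfaces each PagedResponse, useful when the
    // caller wants to react at page boundaries.
    static void listByPage(KeyAsyncClient client) {
        PagedFlux<KeyBase> keys = client.listKeys();
        keys.byPage().subscribe(page ->
            System.out.printf("Fetched a page with %d keys%n", page.items().size()));
    }
}
```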
+ */ + @ServiceMethod(returns = ReturnType.COLLECTION) + public PagedFlux listDeletedKeys() { + return new PagedFlux<>( + () -> monoContext(context -> listDeletedKeysFirstPage()), + continuationToken -> monoContext(context -> listDeletedKeysNextPage(continuationToken))); + } + + /* + * Gets attributes of all the keys given by the {@code nextPageLink} that was retrieved from a call to + * {@link KeyAsyncClient#listDeletedKeys()}. + * + * @param continuationToken The {@link PagedResponse#nextLink()} from a previous, successful call to one of the list operations. + * @return A {@link Mono} of {@link PagedResponse} from the next page of results. + */ + private Mono> listDeletedKeysNextPage(String continuationToken) { + return service.getDeletedKeys(endpoint, continuationToken, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) + .doOnRequest(ignored -> logger.info("Listing next deleted keys page - Page {} ", continuationToken)) + .doOnSuccess(response -> logger.info("Listed next deleted keys page - Page {} ", continuationToken)) + .doOnError(error -> logger.warning("Failed to list next deleted keys page - Page {} ", continuationToken, error)); + } + + /* + * Calls the service and retrieve first page result. It makes one call and retrieve {@code DEFAULT_MAX_PAGE_RESULTS} values. */ - public Flux listDeletedKeys() { + private Mono> listDeletedKeysFirstPage() { return service.getDeletedKeys(endpoint, DEFAULT_MAX_PAGE_RESULTS, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) - .doOnRequest(ignored -> logger.info("Listing deleted keys")) - .doOnSuccess(response -> logger.info("Listed deleted keys")) - .doOnError(error -> logger.warning("Failed to list deleted keys", error)) - .flatMapMany(r -> extractAndFetchDeletedKeys(r, Context.NONE)); + .doOnRequest(ignored -> logger.info("Listing deleted keys")) + .doOnSuccess(response -> logger.info("Listed deleted keys")) + .doOnError(error -> logger.warning("Failed to list deleted keys", error)); } /** @@ -679,44 +739,34 @@ public Flux listDeletedKeys() { * @param name The name of the key. * @throws ResourceNotFoundException when a key with {@code name} doesn't exist in the key vault. * @throws HttpRequestException when a key with {@code name} is empty string. - * @return A {@link Flux} containing {@link KeyBase key} of all the versions of the specified key in the vault. Flux is empty if key with {@code name} does not exist in key vault. + * @return A {@link PagedFlux} containing {@link KeyBase key} of all the versions of the specified key in the vault. Flux is empty if key with {@code name} does not exist in key vault. */ - public Flux listKeyVersions(String name) { - return service.getKeyVersions(endpoint, name, DEFAULT_MAX_PAGE_RESULTS, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) - .doOnRequest(ignored -> logger.info("Listing key versions - {}", name)) - .doOnSuccess(response -> logger.info("Listed key versions - {}", name)) - .doOnError(error -> logger.warning(String.format("Failed to list key versions - {}", name), error)) - .flatMapMany(r -> extractAndFetchKeys(r, Context.NONE)); + @ServiceMethod(returns = ReturnType.COLLECTION) + public PagedFlux listKeyVersions(String name) { + return new PagedFlux<>( + () -> monoContext(context -> listKeyVersionsFirstPage(name)), + continuationToken -> monoContext(context -> listKeyVersionsNextPage(continuationToken))); } - /** - * Gets attributes of all the keys given by the {@code nextPageLink} that was retrieved from a call to - * {@link KeyAsyncClient#listKeys()}. 
- * - * @param nextPageLink The {@link KeyBasePage#nextLink()} from a previous, successful call to one of the list operations. - * @return A stream of {@link KeyBase key} from the next page of results. - */ - private Flux listKeysNext(String nextPageLink, Context context) { - return service.getKeys(endpoint, nextPageLink, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE).flatMapMany(r -> extractAndFetchKeys(r, context)); - } - - private Publisher extractAndFetchKeys(PagedResponse page, Context context) { - return ImplUtils.extractAndFetch(page, context, this::listKeysNext); + private Mono> listKeyVersionsFirstPage(String name) { + return service.getKeyVersions(endpoint, name, DEFAULT_MAX_PAGE_RESULTS, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) + .doOnRequest(ignored -> logger.info("Listing key versions - {}", name)) + .doOnSuccess(response -> logger.info("Listed key versions - {}", name)) + .doOnError(error -> logger.warning(String.format("Failed to list key versions - {}", name), error)); } - /** + /* * Gets attributes of all the keys given by the {@code nextPageLink} that was retrieved from a call to - * {@link KeyAsyncClient#listDeletedKeys()}. + * {@link KeyAsyncClient#listKeyVersions()}. * - * @param nextPageLink The {@link DeletedKeyPage#nextLink()} from a previous, successful call to one of the list operations. - * @return A stream of {@link KeyBase key} from the next page of results. + * @param continuationToken The {@link PagedResponse#nextLink()} from a previous, successful call to one of the listKeys operations. + * @return A {@link Mono} of {@link PagedResponse} from the next page of results. */ - private Flux listDeletedKeysNext(String nextPageLink, Context context) { - return service.getDeletedKeys(endpoint, nextPageLink, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE).flatMapMany(r -> extractAndFetchDeletedKeys(r, context)); - } - - private Publisher extractAndFetchDeletedKeys(PagedResponse page, Context context) { - return ImplUtils.extractAndFetch(page, context, this::listDeletedKeysNext); + private Mono> listKeyVersionsNextPage(String continuationToken) { + return service.getKeys(endpoint, continuationToken, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) + .doOnRequest(ignored -> logger.info("Listing next key versions page - Page {} ", continuationToken)) + .doOnSuccess(response -> logger.info("Listed next key versions page - Page {} ", continuationToken)) + .doOnError(error -> logger.warning("Failed to list next key versions page - Page {} ", continuationToken, error)); } } diff --git a/sdk/keyvault/azure-keyvault-keys/src/test/java/com/azure/security/keyvault/keys/KeyAsyncClientTest.java b/sdk/keyvault/azure-keyvault-keys/src/test/java/com/azure/security/keyvault/keys/KeyAsyncClientTest.java index 2600b68df917c..dd0b711996b88 100644 --- a/sdk/keyvault/azure-keyvault-keys/src/test/java/com/azure/security/keyvault/keys/KeyAsyncClientTest.java +++ b/sdk/keyvault/azure-keyvault-keys/src/test/java/com/azure/security/keyvault/keys/KeyAsyncClientTest.java @@ -337,7 +337,7 @@ public void getDeletedKey() { assertNotNull(deletedKey); }).verifyComplete(); pollOnKeyDeletion(keyToDeleteAndGet.name()); - sleep(30000); + sleepInRecordMode(30000); StepVerifier.create(client.getDeletedKey(keyToDeleteAndGet.name())) .assertNext(deletedKeyResponse -> { @@ -353,7 +353,7 @@ public void getDeletedKey() { assertEquals(HttpResponseStatus.NO_CONTENT.code(), voidResponse.statusCode()); }).verifyComplete(); pollOnKeyPurge(keyToDeleteAndGet.name()); - sleep(15000); + sleepInRecordMode(15000); }); 
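The test changes replace unconditional sleep calls with sleepInRecordMode, so the waits only happen when the tests run against the live service and are skipped while replaying recorded sessions. The real helper lives in the shared test base and is not shown in this diff; a rough sketch of what such a helper might look like, with isPlaybackMode() as a hypothetical flag:

    // Hypothetical sketch only; the actual implementation in the test base may differ.
    protected void sleepInRecordMode(long millis) {
        if (isPlaybackMode()) {       // hypothetical: true when replaying recorded HTTP traffic
            return;                   // recordings replay instantly, so no delay is needed
        }
        try {
            Thread.sleep(millis);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    }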
} // @@ -421,9 +421,9 @@ public void listKeyVersions() { client.createKey(key).subscribe(keyResponse -> assertKeyEquals(key, keyResponse.value())); sleepInRecordMode(1000); } - sleep(30000); + sleepInRecordMode(30000); client.listKeyVersions(keyName).subscribe(output::add); - sleep(30000); + sleepInRecordMode(30000); assertEquals(keyVersions.size(), output.size()); @@ -455,9 +455,9 @@ public void listKeys() { client.createKey(key).subscribe(keyResponse -> assertKeyEquals(key, keyResponse.value())); sleepInRecordMode(1000); } - sleep(30000); + sleepInRecordMode(30000); client.listKeys().subscribe(output::add); - sleep(30000); + sleepInRecordMode(30000); for (KeyBase actualKey : output) { if (keys.containsKey(actualKey.name())) { diff --git a/sdk/keyvault/azure-keyvault-keys/src/test/java/com/azure/security/keyvault/keys/KeyClientTest.java b/sdk/keyvault/azure-keyvault-keys/src/test/java/com/azure/security/keyvault/keys/KeyClientTest.java index 8b3eb47ac5763..a07fa17a52de8 100644 --- a/sdk/keyvault/azure-keyvault-keys/src/test/java/com/azure/security/keyvault/keys/KeyClientTest.java +++ b/sdk/keyvault/azure-keyvault-keys/src/test/java/com/azure/security/keyvault/keys/KeyClientTest.java @@ -217,7 +217,7 @@ public void restoreKey() { pollOnKeyDeletion(keyToBackupAndRestore.name()); client.purgeDeletedKey(keyToBackupAndRestore.name()); pollOnKeyPurge(keyToBackupAndRestore.name()); - sleep(60000); + sleepInRecordMode(60000); Key restoredKey = client.restoreKey(backupBytes).value(); assertEquals(keyToBackupAndRestore.name(), restoredKey.name()); assertEquals(keyToBackupAndRestore.expires(), restoredKey.expires()); @@ -240,7 +240,7 @@ public void listKeys() { HashMap keysToList = keys; for (KeyCreateOptions key : keysToList.values()) { assertKeyEquals(key, client.createKey(key)); - sleep(5000); + sleepInRecordMode(5000); } for (KeyBase actualKey : client.listKeys()) { diff --git a/sdk/keyvault/azure-keyvault-secrets/README.md b/sdk/keyvault/azure-keyvault-secrets/README.md index e8ffe95e55d5e..70a2c89a0c2ae 100644 --- a/sdk/keyvault/azure-keyvault-secrets/README.md +++ b/sdk/keyvault/azure-keyvault-secrets/README.md @@ -287,7 +287,7 @@ This project has adopted the [Microsoft Open Source Code of Conduct](https://ope [source_code]: https://github.com/Azure/azure-sdk-for-java/tree/master/sdk/keyvault/azure-keyvault-secrets/src -[api_documentation]: not-valid-link +[api_documentation]: https://azure.github.io/azure-sdk-for-java/track2reports/index.html [azure_identity]: https://github.com/Azure/azure-sdk-for-java/tree/master/identity/client [azkeyvault_docs]: https://docs.microsoft.com/en-us/azure/key-vault/ [maven]: https://maven.apache.org/ diff --git a/sdk/keyvault/azure-keyvault-secrets/src/main/java/com/azure/security/keyvault/secrets/SecretAsyncClient.java b/sdk/keyvault/azure-keyvault-secrets/src/main/java/com/azure/security/keyvault/secrets/SecretAsyncClient.java index 0ed4b9d76c89c..8adfceac47ffe 100644 --- a/sdk/keyvault/azure-keyvault-secrets/src/main/java/com/azure/security/keyvault/secrets/SecretAsyncClient.java +++ b/sdk/keyvault/azure-keyvault-secrets/src/main/java/com/azure/security/keyvault/secrets/SecretAsyncClient.java @@ -4,21 +4,20 @@ package com.azure.security.keyvault.secrets; import com.azure.core.http.HttpPipeline; -import com.azure.core.http.rest.PagedResponse; import com.azure.core.http.rest.Response; -import com.azure.core.http.rest.SimpleResponse; import com.azure.core.http.rest.VoidResponse; +import com.azure.core.http.rest.PagedResponse; +import 
com.azure.core.http.rest.PagedFlux; +import com.azure.core.http.rest.SimpleResponse; import com.azure.core.implementation.RestProxy; +import com.azure.core.implementation.annotation.ReturnType; import com.azure.core.implementation.annotation.ServiceClient; -import com.azure.core.implementation.util.ImplUtils; +import com.azure.core.implementation.annotation.ServiceMethod; import com.azure.core.util.Context; import com.azure.core.util.logging.ClientLogger; -import com.azure.security.keyvault.secrets.implementation.SecretBasePage; import com.azure.security.keyvault.secrets.models.DeletedSecret; import com.azure.security.keyvault.secrets.models.Secret; import com.azure.security.keyvault.secrets.models.SecretBase; -import com.azure.security.keyvault.secrets.implementation.DeletedSecretPage; -import org.reactivestreams.Publisher; import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; @@ -29,6 +28,8 @@ import com.azure.core.exception.ResourceModifiedException; import com.azure.core.exception.HttpRequestException; +import static com.azure.core.implementation.util.FluxUtil.monoContext; + /** * The SecretAsyncClient provides asynchronous methods to manage {@link Secret secrets} in the Azure Key Vault. The client * supports creating, retrieving, updating, deleting, purging, backing up, restoring and listing the {@link Secret secrets}. The client @@ -38,6 +39,7 @@ * {@codesnippet com.azure.security.keyvault.secretclient.async.construct} * * @see SecretClientBuilder + * @see PagedFlux */ @ServiceClient(builder = SecretClientBuilder.class, isAsync = true, serviceInterfaces = SecretService.class) public final class SecretAsyncClient { @@ -89,6 +91,7 @@ public final class SecretAsyncClient { * @throws HttpRequestException if {@link Secret#name() name} or {@link Secret#value() value} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response#value() value} contains the {@link Secret created secret}. */ + @ServiceMethod(returns = ReturnType.SINGLE) public Mono> setSecret(Secret secret) { Objects.requireNonNull(secret, "The Secret input parameter cannot be null."); SecretRequestParameters parameters = new SecretRequestParameters() @@ -97,10 +100,10 @@ public Mono> setSecret(Secret secret) { .contentType(secret.contentType()) .secretAttributes(new SecretRequestAttributes(secret)); - return service.setSecret(endpoint, secret.name(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE) + return monoContext(context -> service.setSecret(endpoint, secret.name(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE) .doOnRequest(ignored -> logger.info("Setting secret - {}", secret.name())) .doOnSuccess(response -> logger.info("Set secret - {}", response.value().name())) - .doOnError(error -> logger.warning("Failed to set secret - {}", secret.name(), error)); + .doOnError(error -> logger.warning("Failed to set secret - {}", secret.name(), error))); } /** @@ -121,12 +124,13 @@ public Mono> setSecret(Secret secret) { * @throws HttpRequestException if {@code name} or {@code value} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response#value() value} contains the {@link Secret created secret}. 
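Every single-value operation in this client is now wrapped in FluxUtil.monoContext(context -> ...), which defers the service call until subscription and hands it the caller's Reactor subscriber context. The sketch below is purely conceptual and is not the shipped FluxUtil source; toAzureContext is a hypothetical reactor-to-azure Context conversion:

    // Conceptual sketch of the wrapping pattern, not the actual implementation.
    static <T> Mono<T> monoContext(Function<Context, Mono<T>> serviceCall) {
        return Mono.subscriberContext()                             // capture the reactor Context at subscribe time
            .map(reactorContext -> toAzureContext(reactorContext))  // hypothetical conversion helper
            .flatMap(serviceCall);                                  // run the deferred call with that context
    }

The visible effect in this diff is simply that each call is deferred and context-aware, while the doOnRequest/doOnSuccess/doOnError logging is unchanged.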
*/ + @ServiceMethod(returns = ReturnType.SINGLE) public Mono> setSecret(String name, String value) { SecretRequestParameters parameters = new SecretRequestParameters().value(value); - return service.setSecret(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE) + return monoContext(context -> service.setSecret(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE) .doOnRequest(ignored -> logger.info("Setting secret - {}", name)) .doOnSuccess(response -> logger.info("Set secret - {}", response.value().name())) - .doOnError(error -> logger.warning("Failed to set secret - {}", name, error)); + .doOnError(error -> logger.warning("Failed to set secret - {}", name, error))); } /** @@ -149,15 +153,16 @@ public Mono> setSecret(String name, String value) { * @throws HttpRequestException if {@code name} name} or {@code version} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response#value() value} contains the requested {@link Secret secret}. */ + @ServiceMethod(returns = ReturnType.SINGLE) public Mono> getSecret(String name, String version) { - String secretVersion = ""; - if (version != null) { - secretVersion = version; + if (version == null) { + return getSecret(name); } - return service.getSecret(endpoint, name, secretVersion, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) + + return monoContext(context -> service.getSecret(endpoint, name, version, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) .doOnRequest(ignoredValue -> logger.info("Retrieving secret - {}", name)) .doOnSuccess(response -> logger.info("Retrieved secret - {}", response.value().name())) - .doOnError(error -> logger.warning("Failed to get secret - {}", name, error)); + .doOnError(error -> logger.warning("Failed to get secret - {}", name, error))); } /** @@ -178,16 +183,13 @@ public Mono> getSecret(String name, String version) { * @throws HttpRequestException if {@link SecretBase#name()} name} or {@link SecretBase#version() version} is empty string. * @return A {@link Response} whose {@link Response#value() value} contains the requested {@link Secret secret}. */ + @ServiceMethod(returns = ReturnType.SINGLE) public Mono> getSecret(SecretBase secretBase) { Objects.requireNonNull(secretBase, "The Secret Base parameter cannot be null."); - String secretVersion = ""; - if (secretBase.version() != null) { - secretVersion = secretBase.version(); + if (secretBase.version() == null) { + return getSecret(secretBase.name()); } - return service.getSecret(endpoint, secretBase.name(), secretVersion, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) - .doOnRequest(ignoredValue -> logger.info("Retrieving secret - {}", secretBase.name())) - .doOnSuccess(response -> logger.info("Retrieved secret - {}", response.value().name())) - .doOnError(error -> logger.warning("Failed to get secret - {}", secretBase.name(), error)); + return getSecret(secretBase.name(), secretBase.version()); } /** * Get the latest version of the specified secret from the key vault. The get operation is applicable to any secret stored in Azure Key Vault. @@ -207,6 +209,7 @@ public Mono> getSecret(SecretBase secretBase) { * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response#value() value} contains the requested {@link Secret secret}. 
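The getSecret overloads now treat a null version as "latest" by delegating to getSecret(name) instead of substituting an empty string. A small usage sketch, assuming a secretAsyncClient built via SecretClientBuilder; the secret name and version id are hypothetical:

    // Latest version of the secret.
    secretAsyncClient.getSecret("db-password")
        .subscribe(response -> System.out.printf("Value: %s%n", response.value().value()));

    // Specific version; a null version now falls back to the latest instead of being sent as "".
    secretAsyncClient.getSecret("db-password", "6a385b124ef8")   // hypothetical version id
        .subscribe(response -> System.out.printf("Value: %s%n", response.value().value()));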
*/ + @ServiceMethod(returns = ReturnType.SINGLE) public Mono> getSecret(String name) { return getSecret(name, ""); } @@ -236,6 +239,7 @@ public Mono> getSecret(String name) { * @throws HttpRequestException if {@link SecretBase#name()} name} or {@link SecretBase#version() version} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response#value() value} contains the {@link SecretBase updated secret}. */ + @ServiceMethod(returns = ReturnType.SINGLE) public Mono> updateSecret(SecretBase secret) { Objects.requireNonNull(secret, "The secret input parameter cannot be null."); SecretRequestParameters parameters = new SecretRequestParameters() @@ -243,10 +247,10 @@ public Mono> updateSecret(SecretBase secret) { .contentType(secret.contentType()) .secretAttributes(new SecretRequestAttributes(secret)); - return service.updateSecret(endpoint, secret.name(), secret.version(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE) + return monoContext(context -> service.updateSecret(endpoint, secret.name(), secret.version(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE) .doOnRequest(ignored -> logger.info("Updating secret - {}", secret.name())) .doOnSuccess(response -> logger.info("Updated secret - {}", response.value().name())) - .doOnError(error -> logger.warning("Failed to update secret - {}", secret.name(), error)); + .doOnError(error -> logger.warning("Failed to update secret - {}", secret.name(), error))); } /** @@ -267,11 +271,12 @@ public Mono> updateSecret(SecretBase secret) { * @throws HttpRequestException when a secret with {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response#value() value} contains the {@link DeletedSecret deleted secret}. */ + @ServiceMethod(returns = ReturnType.SINGLE) public Mono> deleteSecret(String name) { - return service.deleteSecret(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) + return monoContext(context -> service.deleteSecret(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) .doOnRequest(ignored -> logger.info("Deleting secret - {}", name)) .doOnSuccess(response -> logger.info("Deleted secret - {}", response.value().name())) - .doOnError(error -> logger.warning("Failed to delete secret - {}", name, error)); + .doOnError(error -> logger.warning("Failed to delete secret - {}", name, error))); } /** @@ -292,11 +297,12 @@ public Mono> deleteSecret(String name) { * @throws HttpRequestException when a secret with {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response#value() value} contains the {@link DeletedSecret deleted secret}. 
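Taken together, deleteSecret, getDeletedSecret, recoverDeletedSecret and purgeDeletedSecret cover the soft-delete lifecycle. A hedged sketch of recovering a deleted secret on a soft-delete enabled vault; in real code a polling delay is needed between the steps, which is what the tests in this PR do with pollOnSecretDeletion:

    // "app-secret" is a hypothetical name; error handling and deletion polling are omitted.
    secretAsyncClient.deleteSecret("app-secret")
        .then(secretAsyncClient.getDeletedSecret("app-secret"))
        .then(secretAsyncClient.recoverDeletedSecret("app-secret"))
        .subscribe(recovered -> System.out.printf("Recovered secret: %s%n", recovered.value().name()));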
*/ + @ServiceMethod(returns = ReturnType.SINGLE) public Mono> getDeletedSecret(String name) { - return service.getDeletedSecret(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) + return monoContext(context -> service.getDeletedSecret(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) .doOnRequest(ignored -> logger.info("Retrieving deleted secret - {}", name)) .doOnSuccess(response -> logger.info("Retrieved deleted secret - {}", response.value().name())) - .doOnError(error -> logger.warning("Failed to retrieve deleted secret - {}", name, error)); + .doOnError(error -> logger.warning("Failed to retrieve deleted secret - {}", name, error))); } /** @@ -317,11 +323,12 @@ public Mono> getDeletedSecret(String name) { * @throws HttpRequestException when a secret with {@code name} is empty string. * @return A {@link Mono} containing a {@link VoidResponse}. */ + @ServiceMethod(returns = ReturnType.SINGLE) public Mono purgeDeletedSecret(String name) { - return service.purgeDeletedSecret(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) + return monoContext(context -> service.purgeDeletedSecret(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) .doOnRequest(ignored -> logger.info("Purging deleted secret - {}", name)) .doOnSuccess(response -> logger.info("Purged deleted secret - {}", name)) - .doOnError(error -> logger.warning("Failed to purge deleted secret - {}", name, error)); + .doOnError(error -> logger.warning("Failed to purge deleted secret - {}", name, error))); } /** @@ -342,11 +349,12 @@ public Mono purgeDeletedSecret(String name) { * @throws HttpRequestException when a secret with {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response#value() value} contains the {@link Secret recovered secret}. */ + @ServiceMethod(returns = ReturnType.SINGLE) public Mono> recoverDeletedSecret(String name) { - return service.recoverDeletedSecret(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) + return monoContext(context -> service.recoverDeletedSecret(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) .doOnRequest(ignored -> logger.info("Recovering deleted secret - {}", name)) .doOnSuccess(response -> logger.info("Recovered deleted secret - {}", response.value().name())) - .doOnError(error -> logger.warning("Failed to recover deleted secret - {}", name, error)); + .doOnError(error -> logger.warning("Failed to recover deleted secret - {}", name, error))); } /** @@ -366,13 +374,14 @@ public Mono> recoverDeletedSecret(String name) { * @throws HttpRequestException when a secret with {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response#value() value} contains the backed up secret blob. 
*/ + @ServiceMethod(returns = ReturnType.SINGLE) public Mono> backupSecret(String name) { - return service.backupSecret(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) + return monoContext(context -> service.backupSecret(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) .doOnRequest(ignored -> logger.info("Backing up secret - {}", name)) .doOnSuccess(response -> logger.info("Backed up secret - {}", name)) .doOnError(error -> logger.warning("Failed to back up secret - {}", name, error)) .flatMap(base64URLResponse -> Mono.just(new SimpleResponse(base64URLResponse.request(), - base64URLResponse.statusCode(), base64URLResponse.headers(), base64URLResponse.value().value()))); + base64URLResponse.statusCode(), base64URLResponse.headers(), base64URLResponse.value().value())))); } /** @@ -391,12 +400,13 @@ public Mono> backupSecret(String name) { * @throws ResourceModifiedException when {@code backup} blob is malformed. * @return A {@link Mono} containing a {@link Response} whose {@link Response#value() value} contains the {@link Secret restored secret}. */ + @ServiceMethod(returns = ReturnType.SINGLE) public Mono> restoreSecret(byte[] backup) { SecretRestoreRequestParameters parameters = new SecretRestoreRequestParameters().secretBackup(backup); - return service.restoreSecret(endpoint, API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE) + return monoContext(context -> service.restoreSecret(endpoint, API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE) .doOnRequest(ignored -> logger.info("Attempting to restore secret")) .doOnSuccess(response -> logger.info("Restored secret - {}", response.value().name())) - .doOnError(error -> logger.warning("Failed to restore secret", error)); + .doOnError(error -> logger.warning("Failed to restore secret", error))); } /** @@ -412,14 +422,37 @@ public Mono> restoreSecret(byte[] backup) { * .map(Response::value); * * - * @return A {@link Flux} containing {@link SecretBase secret} of all the secrets in the vault. + * @return A {@link PagedFlux} containing {@link SecretBase secret} of all the secrets in the vault. + */ + @ServiceMethod(returns = ReturnType.COLLECTION) + public PagedFlux listSecrets() { + return new PagedFlux<>( + () -> monoContext(context -> listSecretsFirstPage()), + continuationToken -> monoContext(context -> listSecretsNextPage(continuationToken))); + } + + /* + * Gets attributes of all the secrets given by the {@code nextPageLink} that was retrieved from a call to + * {@link SecretAsyncClient#listSecrets()}. + * + * @param continuationToken The {@link PagedResponse#nextLink()} from a previous, successful call to one of the list operations. + * @return A {@link Mono} of {@link PagedResponse} from the next page of results. + */ + private Mono> listSecretsNextPage(String continuationToken) { + return service.getSecrets(endpoint, continuationToken, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) + .doOnRequest(ignoredValue -> logger.info("Retrieving the next secrets page - Page {}", continuationToken)) + .doOnSuccess(response -> logger.info("Retrieved the next secrets page - Page {}", continuationToken)) + .doOnError(error -> logger.warning("Failed to retrieve the next secrets page - Page {}", continuationToken, error)); + } + + /* + * Calls the service and retrieve first page result. It makes one call and retrieve {@code DEFAULT_MAX_PAGE_RESULTS} values. 
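The backupSecret and restoreSecret methods shown a little earlier in this hunk pair naturally: backup produces an opaque byte[] blob and restore recreates the secret from it. A sketch of the round trip, using a hypothetical secret name (in practice the original secret must be deleted and purged before restore succeeds, as the tests in this PR illustrate):

    secretAsyncClient.backupSecret("app-secret")
        .map(Response::value)                                        // the opaque byte[] backup blob
        .flatMap(backupBlob -> secretAsyncClient.restoreSecret(backupBlob))
        .subscribe(restored -> System.out.printf("Restored secret: %s%n", restored.value().name()));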
*/ - public Flux listSecrets() { + private Mono> listSecretsFirstPage() { return service.getSecrets(endpoint, DEFAULT_MAX_PAGE_RESULTS, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) - .doOnRequest(ignored -> logger.info("Listing secrets")) - .doOnSuccess(response -> logger.info("Listed secrets")) - .doOnError(error -> logger.warning("Failed to list secrets", error)) - .flatMapMany(r -> extractAndFetchSecrets(r, Context.NONE)); + .doOnRequest(ignored -> logger.info("Listing secrets")) + .doOnSuccess(response -> logger.info("Listed secrets")) + .doOnError(error -> logger.warning("Failed to list secrets", error)); } /** @@ -436,12 +469,35 @@ public Flux listSecrets() { * * @return A {@link Flux} containing all of the {@link DeletedSecret deleted secrets} in the vault. */ - public Flux listDeletedSecrets() { + @ServiceMethod(returns = ReturnType.COLLECTION) + public PagedFlux listDeletedSecrets() { + return new PagedFlux<>( + () -> monoContext(context -> listDeletedSecretsFirstPage()), + continuationToken -> monoContext(context -> listDeletedSecretsNextPage(continuationToken, Context.NONE))); + } + + /** + * Gets attributes of all the secrets given by the {@code nextPageLink} that was retrieved from a call to + * {@link SecretAsyncClient#listDeletedSecrets()}. + * + * @param continuationToken The {@link PagedResponse#nextLink()} from a previous, successful call to one of the list operations. + * @return A {@link Mono} of {@link PagedResponse} from the next page of results. + */ + private Mono> listDeletedSecretsNextPage(String continuationToken, Context context) { + return service.getDeletedSecrets(endpoint, continuationToken, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) + .doOnRequest(ignoredValue -> logger.info("Retrieving the next deleted secrets page - Page {}", continuationToken)) + .doOnSuccess(response -> logger.info("Retrieved the next deleted secrets page - Page {}", continuationToken)) + .doOnError(error -> logger.warning("Failed to retrieve the next deleted secrets page - Page {}", continuationToken, error)); + } + + /* + * Calls the service and retrieve first page result. It makes one call and retrieve {@code DEFAULT_MAX_PAGE_RESULTS} values. + */ + private Mono> listDeletedSecretsFirstPage() { return service.getDeletedSecrets(endpoint, DEFAULT_MAX_PAGE_RESULTS, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) - .doOnRequest(ignored -> logger.info("Listing deleted secrets")) - .doOnSuccess(response -> logger.info("Listed deleted secrets")) - .doOnError(error -> logger.warning("Failed to list deleted secrets", error)) - .flatMapMany(r -> extractAndFetchDeletedSecrets(r, Context.NONE)); + .doOnRequest(ignored -> logger.info("Listing deleted secrets")) + .doOnSuccess(response -> logger.info("Listed deleted secrets")) + .doOnError(error -> logger.warning("Failed to list deleted secrets", error)); } /** @@ -461,51 +517,37 @@ public Flux listDeletedSecrets() { * @param name The name of the secret. * @throws ResourceNotFoundException when a secret with {@code name} doesn't exist in the key vault. * @throws HttpRequestException when a secret with {@code name} is empty string. - * @return A {@link Flux} containing {@link SecretBase secret} of all the versions of the specified secret in the vault. Flux is empty if secret with {@code name} does not exist in key vault + * @return A {@link PagedFlux} containing {@link SecretBase secret} of all the versions of the specified secret in the vault. 
Flux is empty if secret with {@code name} does not exist in key vault */ - public Flux listSecretVersions(String name) { - return service.getSecretVersions(endpoint, name, DEFAULT_MAX_PAGE_RESULTS, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) - .doOnRequest(ignored -> logger.info("Listing secret versions - {}", name)) - .doOnSuccess(response -> logger.info("Listed secret versions - {}", name)) - .doOnError(error -> logger.warning(String.format("Failed to list secret versions - {}", name), error)) - .flatMapMany(r -> extractAndFetchSecrets(r, Context.NONE)); + @ServiceMethod(returns = ReturnType.COLLECTION) + public PagedFlux listSecretVersions(String name) { + return new PagedFlux<>( + () -> monoContext(context -> listSecretVersionsFirstPage(name)), + continuationToken -> monoContext(context -> listSecretVersionsNextPage(continuationToken))); } - /** - * Gets attributes of all the secrets given by the {@code nextPageLink} that was retrieved from a call to - * {@link SecretAsyncClient#listSecrets()}. + /* + * Gets attributes of all the secrets versions given by the {@code nextPageLink} that was retrieved from a call to + * {@link SecretAsyncClient#listSecretVersions()}. * - * @param nextPageLink The {@link SecretBasePage#nextLink()} from a previous, successful call to one of the list operations. - * @return A stream of {@link SecretBase secret} from the next page of results. + * @param continuationToken The {@link PagedResponse#nextLink()} from a previous, successful call to one of the list operations. + * + * @return A {@link Mono} of {@link PagedResponse} from the next page of results. */ - private Flux listSecretsNext(String nextPageLink, Context context) { - return service.getSecrets(endpoint, nextPageLink, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) - .doOnRequest(ignoredValue -> logger.info("Retrieving the next listing page - Page {}", nextPageLink)) - .doOnSuccess(response -> logger.info("Retrieved the next listing page - Page {}", nextPageLink)) - .doOnError(error -> logger.warning("Failed to retrieve the next listing page - Page {}", nextPageLink, error)) - .flatMapMany(r -> extractAndFetchSecrets(r, context)); + private Mono> listSecretVersionsNextPage(String continuationToken) { + return service.getSecrets(endpoint, continuationToken, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) + .doOnRequest(ignoredValue -> logger.info("Retrieving the next secrets versions page - Page {}", continuationToken)) + .doOnSuccess(response -> logger.info("Retrieved the next secrets versions page - Page {}", continuationToken)) + .doOnError(error -> logger.warning("Failed to retrieve the next secrets versions page - Page {}", continuationToken, error)); } - private Publisher extractAndFetchSecrets(PagedResponse page, Context context) { - return ImplUtils.extractAndFetch(page, context, this::listSecretsNext); - } - - /** - * Gets attributes of all the secrets given by the {@code nextPageLink} that was retrieved from a call to - * {@link SecretAsyncClient#listDeletedSecrets()}. - * - * @param nextPageLink The {@link DeletedSecretPage#nextLink()} from a previous, successful call to one of the list operations. - * @return A stream of {@link SecretBase secret} from the next page of results. + /* + * Calls the service and retrieve first page result. It makes one call and retrieve {@code DEFAULT_MAX_PAGE_RESULTS} values. 
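listSecretVersions only yields SecretBase attributes, so fetching the actual values takes a second hop through getSecret(SecretBase), mirroring the .map(Response::value) pattern in the Javadoc snippets above. A sketch, with a hypothetical secret name:

    secretAsyncClient.listSecretVersions("app-secret")
        .flatMap(secretBase -> secretAsyncClient.getSecret(secretBase))
        .subscribe(response -> System.out.printf("Version %s -> value %s%n",
            response.value().version(), response.value().value()));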
*/ - private Flux listDeletedSecretsNext(String nextPageLink, Context context) { - return service.getDeletedSecrets(endpoint, nextPageLink, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) - .doOnRequest(ignoredValue -> logger.info("Retrieving the next listing page - Page {}", nextPageLink)) - .doOnSuccess(response -> logger.info("Retrieved the next listing page - Page {}", nextPageLink)) - .doOnError(error -> logger.warning("Failed to retrieve the next listing page - Page {}", nextPageLink, error)) - .flatMapMany(r -> extractAndFetchDeletedSecrets(r, context)); - } - - private Publisher extractAndFetchDeletedSecrets(PagedResponse page, Context context) { - return ImplUtils.extractAndFetch(page, context, this::listDeletedSecretsNext); + private Mono> listSecretVersionsFirstPage(String name) { + return service.getSecretVersions(endpoint, name, DEFAULT_MAX_PAGE_RESULTS, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) + .doOnRequest(ignored -> logger.info("Listing secret versions - {}", name)) + .doOnSuccess(response -> logger.info("Listed secret versions - {}", name)) + .doOnError(error -> logger.warning(String.format("Failed to list secret versions - {}", name), error)); } } diff --git a/sdk/keyvault/azure-keyvault-secrets/src/samples/java/com/azure/security/keyvault/secrets/SecretClientJavaDocCodeSnippets.java b/sdk/keyvault/azure-keyvault-secrets/src/samples/java/com/azure/security/keyvault/secrets/SecretClientJavaDocCodeSnippets.java index 16875208d6e89..3b71f4e882ccc 100644 --- a/sdk/keyvault/azure-keyvault-secrets/src/samples/java/com/azure/security/keyvault/secrets/SecretClientJavaDocCodeSnippets.java +++ b/sdk/keyvault/azure-keyvault-secrets/src/samples/java/com/azure/security/keyvault/secrets/SecretClientJavaDocCodeSnippets.java @@ -13,7 +13,7 @@ import com.azure.security.keyvault.secrets.models.SecretBase; /** - * This class contains code samples for generating javadocs through doclets for {@link SecretClient] + * This class contains code samples for generating javadocs through doclets for {@link SecretClient} */ public final class SecretClientJavaDocCodeSnippets { diff --git a/sdk/keyvault/azure-keyvault-secrets/src/test/java/com/azure/security/keyvault/secrets/SecretAsyncClientTest.java b/sdk/keyvault/azure-keyvault-secrets/src/test/java/com/azure/security/keyvault/secrets/SecretAsyncClientTest.java index bbae5720cb30b..2e11cd2c52487 100644 --- a/sdk/keyvault/azure-keyvault-secrets/src/test/java/com/azure/security/keyvault/secrets/SecretAsyncClientTest.java +++ b/sdk/keyvault/azure-keyvault-secrets/src/test/java/com/azure/security/keyvault/secrets/SecretAsyncClientTest.java @@ -217,7 +217,7 @@ public void getDeletedSecret() { assertNotNull(deletedSecret); }).verifyComplete(); pollOnSecretDeletion(secretToDeleteAndGet.name()); - sleep(30000); + sleepInRecordMode(30000); StepVerifier.create(client.getDeletedSecret(secretToDeleteAndGet.name())) .assertNext(deletedSecretResponse -> { @@ -233,7 +233,7 @@ public void getDeletedSecret() { assertEquals(HttpResponseStatus.NO_CONTENT.code(), voidResponse.statusCode()); }).verifyComplete(); pollOnSecretPurge(secretToDeleteAndGet.name()); - sleep(10000); + sleepInRecordMode(10000); }); } @@ -331,7 +331,7 @@ public void restoreSecret() { }).verifyComplete(); pollOnSecretPurge(secretToBackupAndRestore.name()); - sleep(60000); + sleepInRecordMode(60000); StepVerifier.create(client.restoreSecret(backup)) .assertNext(response -> { @@ -416,9 +416,9 @@ public void listSecretVersions() { client.setSecret(secret).subscribe(secretResponse -> 
assertSecretEquals(secret, secretResponse.value())); sleepInRecordMode(1000); } - sleep(30000); + sleepInRecordMode(30000); client.listSecretVersions(secretName).subscribe(output::add); - sleep(30000); + sleepInRecordMode(30000); assertEquals(secretVersions.size(), output.size()); @@ -450,9 +450,9 @@ public void listSecrets() { client.setSecret(secret).subscribe(secretResponse -> assertSecretEquals(secret, secretResponse.value())); sleepInRecordMode(1000); } - sleep(30000); + sleepInRecordMode(30000); client.listSecrets().subscribe(output::add); - sleep(30000); + sleepInRecordMode(30000); for (SecretBase actualSecret : output) { if (secretsToList.containsKey(actualSecret.name())) { diff --git a/sdk/keyvault/pom.service.xml b/sdk/keyvault/pom.service.xml index 862a0ba0d96ee..98d1b31a7867c 100644 --- a/sdk/keyvault/pom.service.xml +++ b/sdk/keyvault/pom.service.xml @@ -19,7 +19,7 @@ microsoft-azure-keyvault-complete - ../../core + ../core ../identity/azure-identity azure-keyvault-keys azure-keyvault-secrets diff --git a/storage/client/blob/src/main/java/com/azure/storage/blob/BlobAsyncClient.java b/storage/client/blob/src/main/java/com/azure/storage/blob/BlobAsyncClient.java index fa4a0e77edac5..2601107aaa0b6 100644 --- a/storage/client/blob/src/main/java/com/azure/storage/blob/BlobAsyncClient.java +++ b/storage/client/blob/src/main/java/com/azure/storage/blob/BlobAsyncClient.java @@ -24,6 +24,8 @@ import com.azure.storage.blob.models.ReliableDownloadOptions; import com.azure.storage.blob.models.SourceModifiedAccessConditions; import com.azure.storage.blob.models.StorageAccountInfo; +import com.azure.storage.blob.models.UserDelegationKey; +import com.azure.storage.common.credentials.SharedKeyCredential; import io.netty.buffer.ByteBuf; import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; @@ -38,36 +40,36 @@ import java.nio.channels.AsynchronousFileChannel; import java.nio.file.Paths; import java.nio.file.StandardOpenOption; +import java.time.OffsetDateTime; import java.util.ArrayList; import java.util.List; import static com.azure.storage.blob.Utility.postProcessResponse; /** - * Client to a blob of any type: block, append, or page. It may only be instantiated through a {@link BlobClientBuilder} or via - * the method {@link ContainerAsyncClient#getBlobAsyncClient(String)}. This class does not hold any state about a particular - * blob, but is instead a convenient way of sending appropriate requests to the resource on the service. + * Client to a blob of any type: block, append, or page. It may only be instantiated through a {@link BlobClientBuilder} + * or via the method {@link ContainerAsyncClient#getBlobAsyncClient(String)}. This class does not hold any state about a + * particular blob, but is instead a convenient way of sending appropriate requests to the resource on the service. * *

    * This client offers the ability to download blobs. Note that uploading data is specific to each type of blob. Please * refer to the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient} for upload options. This - * client can be converted into one of these clients easily through the methods {@link #asBlockBlobAsyncClient}, - * {@link #asPageBlobAsyncClient}, and {@link #asAppendBlobAsyncClient()}. + * client can be converted into one of these clients easily through the methods {@link #asBlockBlobAsyncClient}, {@link + * #asPageBlobAsyncClient}, and {@link #asAppendBlobAsyncClient()}. * *

    * This client contains operations on a blob. Operations on a container are available on {@link ContainerAsyncClient}, * and operations on the service are available on {@link StorageAsyncClient}. * *

    - * Please refer to the Azure Docs - * for more information. + * Please refer to the Azure + * Docs for more information. * *

    - * Note this client is an async client that returns reactive responses from Spring Reactor Core - * project (https://projectreactor.io/). Calling the methods in this client will NOT - * start the actual network operation, until {@code .subscribe()} is called on the reactive response. - * You can simply convert one of these responses to a {@link java.util.concurrent.CompletableFuture} - * object through {@link Mono#toFuture()}. + * Note this client is an async client that returns reactive responses from Spring Reactor Core project + * (https://projectreactor.io/). Calling the methods in this client will NOT start the actual network + * operation, until {@code .subscribe()} is called on the reactive response. You can simply convert one of these + * responses to a {@link java.util.concurrent.CompletableFuture} object through {@link Mono#toFuture()}. */ public class BlobAsyncClient { private static final int BLOB_DEFAULT_DOWNLOAD_BLOCK_SIZE = 4 * Constants.MB; @@ -78,6 +80,7 @@ public class BlobAsyncClient { /** * Package-private constructor for use by {@link BlobClientBuilder}. + * * @param azureBlobStorageBuilder the API client builder for blob storage API */ BlobAsyncClient(AzureBlobStorageBuilder azureBlobStorageBuilder, String snapshot) { @@ -89,8 +92,7 @@ public class BlobAsyncClient { * Creates a new {@link BlockBlobAsyncClient} to this resource, maintaining configurations. Only do this for blobs * that are known to be block blobs. * - * @return - * A {@link BlockBlobAsyncClient} to this resource. + * @return A {@link BlockBlobAsyncClient} to this resource. */ public BlockBlobAsyncClient asBlockBlobAsyncClient() { return new BlockBlobAsyncClient(new AzureBlobStorageBuilder() @@ -102,8 +104,7 @@ public BlockBlobAsyncClient asBlockBlobAsyncClient() { * Creates a new {@link AppendBlobAsyncClient} to this resource, maintaining configurations. Only do this for blobs * that are known to be append blobs. * - * @return - * A {@link AppendBlobAsyncClient} to this resource. + * @return A {@link AppendBlobAsyncClient} to this resource. */ public AppendBlobAsyncClient asAppendBlobAsyncClient() { return new AppendBlobAsyncClient(new AzureBlobStorageBuilder() @@ -115,8 +116,7 @@ public AppendBlobAsyncClient asAppendBlobAsyncClient() { * Creates a new {@link PageBlobAsyncClient} to this resource, maintaining configurations. Only do this for blobs * that are known to be page blobs. * - * @return - * A {@link PageBlobAsyncClient} to this resource. + * @return A {@link PageBlobAsyncClient} to this resource. */ public PageBlobAsyncClient asPageBlobAsyncClient() { return new PageBlobAsyncClient(new AzureBlobStorageBuilder() @@ -125,8 +125,8 @@ public PageBlobAsyncClient asPageBlobAsyncClient() { } /** - * Initializes a {@link ContainerAsyncClient} object pointing to the container this blob is in. This method does - * not create a container. It simply constructs the URL to the container and offers access to methods relevant to + * Initializes a {@link ContainerAsyncClient} object pointing to the container this blob is in. This method does not + * create a container. It simply constructs the URL to the container and offers access to methods relevant to * containers. * * @return A {@link ContainerAsyncClient} object pointing to the container containing the blob @@ -140,6 +140,7 @@ public ContainerAsyncClient getContainerAsyncClient() { /** * Gets the URL of the blob represented by this client. + * * @return the URL. * @throws RuntimeException If the blob is using a malformed URL. 
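The reflowed class Javadoc above describes a type-agnostic blob client that can be specialized without rebuilding configuration. A brief sketch, assuming a containerAsyncClient that already exists and a hypothetical blob name:

    BlobAsyncClient blobAsyncClient = containerAsyncClient.getBlobAsyncClient("data.bin");

    // Same blob, block-blob specific operations (only appropriate if the blob really is a block blob).
    BlockBlobAsyncClient blockBlobAsyncClient = blobAsyncClient.asBlockBlobAsyncClient();

    // Nothing happens on the wire until subscribe() is called on the returned Mono.
    blobAsyncClient.exists()
        .subscribe(response -> System.out.printf("Blob exists: %s%n", response.value()));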
*/ @@ -158,8 +159,7 @@ public URL getBlobUrl() { /** * Gets if the blob this client represents exists in the cloud. * - * @return - * true if the blob exists, false if it doesn't + * @return true if the blob exists, false if it doesn't */ public Mono> exists() { return this.getProperties() @@ -174,11 +174,8 @@ public Mono> exists() { * Copies the data at the source URL to a blob. For more information, see the * Azure Docs * - * @param sourceURL - * The source URL to copy from. URLs outside of Azure may only be copied to block blobs. - * - * @return - * A reactive response containing the copy ID for the long running operation. + * @param sourceURL The source URL to copy from. URLs outside of Azure may only be copied to block blobs. + * @return A reactive response containing the copy ID for the long running operation. */ public Mono> startCopyFromURL(URL sourceURL) { return this.startCopyFromURL(sourceURL, null, null, null); @@ -188,23 +185,17 @@ public Mono> startCopyFromURL(URL sourceURL) { * Copies the data at the source URL to a blob. For more information, see the * Azure Docs * - * @param sourceURL - * The source URL to copy from. URLs outside of Azure may only be copied to block blobs. - * @param metadata - * {@link Metadata} - * @param sourceModifiedAccessConditions - * {@link ModifiedAccessConditions} against the source. Standard HTTP Access conditions related to the - * modification of data. ETag and LastModifiedTime are used to construct conditions related to when the blob - * was changed relative to the given request. The request will fail if the specified condition is not - * satisfied. - * @param destAccessConditions - * {@link BlobAccessConditions} against the destination. - * - * @return - * A reactive response containing the copy ID for the long running operation. + * @param sourceURL The source URL to copy from. URLs outside of Azure may only be copied to block blobs. + * @param metadata {@link Metadata} + * @param sourceModifiedAccessConditions {@link ModifiedAccessConditions} against the source. Standard HTTP Access + * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions + * related to when the blob was changed relative to the given request. The request will fail if the specified + * condition is not satisfied. + * @param destAccessConditions {@link BlobAccessConditions} against the destination. + * @return A reactive response containing the copy ID for the long running operation. */ public Mono> startCopyFromURL(URL sourceURL, Metadata metadata, - ModifiedAccessConditions sourceModifiedAccessConditions, BlobAccessConditions destAccessConditions) { + ModifiedAccessConditions sourceModifiedAccessConditions, BlobAccessConditions destAccessConditions) { metadata = metadata == null ? new Metadata() : metadata; sourceModifiedAccessConditions = sourceModifiedAccessConditions == null ? 
new ModifiedAccessConditions() : sourceModifiedAccessConditions; @@ -218,7 +209,7 @@ public Mono> startCopyFromURL(URL sourceURL, Metadata metadata, .sourceIfNoneMatch(sourceModifiedAccessConditions.ifNoneMatch()); return postProcessResponse(this.azureBlobStorage.blobs().startCopyFromURLWithRestResponseAsync( - null, null, sourceURL, null, metadata, null, sourceConditions, + null, null, sourceURL, null, metadata, null, sourceConditions, destAccessConditions.modifiedAccessConditions(), destAccessConditions.leaseAccessConditions(), Context.NONE)) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().copyId())); } @@ -226,12 +217,9 @@ public Mono> startCopyFromURL(URL sourceURL, Metadata metadata, /** * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. * - * @param copyId - * The id of the copy operation to abort. Returned as the {@code copyId} field on the {@link - * BlobStartCopyFromURLHeaders} object. - * - * @return - * A reactive response signalling completion. + * @param copyId The id of the copy operation to abort. Returned as the {@code copyId} field on the {@link + * BlobStartCopyFromURLHeaders} object. + * @return A reactive response signalling completion. */ public Mono abortCopyFromURL(String copyId) { return this.abortCopyFromURL(copyId, null); @@ -240,15 +228,11 @@ public Mono abortCopyFromURL(String copyId) { /** * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. * - * @param copyId - * The id of the copy operation to abort. Returned as the {@code copyId} field on the {@link - * BlobStartCopyFromURLHeaders} object. - * @param leaseAccessConditions - * By setting lease access conditions, requests will fail if the provided lease does not match the active - * lease on the blob. - * - * @return - * A reactive response signalling completion. + * @param copyId The id of the copy operation to abort. Returned as the {@code copyId} field on the {@link + * BlobStartCopyFromURLHeaders} object. + * @param leaseAccessConditions By setting lease access conditions, requests will fail if the provided lease does + * not match the active lease on the blob. + * @return A reactive response signalling completion. */ public Mono abortCopyFromURL(String copyId, LeaseAccessConditions leaseAccessConditions) { return postProcessResponse(this.azureBlobStorage.blobs().abortCopyFromURLWithRestResponseAsync( @@ -259,11 +243,8 @@ public Mono abortCopyFromURL(String copyId, LeaseAccessConditions /** * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. * - * @param copySource - * The source URL to copy from. - * - * @return - * A reactive response containing the copy ID for the long running operation. + * @param copySource The source URL to copy from. + * @return A reactive response containing the copy ID for the long running operation. */ public Mono> copyFromURL(URL copySource) { return this.copyFromURL(copySource, null, null, null); @@ -272,23 +253,17 @@ public Mono> copyFromURL(URL copySource) { /** * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. * - * @param copySource - * The source URL to copy from. URLs outside of Azure may only be copied to block blobs. - * @param metadata - * {@link Metadata} - * @param sourceModifiedAccessConditions - * {@link ModifiedAccessConditions} against the source. Standard HTTP Access conditions related to the - * modification of data. 
ETag and LastModifiedTime are used to construct conditions related to when the blob - * was changed relative to the given request. The request will fail if the specified condition is not - * satisfied. - * @param destAccessConditions - * {@link BlobAccessConditions} against the destination. - * - * @return - * A reactive response containing the copy ID for the long running operation. + * @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs. + * @param metadata {@link Metadata} + * @param sourceModifiedAccessConditions {@link ModifiedAccessConditions} against the source. Standard HTTP Access + * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions + * related to when the blob was changed relative to the given request. The request will fail if the specified + * condition is not satisfied. + * @param destAccessConditions {@link BlobAccessConditions} against the destination. + * @return A reactive response containing the copy ID for the long running operation. */ public Mono> copyFromURL(URL copySource, Metadata metadata, - ModifiedAccessConditions sourceModifiedAccessConditions, BlobAccessConditions destAccessConditions) { + ModifiedAccessConditions sourceModifiedAccessConditions, BlobAccessConditions destAccessConditions) { metadata = metadata == null ? new Metadata() : metadata; sourceModifiedAccessConditions = sourceModifiedAccessConditions == null ? new ModifiedAccessConditions() : sourceModifiedAccessConditions; @@ -308,30 +283,27 @@ public Mono> copyFromURL(URL copySource, Metadata metadata, } /** - * Reads the entire blob. Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}. + * Reads the entire blob. Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or + * {@link AppendBlobClient}. * - * @return - * A reactive response containing the blob data. + * @return A reactive response containing the blob data. */ public Mono>> download() { return this.download(null, null, false, null); } /** - * Reads a range of bytes from a blob. Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}. + * Reads a range of bytes from a blob. Uploading data must be done from the {@link BlockBlobClient}, {@link + * PageBlobClient}, or {@link AppendBlobClient}. * - * @param range - * {@link BlobRange} - * @param accessConditions - * {@link BlobAccessConditions} - * @param rangeGetContentMD5 - * Whether the contentMD5 for the specified blob range should be returned. + * @param range {@link BlobRange} + * @param accessConditions {@link BlobAccessConditions} + * @param rangeGetContentMD5 Whether the contentMD5 for the specified blob range should be returned. * @param options {@link ReliableDownloadOptions} - * * @return A reactive response containing the blob data. */ public Mono>> download(BlobRange range, BlobAccessConditions accessConditions, - boolean rangeGetContentMD5, ReliableDownloadOptions options) { + boolean rangeGetContentMD5, ReliableDownloadOptions options) { return this.download(range, accessConditions, rangeGetContentMD5) .map(response -> new SimpleResponse<>( response.rawResponse(), @@ -345,18 +317,13 @@ public Mono>> download(BlobRange range, BlobAccessCond * Note that the response body has reliable download functionality built in, meaning that a failed download stream * will be automatically retried. 
This behavior may be configured with {@link ReliableDownloadOptions}. * - * @param range - * {@link BlobRange} - * @param accessConditions - * {@link BlobAccessConditions} - * @param rangeGetContentMD5 - * Whether the contentMD5 for the specified blob range should be returned. - * + * @param range {@link BlobRange} + * @param accessConditions {@link BlobAccessConditions} + * @param rangeGetContentMD5 Whether the contentMD5 for the specified blob range should be returned. * @return Emits the successful response. - * - * @apiNote ## Sample Code \n - * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=upload_download "Sample code for BlobAsyncClient.download")] \n - * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + * @apiNote ## Sample Code \n [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=upload_download + * "Sample code for BlobAsyncClient.download")] \n For more samples, please see the [Samples + * file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) */ Mono download(BlobRange range, BlobAccessConditions accessConditions, boolean rangeGetContentMD5) { range = range == null ? new BlobRange(0) : range; @@ -372,7 +339,7 @@ Mono download(BlobRange range, BlobAccessConditions acces return postProcessResponse(this.azureBlobStorage.blobs().downloadWithRestResponseAsync( null, null, snapshot, null, null, range.toHeaderValue(), getMD5, null, null, null, null, - accessConditions.leaseAccessConditions(), accessConditions.modifiedAccessConditions(), Context.NONE)) + accessConditions.leaseAccessConditions(), accessConditions.modifiedAccessConditions(), Context.NONE)) // Convert the autorest response to a DownloadAsyncResponse, which enable reliable download. .map(response -> { // If there wasn't an etag originally specified, lock on the one returned. @@ -388,7 +355,8 @@ Mono download(BlobRange range, BlobAccessConditions acces /** * Downloads the entire blob into a file specified by the path. The file will be created if it doesn't exist. - * Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}. + * Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link + * AppendBlobClient}. *

    * This method makes an extra HTTP call to get the length of the blob in the beginning. To avoid this extra call, * use the other overload providing the {@link BlobRange} parameter. @@ -402,21 +370,17 @@ public Mono downloadToFile(String filePath) { /** * Downloads a range of bytes blob into a file specified by the path. The file will be created if it doesn't exist. - * Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}. + * Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link + * AppendBlobClient}. *

    * This method makes an extra HTTP call to get the length of the blob in the beginning. To avoid this extra call, * provide the {@link BlobRange} parameter. * - * @param filePath - * A non-null {@link OutputStream} instance where the downloaded data will be written. - * @param range - * {@link BlobRange} - * @param blockSize - * the size of a chunk to download at a time, in bytes - * @param accessConditions - * {@link BlobAccessConditions} - * @param rangeGetContentMD5 - * Whether the contentMD5 for the specified blob range should be returned. + * @param filePath A non-null {@link OutputStream} instance where the downloaded data will be written. + * @param range {@link BlobRange} + * @param blockSize the size of a chunk to download at a time, in bytes + * @param accessConditions {@link BlobAccessConditions} + * @param rangeGetContentMD5 Whether the contentMD5 for the specified blob range should be returned. * @param options {@link ReliableDownloadOptions} * @return An empty response * @throws IllegalArgumentException If {@code blockSize} is less than 0 or greater than 100MB. @@ -478,8 +442,7 @@ private List sliceBlobRange(BlobRange blobRange, Integer blockSize) { /** * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots. * - * @return - * A reactive response signalling completion. + * @return A reactive response signalling completion. */ public Mono delete() { return this.delete(null, null); @@ -488,18 +451,14 @@ public Mono delete() { /** * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots. * - * @param deleteBlobSnapshotOptions - * Specifies the behavior for deleting the snapshots on this blob. {@code Include} will delete the base blob - * and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being deleted, you must - * pass null. - * @param accessConditions - * {@link BlobAccessConditions} - * - * @return - * A reactive response signalling completion. + * @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include} + * will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being + * deleted, you must pass null. + * @param accessConditions {@link BlobAccessConditions} + * @return A reactive response signalling completion. */ public Mono delete(DeleteSnapshotsOptionType deleteBlobSnapshotOptions, - BlobAccessConditions accessConditions) { + BlobAccessConditions accessConditions) { accessConditions = accessConditions == null ? new BlobAccessConditions() : accessConditions; return postProcessResponse(this.azureBlobStorage.blobs().deleteWithRestResponseAsync( @@ -512,8 +471,7 @@ public Mono delete(DeleteSnapshotsOptionType deleteBlobSnapshotOpt /** * Returns the blob's metadata and properties. * - * @return - * A reactive response containing the blob properties and metadata. + * @return A reactive response containing the blob properties and metadata. */ public Mono> getProperties() { return this.getProperties(null); @@ -522,11 +480,8 @@ public Mono> getProperties() { /** * Returns the blob's metadata and properties. * - * @param accessConditions - * {@link BlobAccessConditions} - * - * @return - * A reactive response containing the blob properties and metadata. + * @param accessConditions {@link BlobAccessConditions} + * @return A reactive response containing the blob properties and metadata. 
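The downloadToFile overloads documented above hide the chunked download machinery (sliceBlobRange and the block-size limit) behind a single call. A hedged usage sketch; the local path is hypothetical and the value emitted by the returned Mono is not relied upon:

    blobAsyncClient.downloadToFile("/tmp/data.bin")
        .subscribe(
            unused -> { },                                             // nothing useful is emitted
            error -> System.err.println("Download failed: " + error),  // service or local IO failure
            () -> System.out.println("Download complete"));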
*/ public Mono> getProperties(BlobAccessConditions accessConditions) { accessConditions = accessConditions == null ? new BlobAccessConditions() : accessConditions; @@ -539,34 +494,27 @@ public Mono> getProperties(BlobAccessConditions accessC } /** - * Changes a blob's HTTP header properties. if only one HTTP header is updated, the - * others will all be erased. In order to preserve existing values, they must be - * passed alongside the header being changed. For more information, see the + * Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In + * order to preserve existing values, they must be passed alongside the header being changed. For more information, + * see the * Azure Docs. * - * @param headers - * {@link BlobHTTPHeaders} - * - * @return - * A reactive response signalling completion. + * @param headers {@link BlobHTTPHeaders} + * @return A reactive response signalling completion. */ public Mono setHTTPHeaders(BlobHTTPHeaders headers) { return this.setHTTPHeaders(headers, null); } /** - * Changes a blob's HTTP header properties. if only one HTTP header is updated, the - * others will all be erased. In order to preserve existing values, they must be - * passed alongside the header being changed. For more information, see the + * Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In + * order to preserve existing values, they must be passed alongside the header being changed. For more information, + * see the * Azure Docs. * - * @param headers - * {@link BlobHTTPHeaders} - * @param accessConditions - * {@link BlobAccessConditions} - * - * @return - * A reactive response signalling completion. + * @param headers {@link BlobHTTPHeaders} + * @param accessConditions {@link BlobAccessConditions} + * @return A reactive response signalling completion. */ public Mono setHTTPHeaders(BlobHTTPHeaders headers, BlobAccessConditions accessConditions) { accessConditions = accessConditions == null ? new BlobAccessConditions() : accessConditions; @@ -578,32 +526,25 @@ public Mono setHTTPHeaders(BlobHTTPHeaders headers, BlobAccessCond } /** - * Changes a blob's metadata. The specified metadata in this method will replace existing - * metadata. If old values must be preserved, they must be downloaded and included in the - * call to this method. For more information, see the Azure Docs. - * - * @param metadata - * {@link Metadata} + * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values + * must be preserved, they must be downloaded and included in the call to this method. For more information, see the + * Azure Docs. * - * @return - * A reactive response signalling completion. + * @param metadata {@link Metadata} + * @return A reactive response signalling completion. */ public Mono setMetadata(Metadata metadata) { return this.setMetadata(metadata, null); } /** - * Changes a blob's metadata. The specified metadata in this method will replace existing - * metadata. If old values must be preserved, they must be downloaded and included in the - * call to this method. For more information, see the Azure Docs. - * - * @param metadata - * {@link Metadata} - * @param accessConditions - * {@link BlobAccessConditions} + * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values + * must be preserved, they must be downloaded and included in the call to this method. 
For more information, see the + * Azure Docs. * - * @return - * A reactive response signalling completion. + * @param metadata {@link Metadata} + * @param accessConditions {@link BlobAccessConditions} + * @return A reactive response signalling completion. */ public Mono setMetadata(Metadata metadata, BlobAccessConditions accessConditions) { metadata = metadata == null ? new Metadata() : metadata; @@ -619,8 +560,7 @@ public Mono setMetadata(Metadata metadata, BlobAccessConditions ac /** * Creates a read-only snapshot of a blob. * - * @return - * A reactive response containing the ID of the new snapshot. + * @return A reactive response containing the ID of the new snapshot. */ public Mono> createSnapshot() { return this.createSnapshot(null, null); @@ -629,13 +569,9 @@ public Mono> createSnapshot() { /** * Creates a read-only snapshot of a blob. * - * @param metadata - * {@link Metadata} - * @param accessConditions - * {@link BlobAccessConditions} - * - * @return - * A reactive response containing the ID of the new snapshot. + * @param metadata {@link Metadata} + * @param accessConditions {@link BlobAccessConditions} + * @return A reactive response containing the ID of the new snapshot. */ public Mono> createSnapshot(Metadata metadata, BlobAccessConditions accessConditions) { metadata = metadata == null ? new Metadata() : metadata; @@ -651,13 +587,11 @@ public Mono> createSnapshot(Metadata metadata, BlobAccessCondit /** * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of - * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's etag. - * - * @param tier - * The new tier for the blob. + * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's + * etag. * - * @return - * A reactive response signalling completion. + * @param tier The new tier for the blob. + * @return A reactive response signalling completion. */ public Mono setTier(AccessTier tier) { return this.setTier(tier, null); @@ -666,16 +600,13 @@ public Mono setTier(AccessTier tier) { /** * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of - * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's etag. - * - * @param tier - * The new tier for the blob. - * @param leaseAccessConditions - * By setting lease access conditions, requests will fail if the provided lease does not match the active - * lease on the blob. + * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's + * etag. * - * @return - * A reactive response signalling completion. + * @param tier The new tier for the blob. + * @param leaseAccessConditions By setting lease access conditions, requests will fail if the provided lease does + * not match the active lease on the blob. + * @return A reactive response signalling completion. 
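[Reviewer note] A small sketch of the replace-rather-than-merge behaviour called out for setMetadata above, assuming an existing BlobAsyncClient and that Metadata behaves like a Map of String pairs in this preview:

    import com.azure.storage.blob.BlobAsyncClient;
    import com.azure.storage.blob.models.Metadata;

    public final class SetMetadataSample {
        // Every call replaces the full metadata set, so both keys must be sent together
        // even if only "owner" changed; sending just one key would erase the other.
        static void replaceMetadata(BlobAsyncClient asyncClient) {
            Metadata metadata = new Metadata();
            metadata.put("owner", "data-team");
            metadata.put("classification", "internal");
            asyncClient.setMetadata(metadata).block();
        }
    }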
*/ public Mono setTier(AccessTier tier, LeaseAccessConditions leaseAccessConditions) { Utility.assertNotNull("tier", tier); @@ -688,8 +619,7 @@ public Mono setTier(AccessTier tier, LeaseAccessConditions leaseAc /** * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots. * - * @return - * A reactive response signalling completion. + * @return A reactive response signalling completion. */ public Mono undelete() { return postProcessResponse(this.azureBlobStorage.blobs().undeleteWithRestResponseAsync(null, @@ -701,14 +631,10 @@ public Mono undelete() { * Acquires a lease on the blob for write and delete operations. The lease duration must be between 15 to 60 * seconds, or infinite (-1). * - * @param proposedId - * A {@code String} in any valid GUID format. May be null. - * @param duration - * The duration of the lease, in seconds, or negative one (-1) for a lease that - * never expires. A non-infinite lease can be between 15 and 60 seconds. - * - * @return - * A reactive response containing the lease ID. + * @param proposedId A {@code String} in any valid GUID format. May be null. + * @param duration The duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A + * non-infinite lease can be between 15 and 60 seconds. + * @return A reactive response containing the lease ID. */ public Mono> acquireLease(String proposedId, int duration) { return this.acquireLease(proposedId, duration, null); @@ -718,18 +644,13 @@ public Mono> acquireLease(String proposedId, int duration) { * Acquires a lease on the blob for write and delete operations. The lease duration must be between 15 to 60 * seconds, or infinite (-1). * - * @param proposedID - * A {@code String} in any valid GUID format. May be null. - * @param duration - * The duration of the lease, in seconds, or negative one (-1) for a lease that - * never expires. A non-infinite lease can be between 15 and 60 seconds. - * @param modifiedAccessConditions - * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used - * to construct conditions related to when the blob was changed relative to the given request. The request - * will fail if the specified condition is not satisfied. - * - * @return - * A reactive response containing the lease ID. + * @param proposedID A {@code String} in any valid GUID format. May be null. + * @param duration The duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A + * non-infinite lease can be between 15 and 60 seconds. + * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. ETag and + * LastModifiedTime are used to construct conditions related to when the blob was changed relative to the given + * request. The request will fail if the specified condition is not satisfied. + * @return A reactive response containing the lease ID. * @throws IllegalArgumentException If {@code duration} is outside the bounds of 15 to 60 or isn't -1. */ public Mono> acquireLease(String proposedID, int duration, ModifiedAccessConditions modifiedAccessConditions) { @@ -748,11 +669,8 @@ public Mono> acquireLease(String proposedID, int duration, Modi /** * Renews the blob's previously-acquired lease. * - * @param leaseID - * The leaseId of the active lease on the blob. - * - * @return - * A reactive response containing the renewed lease ID. + * @param leaseID The leaseId of the active lease on the blob. 
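[Reviewer note] To illustrate the tier/lease interaction documented above, a hedged sketch that acquires an infinite lease and then changes the tier under it; it assumes Response exposes value() and that the generated LeaseAccessConditions model has a fluent leaseId setter:

    import com.azure.storage.blob.BlobAsyncClient;
    import com.azure.storage.blob.models.AccessTier;
    import com.azure.storage.blob.models.LeaseAccessConditions;

    public final class SetTierUnderLeaseSample {
        static void archiveLeasedBlob(BlobAsyncClient asyncClient) {
            // -1 requests an infinite lease; a null proposed id lets the service pick one.
            String leaseId = asyncClient.acquireLease(null, -1).block().value();

            // The tier change only succeeds while the supplied lease matches the active lease.
            asyncClient.setTier(AccessTier.ARCHIVE,
                new LeaseAccessConditions().leaseId(leaseId)).block();

            asyncClient.releaseLease(leaseId).block();
        }
    }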
+ * @return A reactive response containing the renewed lease ID. */ public Mono> renewLease(String leaseID) { return this.renewLease(leaseID, null); @@ -761,15 +679,11 @@ public Mono> renewLease(String leaseID) { /** * Renews the blob's previously-acquired lease. * - * @param leaseID - * The leaseId of the active lease on the blob. - * @param modifiedAccessConditions - * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used - * to construct conditions related to when the blob was changed relative to the given request. The request - * will fail if the specified condition is not satisfied. - * - * @return - * A reactive response containing the renewed lease ID. + * @param leaseID The leaseId of the active lease on the blob. + * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. ETag and + * LastModifiedTime are used to construct conditions related to when the blob was changed relative to the given + * request. The request will fail if the specified condition is not satisfied. + * @return A reactive response containing the renewed lease ID. */ public Mono> renewLease(String leaseID, ModifiedAccessConditions modifiedAccessConditions) { return postProcessResponse(this.azureBlobStorage.blobs().renewLeaseWithRestResponseAsync(null, @@ -780,11 +694,8 @@ public Mono> renewLease(String leaseID, ModifiedAccessCondition /** * Releases the blob's previously-acquired lease. * - * @param leaseID - * The leaseId of the active lease on the blob. - * - * @return - * A reactive response signalling completion. + * @param leaseID The leaseId of the active lease on the blob. + * @return A reactive response signalling completion. */ public Mono releaseLease(String leaseID) { return this.releaseLease(leaseID, null); @@ -793,15 +704,11 @@ public Mono releaseLease(String leaseID) { /** * Releases the blob's previously-acquired lease. * - * @param leaseID - * The leaseId of the active lease on the blob. - * @param modifiedAccessConditions - * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used - * to construct conditions related to when the blob was changed relative to the given request. The request - * will fail if the specified condition is not satisfied. - * - * @return - * A reactive response signalling completion. + * @param leaseID The leaseId of the active lease on the blob. + * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. ETag and + * LastModifiedTime are used to construct conditions related to when the blob was changed relative to the given + * request. The request will fail if the specified condition is not satisfied. + * @return A reactive response signalling completion. */ public Mono releaseLease(String leaseID, ModifiedAccessConditions modifiedAccessConditions) { return postProcessResponse(this.azureBlobStorage.blobs().releaseLeaseWithRestResponseAsync(null, @@ -813,8 +720,7 @@ public Mono releaseLease(String leaseID, ModifiedAccessConditions * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) constant * to break a fixed-duration lease when it expires or an infinite lease immediately. * - * @return - * A reactive response containing the remaining time in the broken lease in seconds. + * @return A reactive response containing the remaining time in the broken lease in seconds. 
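[Reviewer note] A companion sketch for the renew/break lease methods above, assuming an existing BlobAsyncClient, a lease id obtained earlier, and Response.value():

    import com.azure.storage.blob.BlobAsyncClient;

    public final class LeaseLifecycleSample {
        static void renewThenBreak(BlobAsyncClient asyncClient, String leaseId) {
            // Renewing keeps the same lease id alive for another lease period.
            asyncClient.renewLease(leaseId).block();

            // Breaking with a 10-second break period blocks new leases while allowing
            // in-flight work up to 10 more seconds; the remaining time is returned.
            Integer secondsRemaining = asyncClient.breakLease(10, null).block().value();
            System.out.println("Lease fully broken in " + secondsRemaining + "s");
        }
    }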
*/ public Mono> breakLease() { return this.breakLease(null, null); @@ -824,19 +730,15 @@ public Mono> breakLease() { * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) constant * to break a fixed-duration lease when it expires or an infinite lease immediately. * - * @param breakPeriodInSeconds - * An optional {@code Integer} representing the proposed duration of seconds that the lease should continue - * before it is broken, between 0 and 60 seconds. This break period is only used if it is shorter than the - * time remaining on the lease. If longer, the time remaining on the lease is used. A new lease will not be - * available before the break period has expired, but the lease may be held for longer than the break - * period. - * @param modifiedAccessConditions - * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used - * to construct conditions related to when the blob was changed relative to the given request. The request - * will fail if the specified condition is not satisfied. - * - * @return - * A reactive response containing the remaining time in the broken lease in seconds. + * @param breakPeriodInSeconds An optional {@code Integer} representing the proposed duration of seconds that the + * lease should continue before it is broken, between 0 and 60 seconds. This break period is only used if it is + * shorter than the time remaining on the lease. If longer, the time remaining on the lease is used. A new lease + * will not be available before the break period has expired, but the lease may be held for longer than the break + * period. + * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. ETag and + * LastModifiedTime are used to construct conditions related to when the blob was changed relative to the given + * request. The request will fail if the specified condition is not satisfied. + * @return A reactive response containing the remaining time in the broken lease in seconds. */ public Mono> breakLease(Integer breakPeriodInSeconds, ModifiedAccessConditions modifiedAccessConditions) { return postProcessResponse(this.azureBlobStorage.blobs().breakLeaseWithRestResponseAsync(null, @@ -847,30 +749,23 @@ public Mono> breakLease(Integer breakPeriodInSeconds, Modified /** * ChangeLease changes the blob's lease ID. * - * @param leaseId - * The leaseId of the active lease on the blob. - * @param proposedID - * A {@code String} in any valid GUID format. - * - * @return - * A reactive response containing the new lease ID. + * @param leaseId The leaseId of the active lease on the blob. + * @param proposedID A {@code String} in any valid GUID format. + * @return A reactive response containing the new lease ID. */ public Mono> changeLease(String leaseId, String proposedID) { return this.changeLease(leaseId, proposedID, null); } /** - * ChangeLease changes the blob's lease ID. For more information, see the Azure Docs. - * - * @param leaseId - * The leaseId of the active lease on the blob. - * @param proposedID - * A {@code String} in any valid GUID format. - * @param modifiedAccessConditions - * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used - * to construct conditions related to when the blob was changed relative to the given request. The request - * will fail if the specified condition is not satisfied. + * ChangeLease changes the blob's lease ID. 
For more information, see the Azure + * Docs. * + * @param leaseId The leaseId of the active lease on the blob. + * @param proposedID A {@code String} in any valid GUID format. + * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. ETag and + * LastModifiedTime are used to construct conditions related to when the blob was changed relative to the given + * request. The request will fail if the specified condition is not satisfied. * @return A reactive response containing the new lease ID. */ public Mono> changeLease(String leaseId, String proposedID, ModifiedAccessConditions modifiedAccessConditions) { @@ -880,7 +775,8 @@ public Mono> changeLease(String leaseId, String proposedID, Mod } /** - * Returns the sku name and account kind for the account. For more information, please see the Azure Docs. + * Returns the sku name and account kind for the account. For more information, please see the Azure Docs. * * @return a reactor response containing the sku name and account kind. */ @@ -890,4 +786,197 @@ public Mono> getAccountInfo() { this.azureBlobStorage.blobs().getAccountInfoWithRestResponseAsync(null, null, Context.NONE)) .map(rb -> new SimpleResponse<>(rb, new StorageAccountInfo(rb.deserializedHeaders()))); } + + /** + * Generates a user delegation SAS with the specified parameters + * + * @param userDelegationKey The {@code UserDelegationKey} user delegation key for the SAS + * @param accountName The {@code String} account name for the SAS + * @param permissions The {@code ContainerSASPermissions} permission for the SAS + * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS + * @return A string that represents the SAS token + */ + public String generateUserDelegationSAS(UserDelegationKey userDelegationKey, String accountName, + BlobSASPermission permissions, OffsetDateTime expiryTime) { + return this.generateUserDelegationSAS(userDelegationKey, accountName, permissions, expiryTime, null /* + startTime */, null /* version */, null /*sasProtocol */, null /* ipRange */, null /* cacheControl */, null + /*contentDisposition */, null /* contentEncoding */, null /* contentLanguage */, null /* contentType */); + } + + /** + * Generates a user delegation SAS token with the specified parameters + * + * @param userDelegationKey The {@code UserDelegationKey} user delegation key for the SAS + * @param accountName The {@code String} account name for the SAS + * @param permissions The {@code ContainerSASPermissions} permission for the SAS + * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS + * @param startTime An optional {@code OffsetDateTime} start time for the SAS + * @param version An optional {@code String} version for the SAS + * @param sasProtocol An optional {@code SASProtocol} protocol for the SAS + * @param ipRange An optional {@code IPRange} ip address range for the SAS + * @return A string that represents the SAS token + */ + public String generateUserDelegationSAS(UserDelegationKey userDelegationKey, String accountName, + BlobSASPermission permissions, OffsetDateTime expiryTime, OffsetDateTime startTime, String version, + SASProtocol sasProtocol, IPRange ipRange) { + return this.generateUserDelegationSAS(userDelegationKey, accountName, permissions, expiryTime, startTime, + version, sasProtocol, ipRange, null /* cacheControl */, null /* contentDisposition */, null /* + contentEncoding */, null /* contentLanguage */, null /* contentType */); + } + + /** + * Generates a user delegation SAS token with the 
specified parameters + * + * @param userDelegationKey The {@code UserDelegationKey} user delegation key for the SAS + * @param accountName The {@code String} account name for the SAS + * @param permissions The {@code ContainerSASPermissions} permission for the SAS + * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS + * @param startTime An optional {@code OffsetDateTime} start time for the SAS + * @param version An optional {@code String} version for the SAS + * @param sasProtocol An optional {@code SASProtocol} protocol for the SAS + * @param ipRange An optional {@code IPRange} ip address range for the SAS + * @param cacheControl An optional {@code String} cache-control header for the SAS. + * @param contentDisposition An optional {@code String} content-disposition header for the SAS. + * @param contentEncoding An optional {@code String} content-encoding header for the SAS. + * @param contentLanguage An optional {@code String} content-language header for the SAS. + * @param contentType An optional {@code String} content-type header for the SAS. + * @return A string that represents the SAS token + */ + public String generateUserDelegationSAS(UserDelegationKey userDelegationKey, String accountName, + BlobSASPermission permissions, OffsetDateTime expiryTime, OffsetDateTime startTime, String version, + SASProtocol sasProtocol, IPRange ipRange, String cacheControl, String contentDisposition, + String contentEncoding, String contentLanguage, String contentType) { + + ServiceSASSignatureValues serviceSASSignatureValues = new ServiceSASSignatureValues(version, sasProtocol, + startTime, expiryTime, permissions == null ? null : permissions.toString(), ipRange, null /* identifier*/, + cacheControl, contentDisposition, contentEncoding, contentLanguage, contentType); + + ServiceSASSignatureValues values = configureServiceSASSignatureValues(serviceSASSignatureValues, accountName); + + SASQueryParameters sasQueryParameters = values.generateSASQueryParameters(userDelegationKey); + + return sasQueryParameters.encode(); + } + + /** + * Generates a SAS token with the specified parameters + * + * @param permissions The {@code ContainerSASPermissions} permission for the SAS + * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS + * @return A string that represents the SAS token + */ + public String generateSAS(BlobSASPermission permissions, OffsetDateTime expiryTime) { + return this.generateSAS(null, permissions, expiryTime, null /* startTime */, /* identifier */ null /* + version */, null /* sasProtocol */, null /* ipRange */, null /* cacheControl */, null /* contentLanguage*/, + null /* contentEncoding */, null /* contentLanguage */, null /* contentType */); + } + + /** + * Generates a SAS token with the specified parameters + * + * @param identifier The {@code String} name of the access policy on the container this SAS references if any + * @return A string that represents the SAS token + */ + public String generateSAS(String identifier) { + return this.generateSAS(identifier, null /* permissions */, null /* expiryTime */, null /* startTime */, + null /* version */, null /* sasProtocol */, null /* ipRange */, null /* cacheControl */, null /* + contentLanguage*/, null /* contentEncoding */, null /* contentLanguage */, null /* contentType */); + } + + /** + * Generates a SAS token with the specified parameters + * + * @param identifier The {@code String} name of the access policy on the container this SAS references if any + * @param permissions The {@code 
ContainerSASPermissions} permission for the SAS + * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS + * @param startTime An optional {@code OffsetDateTime} start time for the SAS + * @param version An optional {@code String} version for the SAS + * @param sasProtocol An optional {@code SASProtocol} protocol for the SAS + * @param ipRange An optional {@code IPRange} ip address range for the SAS + * @return A string that represents the SAS token + */ + public String generateSAS(String identifier, BlobSASPermission permissions, OffsetDateTime expiryTime, + OffsetDateTime startTime, String version, SASProtocol sasProtocol, IPRange ipRange) { + return this.generateSAS(identifier, permissions, expiryTime, startTime, version, sasProtocol, ipRange, null + /* cacheControl */, null /* contentLanguage*/, null /* contentEncoding */, null /* contentLanguage */, + null /* contentType */); + } + + /** + * Generates a SAS token with the specified parameters + * + * @param identifier The {@code String} name of the access policy on the container this SAS references if any + * @param permissions The {@code ContainerSASPermissions} permission for the SAS + * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS + * @param startTime An optional {@code OffsetDateTime} start time for the SAS + * @param version An optional {@code String} version for the SAS + * @param sasProtocol An optional {@code SASProtocol} protocol for the SAS + * @param ipRange An optional {@code IPRange} ip address range for the SAS + * @param cacheControl An optional {@code String} cache-control header for the SAS. + * @param contentDisposition An optional {@code String} content-disposition header for the SAS. + * @param contentEncoding An optional {@code String} content-encoding header for the SAS. + * @param contentLanguage An optional {@code String} content-language header for the SAS. + * @param contentType An optional {@code String} content-type header for the SAS. + * @return A string that represents the SAS token + */ + public String generateSAS(String identifier, BlobSASPermission permissions, OffsetDateTime expiryTime, + OffsetDateTime startTime, String version, SASProtocol sasProtocol, IPRange ipRange, String cacheControl, + String contentDisposition, String contentEncoding, String contentLanguage, String contentType) { + + ServiceSASSignatureValues serviceSASSignatureValues = new ServiceSASSignatureValues(version, sasProtocol, + startTime, expiryTime, permissions == null ? 
null : permissions.toString(), ipRange, identifier, + cacheControl, contentDisposition, contentEncoding, contentLanguage, contentType); + + SharedKeyCredential sharedKeyCredential = + Utility.getSharedKeyCredential(this.azureBlobStorage.httpPipeline()); + + Utility.assertNotNull("sharedKeyCredential", sharedKeyCredential); + + ServiceSASSignatureValues values = configureServiceSASSignatureValues(serviceSASSignatureValues, + sharedKeyCredential.accountName()); + + SASQueryParameters sasQueryParameters = values.generateSASQueryParameters(sharedKeyCredential); + + return sasQueryParameters.encode(); + } + + /** + * Sets serviceSASSignatureValues parameters dependent on the current blob type + */ + ServiceSASSignatureValues configureServiceSASSignatureValues(ServiceSASSignatureValues serviceSASSignatureValues, + String accountName) { + + // Set canonical name + serviceSASSignatureValues.canonicalName(this.azureBlobStorage.url(), accountName); + + // Set snapshotId + serviceSASSignatureValues.snapshotId(getSnapshotId()); + + // Set resource + if (isSnapshot()) { + serviceSASSignatureValues.resource(Constants.UrlConstants.SAS_BLOB_SNAPSHOT_CONSTANT); + } else { + serviceSASSignatureValues.resource(Constants.UrlConstants.SAS_BLOB_CONSTANT); + } + + return serviceSASSignatureValues; + } + + /** + * Gets the snapshotId for a blob resource + * + * @return A string that represents the snapshotId of the snapshot blob + */ + public String getSnapshotId() { + return this.snapshot; + } + + /** + * Determines if a blob is a snapshot + * + * @return A boolean that indicates if a blob is a snapshot + */ + public boolean isSnapshot() { + return this.snapshot != null; + } } diff --git a/storage/client/blob/src/main/java/com/azure/storage/blob/BlobClient.java b/storage/client/blob/src/main/java/com/azure/storage/blob/BlobClient.java index 106b84729a39d..a937b8ca47613 100644 --- a/storage/client/blob/src/main/java/com/azure/storage/blob/BlobClient.java +++ b/storage/client/blob/src/main/java/com/azure/storage/blob/BlobClient.java @@ -16,6 +16,7 @@ import com.azure.storage.blob.models.ModifiedAccessConditions; import com.azure.storage.blob.models.ReliableDownloadOptions; import com.azure.storage.blob.models.StorageAccountInfo; +import com.azure.storage.blob.models.UserDelegationKey; import reactor.core.publisher.Mono; import java.io.IOException; @@ -23,31 +24,33 @@ import java.io.UncheckedIOException; import java.net.URL; import java.time.Duration; +import java.time.OffsetDateTime; /** - * Client to a blob of any type: block, append, or page. It may only be instantiated through a {@link BlobClientBuilder} or via - * the method {@link ContainerClient#getBlobClient(String)}. This class does not hold any state about a particular - * blob, but is instead a convenient way of sending appropriate requests to the resource on the service. + * Client to a blob of any type: block, append, or page. It may only be instantiated through a {@link BlobClientBuilder} + * or via the method {@link ContainerClient#getBlobClient(String)}. This class does not hold any state about a + * particular blob, but is instead a convenient way of sending appropriate requests to the resource on the service. * *
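[Reviewer note] Referring back to the SAS helpers added above, a minimal sketch of the simplest generateSAS overload; it assumes BlobSASPermission lives in the root blob package and exposes fluent setters such as read(true), and that the client was built with a SharedKeyCredential, which generateSAS requires for signing:

    import com.azure.storage.blob.BlobAsyncClient;
    import com.azure.storage.blob.BlobSASPermission;   // assumed package for this preview
    import java.time.OffsetDateTime;

    public final class GenerateSasSample {
        static String readOnlySasForOneDay(BlobAsyncClient asyncClient) {
            // Read-only permission, valid for 24 hours; all optional fields fall back to service defaults.
            BlobSASPermission permissions = new BlobSASPermission().read(true);   // assumed fluent setter
            OffsetDateTime expiry = OffsetDateTime.now().plusDays(1);
            return asyncClient.generateSAS(permissions, expiry);
        }
    }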

    * This client offers the ability to download blobs. Note that uploading data is specific to each type of blob. Please * refer to the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient} for upload options. This - * client can be converted into one of these clients easily through the methods {@link #asBlockBlobClient}, {@link #asPageBlobClient}, - * and {@link #asAppendBlobClient}. + * client can be converted into one of these clients easily through the methods {@link #asBlockBlobClient}, {@link + * #asPageBlobClient}, and {@link #asAppendBlobClient}. * *

    - * This client contains operations on a blob. Operations on a container are available on {@link ContainerClient}, - * and operations on the service are available on {@link StorageClient}. + * This client contains operations on a blob. Operations on a container are available on {@link ContainerClient}, and + * operations on the service are available on {@link StorageClient}. * *

    - * Please refer to the Azure Docs - * for more information. + * Please refer to the Azure + * Docs for more information. */ public class BlobClient { private final BlobAsyncClient blobAsyncClient; /** * Package-private constructor for use by {@link BlobClientBuilder}. + * * @param blobAsyncClient the async blob client */ BlobClient(BlobAsyncClient blobAsyncClient) { @@ -55,45 +58,41 @@ public class BlobClient { } /** - * Creates a new {@link BlockBlobClient} to this resource, maintaining configurations. Only do this for blobs - * that are known to be block blobs. + * Creates a new {@link BlockBlobClient} to this resource, maintaining configurations. Only do this for blobs that + * are known to be block blobs. * - * @return - * A {@link BlockBlobClient} to this resource. + * @return A {@link BlockBlobClient} to this resource. */ public BlockBlobClient asBlockBlobClient() { return new BlockBlobClient(blobAsyncClient.asBlockBlobAsyncClient()); } /** - * Creates a new {@link AppendBlobClient} to this resource, maintaining configurations. Only do this for blobs - * that are known to be append blobs. + * Creates a new {@link AppendBlobClient} to this resource, maintaining configurations. Only do this for blobs that + * are known to be append blobs. * - * @return - * A {@link AppendBlobClient} to this resource. + * @return A {@link AppendBlobClient} to this resource. */ public AppendBlobClient asAppendBlobClient() { return new AppendBlobClient(blobAsyncClient.asAppendBlobAsyncClient()); } /** - * Creates a new {@link PageBlobClient} to this resource, maintaining configurations. Only do this for blobs - * that are known to be page blobs. + * Creates a new {@link PageBlobClient} to this resource, maintaining configurations. Only do this for blobs that + * are known to be page blobs. * - * @return - * A {@link PageBlobClient} to this resource. + * @return A {@link PageBlobClient} to this resource. */ public PageBlobClient asPageBlobClient() { return new PageBlobClient(blobAsyncClient.asPageBlobAsyncClient()); } /** - * Initializes a {@link ContainerClient} object pointing to the container this blob is in. This method does - * not create a container. It simply constructs the URL to the container and offers access to methods relevant to + * Initializes a {@link ContainerClient} object pointing to the container this blob is in. This method does not + * create a container. It simply constructs the URL to the container and offers access to methods relevant to * containers. * - * @return - * A {@link ContainerClient} object pointing to the container containing the blob + * @return A {@link ContainerClient} object pointing to the container containing the blob */ public ContainerClient getContainerClient() { return new ContainerClient(blobAsyncClient.getContainerAsyncClient()); @@ -101,6 +100,7 @@ public ContainerClient getContainerClient() { /** * Gets the URL of the blob represented by this client. + * * @return the URL. */ public URL getBlobUrl() { @@ -112,9 +112,7 @@ public URL getBlobUrl() { *
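[Reviewer note] A short sketch of the client-conversion pattern described in the class Javadoc above, assuming an existing BlobClient; the conversions only re-wrap configuration and make no service calls:

    import com.azure.storage.blob.BlobClient;
    import com.azure.storage.blob.BlockBlobClient;
    import com.azure.storage.blob.ContainerClient;

    public final class ClientConversionSample {
        static void convertClients(BlobClient blobClient) {
            // Uploads are type-specific, so switch to the block blob view for upload calls.
            BlockBlobClient blockClient = blobClient.asBlockBlobClient();

            // Container-level operations live on ContainerClient; this only constructs the URL.
            ContainerClient containerClient = blobClient.getContainerClient();

            System.out.println("Blob URL: " + blobClient.getBlobUrl());
            System.out.println("Converted client: " + blockClient.getClass().getSimpleName());
            System.out.println("Container client ready: " + (containerClient != null));
        }
    }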

    * * @return An InputStream object that represents the stream to use for reading from the blob. - * - * @throws StorageException - * If a storage service error occurred. + * @throws StorageException If a storage service error occurred. */ public final BlobInputStream openInputStream() { return openInputStream(new BlobRange(0), null); @@ -124,15 +122,11 @@ public final BlobInputStream openInputStream() { * Opens a blob input stream to download the specified range of the blob. *

    * - * @param range - * {@link BlobRange} - * @param accessConditions - * An {@link BlobAccessConditions} object that represents the access conditions for the blob. - * + * @param range {@link BlobRange} + * @param accessConditions An {@link BlobAccessConditions} object that represents the access conditions for the + * blob. * @return An InputStream object that represents the stream to use for reading from the blob. - * - * @throws StorageException - * If a storage service error occurred. + * @throws StorageException If a storage service error occurred. */ public final BlobInputStream openInputStream(BlobRange range, BlobAccessConditions accessConditions) { return new BlobInputStream(blobAsyncClient, range.offset(), range.count(), accessConditions); @@ -150,10 +144,8 @@ public Response exists() { /** * Gets if the container this client represents exists in the cloud. * - * @param timeout - * An optional timeout value beyond which a {@link RuntimeException} will be raised. - * @return - * true if the container exists, false if it doesn't + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @return true if the container exists, false if it doesn't */ public Response exists(Duration timeout) { Mono> response = blobAsyncClient.exists(); @@ -165,11 +157,8 @@ public Response exists(Duration timeout) { * Copies the data at the source URL to a blob. For more information, see the * Azure Docs * - * @param sourceURL - * The source URL to copy from. URLs outside of Azure may only be copied to block blobs. - * - * @return - * The copy ID for the long running operation. + * @param sourceURL The source URL to copy from. URLs outside of Azure may only be copied to block blobs. + * @return The copy ID for the long running operation. */ public Response startCopyFromURL(URL sourceURL) { return this.startCopyFromURL(sourceURL, null, null, null, null); @@ -179,26 +168,19 @@ public Response startCopyFromURL(URL sourceURL) { * Copies the data at the source URL to a blob. For more information, see the * Azure Docs * - * @param sourceURL - * The source URL to copy from. URLs outside of Azure may only be copied to block blobs. - * @param metadata - * {@link Metadata} - * @param sourceModifiedAccessConditions - * {@link ModifiedAccessConditions} against the source. Standard HTTP Access conditions related to the - * modification of data. ETag and LastModifiedTime are used to construct conditions related to when the blob - * was changed relative to the given request. The request will fail if the specified condition is not - * satisfied. - * @param destAccessConditions - * {@link BlobAccessConditions} against the destination. - * @param timeout - * An optional timeout value beyond which a {@link RuntimeException} will be raised. - * - * @return - * The copy ID for the long running operation. + * @param sourceURL The source URL to copy from. URLs outside of Azure may only be copied to block blobs. + * @param metadata {@link Metadata} + * @param sourceModifiedAccessConditions {@link ModifiedAccessConditions} against the source. Standard HTTP Access + * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions + * related to when the blob was changed relative to the given request. The request will fail if the specified + * condition is not satisfied. + * @param destAccessConditions {@link BlobAccessConditions} against the destination. 
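[Reviewer note] A hedged sketch of the copy workflow documented here, assuming an existing BlobClient for the destination and Response.value(); the copy id returned by startCopyFromURL is what abortCopyFromURL expects, and the source URL below is a placeholder:

    import com.azure.storage.blob.BlobClient;
    import java.net.MalformedURLException;
    import java.net.URL;

    public final class StartCopySample {
        static void startThenMaybeAbort(BlobClient destinationClient) throws MalformedURLException {
            URL source = new URL("https://account.blob.core.windows.net/container/source-blob"); // placeholder
            // Kicks off a server-side copy and returns immediately with the copy id.
            String copyId = destinationClient.startCopyFromURL(source).value();

            // If the copy is no longer wanted, abort it; the destination is left as a zero-length blob.
            destinationClient.abortCopyFromURL(copyId);
        }
    }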
+ * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @return The copy ID for the long running operation. */ public Response startCopyFromURL(URL sourceURL, Metadata metadata, - ModifiedAccessConditions sourceModifiedAccessConditions, BlobAccessConditions destAccessConditions, - Duration timeout) { + ModifiedAccessConditions sourceModifiedAccessConditions, BlobAccessConditions destAccessConditions, + Duration timeout) { Mono> response = blobAsyncClient .startCopyFromURL(sourceURL, metadata, sourceModifiedAccessConditions, destAccessConditions); @@ -208,9 +190,8 @@ public Response startCopyFromURL(URL sourceURL, Metadata metadata, /** * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. * - * @param copyId - * The id of the copy operation to abort. Returned as the {@code copyId} field on the {@link - * BlobStartCopyFromURLHeaders} object. + * @param copyId The id of the copy operation to abort. Returned as the {@code copyId} field on the {@link + * BlobStartCopyFromURLHeaders} object. * @return A response containing status code and HTTP headers. */ public VoidResponse abortCopyFromURL(String copyId) { @@ -220,14 +201,11 @@ public VoidResponse abortCopyFromURL(String copyId) { /** * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. * - * @param copyId - * The id of the copy operation to abort. Returned as the {@code copyId} field on the {@link - * BlobStartCopyFromURLHeaders} object. - * @param leaseAccessConditions - * By setting lease access conditions, requests will fail if the provided lease does not match the active - * lease on the blob. - * @param timeout - * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param copyId The id of the copy operation to abort. Returned as the {@code copyId} field on the {@link + * BlobStartCopyFromURLHeaders} object. + * @param leaseAccessConditions By setting lease access conditions, requests will fail if the provided lease does + * not match the active lease on the blob. + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @return A response containing status code and HTTP headers. */ public VoidResponse abortCopyFromURL(String copyId, LeaseAccessConditions leaseAccessConditions, Duration timeout) { @@ -240,11 +218,8 @@ public VoidResponse abortCopyFromURL(String copyId, LeaseAccessConditions leaseA /** * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. * - * @param copySource - * The source URL to copy from. - * - * @return - * The copy ID for the long running operation. + * @param copySource The source URL to copy from. + * @return The copy ID for the long running operation. */ public Response copyFromURL(URL copySource) { return this.copyFromURL(copySource, null, null, null, null); @@ -253,26 +228,19 @@ public Response copyFromURL(URL copySource) { /** * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. * - * @param copySource - * The source URL to copy from. URLs outside of Azure may only be copied to block blobs. - * @param metadata - * {@link Metadata} - * @param sourceModifiedAccessConditions - * {@link ModifiedAccessConditions} against the source. Standard HTTP Access conditions related to the - * modification of data. 
ETag and LastModifiedTime are used to construct conditions related to when the blob - * was changed relative to the given request. The request will fail if the specified condition is not - * satisfied. - * @param destAccessConditions - * {@link BlobAccessConditions} against the destination. - * @param timeout - * An optional timeout value beyond which a {@link RuntimeException} will be raised. - * - * @return - * The copy ID for the long running operation. + * @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs. + * @param metadata {@link Metadata} + * @param sourceModifiedAccessConditions {@link ModifiedAccessConditions} against the source. Standard HTTP Access + * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions + * related to when the blob was changed relative to the given request. The request will fail if the specified + * condition is not satisfied. + * @param destAccessConditions {@link BlobAccessConditions} against the destination. + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @return The copy ID for the long running operation. */ public Response copyFromURL(URL copySource, Metadata metadata, - ModifiedAccessConditions sourceModifiedAccessConditions, BlobAccessConditions destAccessConditions, - Duration timeout) { + ModifiedAccessConditions sourceModifiedAccessConditions, BlobAccessConditions destAccessConditions, + Duration timeout) { Mono> response = blobAsyncClient .copyFromURL(copySource, metadata, sourceModifiedAccessConditions, destAccessConditions); @@ -283,8 +251,7 @@ public Response copyFromURL(URL copySource, Metadata metadata, * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient}, * {@link PageBlobClient}, or {@link AppendBlobClient}. * - * @param stream - * A non-null {@link OutputStream} instance where the downloaded data will be written. + * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written. * @return A response containing status code and HTTP headers. * @throws UncheckedIOException If an I/O error occurs. */ @@ -293,23 +260,17 @@ public VoidResponse download(OutputStream stream) { } /** - * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link BlockBlobClient}, - * {@link PageBlobClient}, or {@link AppendBlobClient}. + * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link + * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}. * - * @param stream - * A non-null {@link OutputStream} instance where the downloaded data will be written. + * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written. * @param options {@link ReliableDownloadOptions} - * @param range - * {@link BlobRange} - * @param accessConditions - * {@link BlobAccessConditions} - * @param rangeGetContentMD5 - * Whether the contentMD5 for the specified blob range should be returned. - * @param timeout - * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param range {@link BlobRange} + * @param accessConditions {@link BlobAccessConditions} + * @param rangeGetContentMD5 Whether the contentMD5 for the specified blob range should be returned. + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. 
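[Reviewer note] A minimal sketch of the synchronous range-download overload just described, assuming a BlobRange(offset, count) constructor and a maxRetryRequests setter on ReliableDownloadOptions:

    import com.azure.storage.blob.BlobClient;
    import com.azure.storage.blob.models.BlobRange;
    import com.azure.storage.blob.models.ReliableDownloadOptions;
    import java.io.ByteArrayOutputStream;
    import java.time.Duration;

    public final class DownloadRangeSample {
        static byte[] downloadFirstKilobyte(BlobClient blobClient) {
            ByteArrayOutputStream stream = new ByteArrayOutputStream();
            // First 1 KB only, no access conditions, no per-range MD5, 30-second client-side timeout.
            blobClient.download(stream, new ReliableDownloadOptions().maxRetryRequests(2),
                new BlobRange(0, 1024), null, false, Duration.ofSeconds(30));
            return stream.toByteArray();
        }
    }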
* @return A response containing status code and HTTP headers. * @throws UncheckedIOException If an I/O error occurs. - * */ public VoidResponse download(OutputStream stream, ReliableDownloadOptions options, BlobRange range, BlobAccessConditions accessConditions, boolean rangeGetContentMD5, Duration timeout) { @@ -331,10 +292,10 @@ public VoidResponse download(OutputStream stream, ReliableDownloadOptions option /** * Downloads the entire blob into a file specified by the path. The file will be created if it doesn't exist. - * Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}. + * Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link + * AppendBlobClient}. * - * @param filePath - * A non-null {@link OutputStream} instance where the downloaded data will be written. + * @param filePath A non-null {@link OutputStream} instance where the downloaded data will be written. * @throws IOException If an I/O error occurs */ public void downloadToFile(String filePath) throws IOException { @@ -343,25 +304,20 @@ public void downloadToFile(String filePath) throws IOException { /** * Downloads a range of bytes blob into a file specified by the path. The file will be created if it doesn't exist. - * Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}. + * Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link + * AppendBlobClient}. * - * @param filePath - * A non-null {@link OutputStream} instance where the downloaded data will be written. + * @param filePath A non-null {@link OutputStream} instance where the downloaded data will be written. * @param options {@link ReliableDownloadOptions} - * @param range - * {@link BlobRange} - * @param blockSize - * the size of a chunk to download at a time, in bytes - * @param accessConditions - * {@link BlobAccessConditions} - * @param rangeGetContentMD5 - * Whether the contentMD5 for the specified blob range should be returned. - * @param timeout - * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param range {@link BlobRange} + * @param blockSize the size of a chunk to download at a time, in bytes + * @param accessConditions {@link BlobAccessConditions} + * @param rangeGetContentMD5 Whether the contentMD5 for the specified blob range should be returned. + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @throws IOException If an I/O error occurs */ public void downloadToFile(String filePath, ReliableDownloadOptions options, BlobRange range, Integer blockSize, - BlobAccessConditions accessConditions, boolean rangeGetContentMD5, Duration timeout) throws IOException { + BlobAccessConditions accessConditions, boolean rangeGetContentMD5, Duration timeout) throws IOException { Mono download = blobAsyncClient.downloadToFile(filePath, range, blockSize, accessConditions, rangeGetContentMD5, options); try { @@ -383,19 +339,15 @@ public VoidResponse delete() { /** * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots. * - * @param deleteBlobSnapshotOptions - * Specifies the behavior for deleting the snapshots on this blob. {@code Include} will delete the base blob - * and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being deleted, you must - * pass null. 
- * @param accessConditions - * {@link BlobAccessConditions} - * @param timeout - * An optional timeout value beyond which a {@link RuntimeException} will be raised. - * + * @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include} + * will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being + * deleted, you must pass null. + * @param accessConditions {@link BlobAccessConditions} + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @return A response containing status code and HTTP headers. */ public VoidResponse delete(DeleteSnapshotsOptionType deleteBlobSnapshotOptions, - BlobAccessConditions accessConditions, Duration timeout) { + BlobAccessConditions accessConditions, Duration timeout) { Mono response = blobAsyncClient .delete(deleteBlobSnapshotOptions, accessConditions); @@ -405,8 +357,7 @@ public VoidResponse delete(DeleteSnapshotsOptionType deleteBlobSnapshotOptions, /** * Returns the blob's metadata and properties. * - * @return - * The blob properties and metadata. + * @return The blob properties and metadata. */ public Response getProperties() { return this.getProperties(null, null); @@ -415,13 +366,9 @@ public Response getProperties() { /** * Returns the blob's metadata and properties. * - * @param accessConditions - * {@link BlobAccessConditions} - * @param timeout - * An optional timeout value beyond which a {@link RuntimeException} will be raised. - * - * @return - * The blob properties and metadata. + * @param accessConditions {@link BlobAccessConditions} + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @return The blob properties and metadata. */ public Response getProperties(BlobAccessConditions accessConditions, Duration timeout) { Mono> response = blobAsyncClient @@ -431,13 +378,12 @@ public Response getProperties(BlobAccessConditions accessConditi } /** - * Changes a blob's HTTP header properties. if only one HTTP header is updated, the - * others will all be erased. In order to preserve existing values, they must be - * passed alongside the header being changed. For more information, see the + * Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In + * order to preserve existing values, they must be passed alongside the header being changed. For more information, + * see the * Azure Docs. * - * @param headers - * {@link BlobHTTPHeaders} + * @param headers {@link BlobHTTPHeaders} * @return A response containing status code and HTTP headers. */ public VoidResponse setHTTPHeaders(BlobHTTPHeaders headers) { @@ -445,21 +391,18 @@ public VoidResponse setHTTPHeaders(BlobHTTPHeaders headers) { } /** - * Changes a blob's HTTP header properties. if only one HTTP header is updated, the - * others will all be erased. In order to preserve existing values, they must be - * passed alongside the header being changed. For more information, see the + * Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In + * order to preserve existing values, they must be passed alongside the header being changed. For more information, + * see the * Azure Docs. * - * @param headers - * {@link BlobHTTPHeaders} - * @param accessConditions - * {@link BlobAccessConditions} - * @param timeout - * An optional timeout value beyond which a {@link RuntimeException} will be raised. 
+ * @param headers {@link BlobHTTPHeaders} + * @param accessConditions {@link BlobAccessConditions} + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @return A response containing status code and HTTP headers. */ public VoidResponse setHTTPHeaders(BlobHTTPHeaders headers, BlobAccessConditions accessConditions, - Duration timeout) { + Duration timeout) { Mono response = blobAsyncClient .setHTTPHeaders(headers, accessConditions); @@ -467,12 +410,11 @@ public VoidResponse setHTTPHeaders(BlobHTTPHeaders headers, BlobAccessConditions } /** - * Changes a blob's metadata. The specified metadata in this method will replace existing - * metadata. If old values must be preserved, they must be downloaded and included in the - * call to this method. For more information, see the Azure Docs. + * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values + * must be preserved, they must be downloaded and included in the call to this method. For more information, see the + * Azure Docs. * - * @param metadata - * {@link Metadata} + * @param metadata {@link Metadata} * @return A response containing status code and HTTP headers. */ public VoidResponse setMetadata(Metadata metadata) { @@ -480,16 +422,13 @@ public VoidResponse setMetadata(Metadata metadata) { } /** - * Changes a blob's metadata. The specified metadata in this method will replace existing - * metadata. If old values must be preserved, they must be downloaded and included in the - * call to this method. For more information, see the Azure Docs. + * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values + * must be preserved, they must be downloaded and included in the call to this method. For more information, see the + * Azure Docs. * - * @param metadata - * {@link Metadata} - * @param accessConditions - * {@link BlobAccessConditions} - * @param timeout - * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param metadata {@link Metadata} + * @param accessConditions {@link BlobAccessConditions} + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @return A response containing status code and HTTP headers. */ public VoidResponse setMetadata(Metadata metadata, BlobAccessConditions accessConditions, Duration timeout) { @@ -502,8 +441,7 @@ public VoidResponse setMetadata(Metadata metadata, BlobAccessConditions accessCo /** * Creates a read-only snapshot of a blob. * - * @return - * The ID of the new snapshot. + * @return The ID of the new snapshot. */ public Response createSnapshot() { return this.createSnapshot(null, null, null); @@ -512,15 +450,10 @@ public Response createSnapshot() { /** * Creates a read-only snapshot of a blob. * - * @param metadata - * {@link Metadata} - * @param accessConditions - * {@link BlobAccessConditions} - * @param timeout - * An optional timeout value beyond which a {@link RuntimeException} will be raised. - * - * @return - * The ID of the new snapshot. + * @param metadata {@link Metadata} + * @param accessConditions {@link BlobAccessConditions} + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @return The ID of the new snapshot. 
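[Reviewer note] A sketch tying together the metadata and snapshot calls documented above, assuming Metadata is Map-like and Response exposes value():

    import com.azure.storage.blob.BlobClient;
    import com.azure.storage.blob.models.Metadata;

    public final class SnapshotAfterMetadataSample {
        static String tagAndSnapshot(BlobClient blobClient) {
            // Remember: this replaces all existing metadata on the base blob.
            Metadata metadata = new Metadata();
            metadata.put("state", "reviewed");
            blobClient.setMetadata(metadata);

            // The snapshot captures the blob (including the metadata just set) and returns its id.
            return blobClient.createSnapshot().value();
        }
    }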
*/ public Response createSnapshot(Metadata metadata, BlobAccessConditions accessConditions, Duration timeout) { Mono> response = blobAsyncClient @@ -532,10 +465,10 @@ public Response createSnapshot(Metadata metadata, BlobAccessConditions a /** * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of - * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's etag. + * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's + * etag. * - * @param tier - * The new tier for the blob. + * @param tier The new tier for the blob. * @return A response containing status code and HTTP headers. */ public VoidResponse setTier(AccessTier tier) { @@ -545,15 +478,13 @@ public VoidResponse setTier(AccessTier tier) { /** * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of - * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's etag. - * - * @param tier - * The new tier for the blob. - * @param leaseAccessConditions - * By setting lease access conditions, requests will fail if the provided lease does not match the active - * lease on the blob. - * @param timeout - * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's + * etag. + * + * @param tier The new tier for the blob. + * @param leaseAccessConditions By setting lease access conditions, requests will fail if the provided lease does + * not match the active lease on the blob. + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @return A response containing status code and HTTP headers. */ public VoidResponse setTier(AccessTier tier, LeaseAccessConditions leaseAccessConditions, Duration timeout) { @@ -575,8 +506,7 @@ public VoidResponse undelete() { /** * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots. * - * @param timeout - * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @return A response containing status code and HTTP headers. */ public VoidResponse undelete(Duration timeout) { @@ -590,14 +520,10 @@ public VoidResponse undelete(Duration timeout) { * Acquires a lease on the blob for write and delete operations. The lease duration must be between 15 to 60 * seconds, or infinite (-1). * - * @param proposedId - * A {@code String} in any valid GUID format. May be null. - * @param duration - * The duration of the lease, in seconds, or negative one (-1) for a lease that - * never expires. A non-infinite lease can be between 15 and 60 seconds. - * - * @return - * The lease ID. + * @param proposedId A {@code String} in any valid GUID format. May be null. + * @param duration The duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A + * non-infinite lease can be between 15 and 60 seconds. + * @return The lease ID. 
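[Reviewer note] A brief sketch of the tier and undelete operations described above, assuming soft delete is enabled on the account for undelete to have any effect:

    import com.azure.storage.blob.BlobClient;
    import com.azure.storage.blob.models.AccessTier;
    import java.time.Duration;

    public final class TierAndUndeleteSample {
        static void coolThenRestore(BlobClient blobClient) {
            // Move a block blob to the Cool tier; the blob's etag is not changed by this call.
            blobClient.setTier(AccessTier.COOL);

            // If the blob (or its snapshots) was soft-deleted, restore it, failing after 30 seconds.
            blobClient.undelete(Duration.ofSeconds(30));
        }
    }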
*/ public Response acquireLease(String proposedId, int duration) { return this.acquireLease(proposedId, duration, null, null); @@ -607,23 +533,17 @@ public Response acquireLease(String proposedId, int duration) { * Acquires a lease on the blob for write and delete operations. The lease duration must be between 15 to 60 * seconds, or infinite (-1). * - * @param proposedID - * A {@code String} in any valid GUID format. May be null. - * @param duration - * The duration of the lease, in seconds, or negative one (-1) for a lease that - * never expires. A non-infinite lease can be between 15 and 60 seconds. - * @param modifiedAccessConditions - * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used - * to construct conditions related to when the blob was changed relative to the given request. The request - * will fail if the specified condition is not satisfied. - * @param timeout - * An optional timeout value beyond which a {@link RuntimeException} will be raised. - * - * @return - * The lease ID. + * @param proposedID A {@code String} in any valid GUID format. May be null. + * @param duration The duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A + * non-infinite lease can be between 15 and 60 seconds. + * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. ETag and + * LastModifiedTime are used to construct conditions related to when the blob was changed relative to the given + * request. The request will fail if the specified condition is not satisfied. + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @return The lease ID. */ public Response acquireLease(String proposedID, int duration, - ModifiedAccessConditions modifiedAccessConditions, Duration timeout) { + ModifiedAccessConditions modifiedAccessConditions, Duration timeout) { Mono> response = blobAsyncClient .acquireLease(proposedID, duration, modifiedAccessConditions); @@ -633,11 +553,8 @@ public Response acquireLease(String proposedID, int duration, /** * Renews the blob's previously-acquired lease. * - * @param leaseID - * The leaseId of the active lease on the blob. - * - * @return - * The renewed lease ID. + * @param leaseID The leaseId of the active lease on the blob. + * @return The renewed lease ID. */ public Response renewLease(String leaseID) { return this.renewLease(leaseID, null, null); @@ -646,20 +563,15 @@ public Response renewLease(String leaseID) { /** * Renews the blob's previously-acquired lease. * - * @param leaseID - * The leaseId of the active lease on the blob. - * @param modifiedAccessConditions - * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used - * to construct conditions related to when the blob was changed relative to the given request. The request - * will fail if the specified condition is not satisfied. - * @param timeout - * An optional timeout value beyond which a {@link RuntimeException} will be raised. - * - * @return - * The renewed lease ID. + * @param leaseID The leaseId of the active lease on the blob. + * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. ETag and + * LastModifiedTime are used to construct conditions related to when the blob was changed relative to the given + * request. The request will fail if the specified condition is not satisfied. 
+ * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @return The renewed lease ID. */ public Response renewLease(String leaseID, ModifiedAccessConditions modifiedAccessConditions, - Duration timeout) { + Duration timeout) { Mono> response = blobAsyncClient .renewLease(leaseID, modifiedAccessConditions); @@ -669,8 +581,7 @@ public Response renewLease(String leaseID, ModifiedAccessConditions modi /** * Releases the blob's previously-acquired lease. * - * @param leaseID - * The leaseId of the active lease on the blob. + * @param leaseID The leaseId of the active lease on the blob. * @return A response containing status code and HTTP headers. */ public VoidResponse releaseLease(String leaseID) { @@ -680,18 +591,15 @@ public VoidResponse releaseLease(String leaseID) { /** * Releases the blob's previously-acquired lease. * - * @param leaseID - * The leaseId of the active lease on the blob. - * @param modifiedAccessConditions - * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used - * to construct conditions related to when the blob was changed relative to the given request. The request - * will fail if the specified condition is not satisfied. - * @param timeout - * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param leaseID The leaseId of the active lease on the blob. + * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. ETag and + * LastModifiedTime are used to construct conditions related to when the blob was changed relative to the given + * request. The request will fail if the specified condition is not satisfied. + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @return A response containing status code and HTTP headers. */ public VoidResponse releaseLease(String leaseID, - ModifiedAccessConditions modifiedAccessConditions, Duration timeout) { + ModifiedAccessConditions modifiedAccessConditions, Duration timeout) { Mono response = blobAsyncClient .releaseLease(leaseID, modifiedAccessConditions); @@ -702,8 +610,7 @@ public VoidResponse releaseLease(String leaseID, * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) constant * to break a fixed-duration lease when it expires or an infinite lease immediately. * - * @return - * The remaining time in the broken lease in seconds. + * @return The remaining time in the broken lease in seconds. */ public Response breakLease() { return this.breakLease(null, null, null); @@ -713,24 +620,19 @@ public Response breakLease() { * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) constant * to break a fixed-duration lease when it expires or an infinite lease immediately. * - * @param breakPeriodInSeconds - * An optional {@code Integer} representing the proposed duration of seconds that the lease should continue - * before it is broken, between 0 and 60 seconds. This break period is only used if it is shorter than the - * time remaining on the lease. If longer, the time remaining on the lease is used. A new lease will not be - * available before the break period has expired, but the lease may be held for longer than the break - * period. - * @param modifiedAccessConditions - * Standard HTTP Access conditions related to the modification of data. 
ETag and LastModifiedTime are used - * to construct conditions related to when the blob was changed relative to the given request. The request - * will fail if the specified condition is not satisfied. - * @param timeout - * An optional timeout value beyond which a {@link RuntimeException} will be raised. - * - * @return - * The remaining time in the broken lease in seconds. + * @param breakPeriodInSeconds An optional {@code Integer} representing the proposed duration of seconds that the + * lease should continue before it is broken, between 0 and 60 seconds. This break period is only used if it is + * shorter than the time remaining on the lease. If longer, the time remaining on the lease is used. A new lease + * will not be available before the break period has expired, but the lease may be held for longer than the break + * period. + * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. ETag and + * LastModifiedTime are used to construct conditions related to when the blob was changed relative to the given + * request. The request will fail if the specified condition is not satisfied. + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @return The remaining time in the broken lease in seconds. */ public Response breakLease(Integer breakPeriodInSeconds, - ModifiedAccessConditions modifiedAccessConditions, Duration timeout) { + ModifiedAccessConditions modifiedAccessConditions, Duration timeout) { Mono> response = blobAsyncClient .breakLease(breakPeriodInSeconds, modifiedAccessConditions); @@ -740,36 +642,28 @@ public Response breakLease(Integer breakPeriodInSeconds, /** * ChangeLease changes the blob's lease ID. * - * @param leaseId - * The leaseId of the active lease on the blob. - * @param proposedID - * A {@code String} in any valid GUID format. - * - * @return - * The new lease ID. + * @param leaseId The leaseId of the active lease on the blob. + * @param proposedID A {@code String} in any valid GUID format. + * @return The new lease ID. */ public Response changeLease(String leaseId, String proposedID) { return this.changeLease(leaseId, proposedID, null, null); } /** - * ChangeLease changes the blob's lease ID. For more information, see the Azure Docs. - * - * @param leaseId - * The leaseId of the active lease on the blob. - * @param proposedID - * A {@code String} in any valid GUID format. - * @param modifiedAccessConditions - * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used - * to construct conditions related to when the blob was changed relative to the given request. The request - * will fail if the specified condition is not satisfied. - * @param timeout - * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * ChangeLease changes the blob's lease ID. For more information, see the Azure + * Docs. * + * @param leaseId The leaseId of the active lease on the blob. + * @param proposedID A {@code String} in any valid GUID format. + * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. ETag and + * LastModifiedTime are used to construct conditions related to when the blob was changed relative to the given + * request. The request will fail if the specified condition is not satisfied. + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @return The new lease ID. 
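// A hedged sketch of the lease lifecycle described above, assuming an existing BlobClient named
// "blobClient" and the preview Response.value() accessor; error handling is omitted.
static void leaseRoundTrip(BlobClient blobClient) {
    String leaseId = blobClient.acquireLease(null, 15).value(); // null proposedId lets the service choose
    String renewedId = blobClient.renewLease(leaseId).value();  // lease must still be active to renew
    blobClient.releaseLease(renewedId);                         // frees the blob for other writers
}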
*/ public Response changeLease(String leaseId, String proposedID, - ModifiedAccessConditions modifiedAccessConditions, Duration timeout) { + ModifiedAccessConditions modifiedAccessConditions, Duration timeout) { Mono> response = blobAsyncClient .changeLease(leaseId, proposedID, modifiedAccessConditions); @@ -777,7 +671,8 @@ public Response changeLease(String leaseId, String proposedID, } /** - * Returns the sku name and account kind for the account. For more information, please see the Azure Docs. + * Returns the sku name and account kind for the account. For more information, please see the Azure Docs. * * @return The sku name and account kind. */ @@ -786,11 +681,10 @@ public Response getAccountInfo() { } /** - * Returns the sku name and account kind for the account. For more information, please see the Azure Docs. - * - * @param timeout - * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * Returns the sku name and account kind for the account. For more information, please see the Azure Docs. * + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @return The sku name and account kind. */ public Response getAccountInfo(Duration timeout) { @@ -799,4 +693,146 @@ public Response getAccountInfo(Duration timeout) { return Utility.blockWithOptionalTimeout(response, timeout); } + + /** + * Generates a user delegation SAS token with the specified parameters + * + * @param userDelegationKey The {@code UserDelegationKey} user delegation key for the SAS + * @param accountName The {@code String} account name for the SAS + * @param permissions The {@code ContainerSASPermissions} permission for the SAS + * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS + * @return A string that represents the SAS token + */ + public String generateUserDelegationSAS(UserDelegationKey userDelegationKey, String accountName, + BlobSASPermission permissions, OffsetDateTime expiryTime) { + return this.blobAsyncClient.generateUserDelegationSAS(userDelegationKey, accountName, permissions, expiryTime); + } + + /** + * Generates a user delegation SAS token with the specified parameters + * + * @param userDelegationKey The {@code UserDelegationKey} user delegation key for the SAS + * @param accountName The {@code String} account name for the SAS + * @param permissions The {@code ContainerSASPermissions} permission for the SAS + * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS + * @param startTime An optional {@code OffsetDateTime} start time for the SAS + * @param version An optional {@code String} version for the SAS + * @param sasProtocol An optional {@code SASProtocol} protocol for the SAS + * @param ipRange An optional {@code IPRange} ip address range for the SAS + * @return A string that represents the SAS token + */ + public String generateUserDelegationSAS(UserDelegationKey userDelegationKey, String accountName, + BlobSASPermission permissions, OffsetDateTime expiryTime, OffsetDateTime startTime, String version, + SASProtocol sasProtocol, IPRange ipRange) { + return this.blobAsyncClient.generateUserDelegationSAS(userDelegationKey, accountName, permissions, expiryTime, + startTime, version, sasProtocol, ipRange); + } + + /** + * Generates a user delegation SAS token with the specified parameters + * + * @param userDelegationKey The {@code UserDelegationKey} user delegation key for the SAS + * @param accountName The {@code String} account name for the SAS + * @param permissions The {@code 
ContainerSASPermissions} permission for the SAS + * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS + * @param startTime An optional {@code OffsetDateTime} start time for the SAS + * @param version An optional {@code String} version for the SAS + * @param sasProtocol An optional {@code SASProtocol} protocol for the SAS + * @param ipRange An optional {@code IPRange} ip address range for the SAS + * @param cacheControl An optional {@code String} cache-control header for the SAS. + * @param contentDisposition An optional {@code String} content-disposition header for the SAS. + * @param contentEncoding An optional {@code String} content-encoding header for the SAS. + * @param contentLanguage An optional {@code String} content-language header for the SAS. + * @param contentType An optional {@code String} content-type header for the SAS. + * @return A string that represents the SAS token + */ + public String generateUserDelegationSAS(UserDelegationKey userDelegationKey, String accountName, + BlobSASPermission permissions, OffsetDateTime expiryTime, OffsetDateTime startTime, String version, + SASProtocol sasProtocol, IPRange ipRange, String cacheControl, String contentDisposition, + String contentEncoding, String contentLanguage, String contentType) { + return this.blobAsyncClient.generateUserDelegationSAS(userDelegationKey, accountName, permissions, expiryTime, + startTime, version, sasProtocol, ipRange, cacheControl, contentDisposition, contentEncoding, + contentLanguage, contentType); + } + + /** + * Generates a SAS token with the specified parameters + * + * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS + * @param permissions The {@code ContainerSASPermissions} permission for the SAS + * @return A string that represents the SAS token + */ + public String generateSAS(OffsetDateTime expiryTime, BlobSASPermission permissions) { + return this.blobAsyncClient.generateSAS(permissions, expiryTime); + } + + /** + * Generates a SAS token with the specified parameters + * + * @param identifier The {@code String} name of the access policy on the container this SAS references if any + * @return A string that represents the SAS token + */ + public String generateSAS(String identifier) { + return this.blobAsyncClient.generateSAS(identifier); + } + + /** + * Generates a SAS token with the specified parameters + * + * @param identifier The {@code String} name of the access policy on the container this SAS references if any + * @param permissions The {@code ContainerSASPermissions} permission for the SAS + * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS + * @param startTime An optional {@code OffsetDateTime} start time for the SAS + * @param version An optional {@code String} version for the SAS + * @param sasProtocol An optional {@code SASProtocol} protocol for the SAS + * @param ipRange An optional {@code IPRange} ip address range for the SAS + * @return A string that represents the SAS token + */ + public String generateSAS(String identifier, BlobSASPermission permissions, OffsetDateTime expiryTime, + OffsetDateTime startTime, String version, SASProtocol sasProtocol, IPRange ipRange) { + return this.blobAsyncClient.generateSAS(identifier, permissions, expiryTime, startTime, version, sasProtocol, + ipRange); + } + + /** + * Generates a SAS token with the specified parameters + * + * @param identifier The {@code String} name of the access policy on the container this SAS references if any + * @param permissions The {@code 
BlobSASPermission} permission for the SAS + * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS + * @param startTime An optional {@code OffsetDateTime} start time for the SAS + * @param version An optional {@code String} version for the SAS + * @param sasProtocol An optional {@code SASProtocol} protocol for the SAS + * @param ipRange An optional {@code IPRange} ip address range for the SAS + * @param cacheControl An optional {@code String} cache-control header for the SAS. + * @param contentDisposition An optional {@code String} content-disposition header for the SAS. + * @param contentEncoding An optional {@code String} content-encoding header for the SAS. + * @param contentLanguage An optional {@code String} content-language header for the SAS. + * @param contentType An optional {@code String} content-type header for the SAS. + * @return A string that represents the SAS token + */ + public String generateSAS(String identifier, BlobSASPermission permissions, OffsetDateTime expiryTime, + OffsetDateTime startTime, String version, SASProtocol sasProtocol, IPRange ipRange, String cacheControl, + String contentDisposition, String contentEncoding, String contentLanguage, String contentType) { + return this.blobAsyncClient.generateSAS(identifier, permissions, expiryTime, startTime, version, sasProtocol, + ipRange, cacheControl, contentDisposition, contentEncoding, contentLanguage, contentType); + } + + /** + * Gets the snapshotId for a blob resource + * + * @return A string that represents the snapshotId of the snapshot blob + */ + public String getSnapshotId() { + return this.blobAsyncClient.getSnapshotId(); + } + + /** + * Determines if a blob is a snapshot + * + * @return A boolean that indicates if a blob is a snapshot + */ + public boolean isSnapshot() { + return this.blobAsyncClient.isSnapshot(); + } } diff --git a/storage/client/blob/src/main/java/com/azure/storage/blob/Constants.java b/storage/client/blob/src/main/java/com/azure/storage/blob/Constants.java index 653625b572c15..fa812df4b531b 100644 --- a/storage/client/blob/src/main/java/com/azure/storage/blob/Constants.java +++ b/storage/client/blob/src/main/java/com/azure/storage/blob/Constants.java @@ -288,6 +288,21 @@ static final class UrlConstants { */ public static final String SAS_SIGNED_KEY_VERSION = "skv"; + /** + * The SAS blob constant. + */ + public static final String SAS_BLOB_CONSTANT = "b"; + + /** + * The SAS blob snapshot constant. + */ + public static final String SAS_BLOB_SNAPSHOT_CONSTANT = "bs"; + + /** + * The SAS container constant. + */ + public static final String SAS_CONTAINER_CONSTANT = "c"; + private UrlConstants() { // Private to prevent construction.
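// A hedged sketch for the SAS and snapshot helpers above. "readPolicy" is an assumed stored access
// policy name on the parent container; the identifier-based overload needs no explicit expiry or
// permissions because they come from that policy.
static void printSasAndSnapshotState(BlobClient blobClient) {
    String sasToken = blobClient.generateSAS("readPolicy");
    System.out.println("SAS token: " + sasToken);
    if (blobClient.isSnapshot()) {
        System.out.println("Snapshot ID: " + blobClient.getSnapshotId());
    }
}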
} diff --git a/storage/client/blob/src/main/java/com/azure/storage/blob/ContainerAsyncClient.java b/storage/client/blob/src/main/java/com/azure/storage/blob/ContainerAsyncClient.java index b61d2b36f3c31..6d9e64f16a924 100644 --- a/storage/client/blob/src/main/java/com/azure/storage/blob/ContainerAsyncClient.java +++ b/storage/client/blob/src/main/java/com/azure/storage/blob/ContainerAsyncClient.java @@ -25,37 +25,39 @@ import com.azure.storage.blob.models.PublicAccessType; import com.azure.storage.blob.models.SignedIdentifier; import com.azure.storage.blob.models.StorageAccountInfo; +import com.azure.storage.blob.models.UserDelegationKey; +import com.azure.storage.common.credentials.SharedKeyCredential; import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; import java.net.MalformedURLException; import java.net.URL; import java.time.Duration; +import java.time.OffsetDateTime; import java.time.temporal.ChronoUnit; import java.util.List; import static com.azure.storage.blob.Utility.postProcessResponse; /** - * Client to a container. It may only be instantiated through a {@link ContainerClientBuilder} or via the method - * {@link StorageAsyncClient#getContainerAsyncClient(String)}. This class does not hold any - * state about a particular blob but is instead a convenient way of sending off appropriate requests to - * the resource on the service. It may also be used to construct URLs to blobs. + * Client to a container. It may only be instantiated through a {@link ContainerClientBuilder} or via the method {@link + * StorageAsyncClient#getContainerAsyncClient(String)}. This class does not hold any state about a particular blob but + * is instead a convenient way of sending off appropriate requests to the resource on the service. It may also be used + * to construct URLs to blobs. * *

    * This client contains operations on a container. Operations on a blob are available on {@link BlobAsyncClient} through * {@link #getBlobAsyncClient(String)}, and operations on the service are available on {@link StorageAsyncClient}. * *

    - * Please refer to the Azure Docs - * for more information on containers. + * Please refer to the Azure + * Docs for more information on containers. * *

    - * Note this client is an async client that returns reactive responses from Spring Reactor Core - * project (https://projectreactor.io/). Calling the methods in this client will NOT - * start the actual network operation, until {@code .subscribe()} is called on the reactive response. - * You can simply convert one of these responses to a {@link java.util.concurrent.CompletableFuture} - * object through {@link Mono#toFuture()}. + * Note this client is an async client that returns reactive responses from Spring Reactor Core project + * (https://projectreactor.io/). Calling the methods in this client will NOT start the actual network + * operation, until {@code .subscribe()} is called on the reactive response. You can simply convert one of these + * responses to a {@link java.util.concurrent.CompletableFuture} object through {@link Mono#toFuture()}. */ public final class ContainerAsyncClient { public static final String ROOT_CONTAINER_NAME = "$root"; @@ -68,6 +70,7 @@ public final class ContainerAsyncClient { /** * Package-private constructor for use by {@link ContainerClientBuilder}. + * * @param azureBlobStorageBuilder the API client builder for blob storage API */ ContainerAsyncClient(AzureBlobStorageBuilder azureBlobStorageBuilder) { @@ -76,15 +79,14 @@ public final class ContainerAsyncClient { /** * Creates a new {@link BlockBlobAsyncClient} object by concatenating the blobName to the end of - * ContainerAsyncClient's URL. The new BlockBlobAsyncClient uses the same request policy pipeline as the ContainerAsyncClient. - * To change the pipeline, create the BlockBlobAsyncClient and then call its WithPipeline method passing in the - * desired pipeline object. Or, call this package's NewBlockBlobAsyncClient instead of calling this object's - * NewBlockBlobAsyncClient method. - * - * @param blobName - * A {@code String} representing the name of the blob. - * - * @return A new {@link BlockBlobAsyncClient} object which references the blob with the specified name in this container. + * ContainerAsyncClient's URL. The new BlockBlobAsyncClient uses the same request policy pipeline as the + * ContainerAsyncClient. To change the pipeline, create the BlockBlobAsyncClient and then call its WithPipeline + * method passing in the desired pipeline object. Or, call this package's NewBlockBlobAsyncClient instead of calling + * this object's NewBlockBlobAsyncClient method. + * + * @param blobName A {@code String} representing the name of the blob. + * @return A new {@link BlockBlobAsyncClient} object which references the blob with the specified name in this + * container. */ public BlockBlobAsyncClient getBlockBlobAsyncClient(String blobName) { return getBlockBlobAsyncClient(blobName, null); @@ -92,17 +94,15 @@ public BlockBlobAsyncClient getBlockBlobAsyncClient(String blobName) { /** * Creates a new {@link BlockBlobAsyncClient} object by concatenating the blobName to the end of - * ContainerAsyncClient's URL. The new BlockBlobAsyncClient uses the same request policy pipeline as the ContainerAsyncClient. - * To change the pipeline, create the BlockBlobAsyncClient and then call its WithPipeline method passing in the - * desired pipeline object. Or, call this package's NewBlockBlobAsyncClient instead of calling this object's - * NewBlockBlobAsyncClient method. - * - * @param blobName - * A {@code String} representing the name of the blob. - * @param snapshot - * the snapshot identifier for the blob. 
- * - * @return A new {@link BlockBlobAsyncClient} object which references the blob with the specified name in this container. + * ContainerAsyncClient's URL. The new BlockBlobAsyncClient uses the same request policy pipeline as the + * ContainerAsyncClient. To change the pipeline, create the BlockBlobAsyncClient and then call its WithPipeline + * method passing in the desired pipeline object. Or, call this package's NewBlockBlobAsyncClient instead of calling + * this object's NewBlockBlobAsyncClient method. + * + * @param blobName A {@code String} representing the name of the blob. + * @param snapshot the snapshot identifier for the blob. + * @return A new {@link BlockBlobAsyncClient} object which references the blob with the specified name in this + * container. */ public BlockBlobAsyncClient getBlockBlobAsyncClient(String blobName, String snapshot) { return new BlockBlobAsyncClient(new AzureBlobStorageBuilder() @@ -111,34 +111,31 @@ public BlockBlobAsyncClient getBlockBlobAsyncClient(String blobName, String snap } /** - * Creates creates a new PageBlobAsyncClient object by concatenating blobName to the end of - * ContainerAsyncClient's URL. The new PageBlobAsyncClient uses the same request policy pipeline as the ContainerAsyncClient. - * To change the pipeline, create the PageBlobAsyncClient and then call its WithPipeline method passing in the - * desired pipeline object. Or, call this package's NewPageBlobAsyncClient instead of calling this object's - * NewPageBlobAsyncClient method. - * - * @param blobName - * A {@code String} representing the name of the blob. - * - * @return A new {@link PageBlobAsyncClient} object which references the blob with the specified name in this container. + * Creates creates a new PageBlobAsyncClient object by concatenating blobName to the end of ContainerAsyncClient's + * URL. The new PageBlobAsyncClient uses the same request policy pipeline as the ContainerAsyncClient. To change the + * pipeline, create the PageBlobAsyncClient and then call its WithPipeline method passing in the desired pipeline + * object. Or, call this package's NewPageBlobAsyncClient instead of calling this object's NewPageBlobAsyncClient + * method. + * + * @param blobName A {@code String} representing the name of the blob. + * @return A new {@link PageBlobAsyncClient} object which references the blob with the specified name in this + * container. */ public PageBlobAsyncClient getPageBlobAsyncClient(String blobName) { return getPageBlobAsyncClient(blobName, null); } /** - * Creates creates a new PageBlobAsyncClient object by concatenating blobName to the end of - * ContainerAsyncClient's URL. The new PageBlobAsyncClient uses the same request policy pipeline as the ContainerAsyncClient. - * To change the pipeline, create the PageBlobAsyncClient and then call its WithPipeline method passing in the - * desired pipeline object. Or, call this package's NewPageBlobAsyncClient instead of calling this object's - * NewPageBlobAsyncClient method. - * - * @param blobName - * A {@code String} representing the name of the blob. - * @param snapshot - * the snapshot identifier for the blob. - * - * @return A new {@link PageBlobAsyncClient} object which references the blob with the specified name in this container. + * Creates creates a new PageBlobAsyncClient object by concatenating blobName to the end of ContainerAsyncClient's + * URL. The new PageBlobAsyncClient uses the same request policy pipeline as the ContainerAsyncClient. 
To change the + * pipeline, create the PageBlobAsyncClient and then call its WithPipeline method passing in the desired pipeline + * object. Or, call this package's NewPageBlobAsyncClient instead of calling this object's NewPageBlobAsyncClient + * method. + * + * @param blobName A {@code String} representing the name of the blob. + * @param snapshot the snapshot identifier for the blob. + * @return A new {@link PageBlobAsyncClient} object which references the blob with the specified name in this + * container. */ public PageBlobAsyncClient getPageBlobAsyncClient(String blobName, String snapshot) { return new PageBlobAsyncClient(new AzureBlobStorageBuilder() @@ -147,34 +144,31 @@ public PageBlobAsyncClient getPageBlobAsyncClient(String blobName, String snapsh } /** - * Creates creates a new AppendBlobAsyncClient object by concatenating blobName to the end of - * ContainerAsyncClient's URL. The new AppendBlobAsyncClient uses the same request policy pipeline as the ContainerAsyncClient. - * To change the pipeline, create the AppendBlobAsyncClient and then call its WithPipeline method passing in the - * desired pipeline object. Or, call this package's NewAppendBlobAsyncClient instead of calling this object's + * Creates creates a new AppendBlobAsyncClient object by concatenating blobName to the end of ContainerAsyncClient's + * URL. The new AppendBlobAsyncClient uses the same request policy pipeline as the ContainerAsyncClient. To change + * the pipeline, create the AppendBlobAsyncClient and then call its WithPipeline method passing in the desired + * pipeline object. Or, call this package's NewAppendBlobAsyncClient instead of calling this object's * NewAppendBlobAsyncClient method. * - * @param blobName - * A {@code String} representing the name of the blob. - * - * @return A new {@link AppendBlobAsyncClient} object which references the blob with the specified name in this container. + * @param blobName A {@code String} representing the name of the blob. + * @return A new {@link AppendBlobAsyncClient} object which references the blob with the specified name in this + * container. */ public AppendBlobAsyncClient getAppendBlobAsyncClient(String blobName) { return getAppendBlobAsyncClient(blobName, null); } /** - * Creates creates a new AppendBlobAsyncClient object by concatenating blobName to the end of - * ContainerAsyncClient's URL. The new AppendBlobAsyncClient uses the same request policy pipeline as the ContainerAsyncClient. - * To change the pipeline, create the AppendBlobAsyncClient and then call its WithPipeline method passing in the - * desired pipeline object. Or, call this package's NewAppendBlobAsyncClient instead of calling this object's + * Creates creates a new AppendBlobAsyncClient object by concatenating blobName to the end of ContainerAsyncClient's + * URL. The new AppendBlobAsyncClient uses the same request policy pipeline as the ContainerAsyncClient. To change + * the pipeline, create the AppendBlobAsyncClient and then call its WithPipeline method passing in the desired + * pipeline object. Or, call this package's NewAppendBlobAsyncClient instead of calling this object's * NewAppendBlobAsyncClient method. * - * @param blobName - * A {@code String} representing the name of the blob. - * @param snapshot - * the snapshot identifier for the blob. - * - * @return A new {@link AppendBlobAsyncClient} object which references the blob with the specified name in this container. + * @param blobName A {@code String} representing the name of the blob. 
+ * @param snapshot the snapshot identifier for the blob. + * @return A new {@link AppendBlobAsyncClient} object which references the blob with the specified name in this + * container. */ public AppendBlobAsyncClient getAppendBlobAsyncClient(String blobName, String snapshot) { return new AppendBlobAsyncClient(new AzureBlobStorageBuilder() @@ -183,15 +177,12 @@ public AppendBlobAsyncClient getAppendBlobAsyncClient(String blobName, String sn } /** - * Creates a new BlobAsyncClient object by concatenating blobName to the end of - * ContainerAsyncClient's URL. The new BlobAsyncClient uses the same request policy pipeline as the ContainerAsyncClient. - * To change the pipeline, create the BlobAsyncClient and then call its WithPipeline method passing in the - * desired pipeline object. Or, call this package's getBlobAsyncClient instead of calling this object's - * getBlobAsyncClient method. - * - * @param blobName - * A {@code String} representing the name of the blob. + * Creates a new BlobAsyncClient object by concatenating blobName to the end of ContainerAsyncClient's URL. The new + * BlobAsyncClient uses the same request policy pipeline as the ContainerAsyncClient. To change the pipeline, create + * the BlobAsyncClient and then call its WithPipeline method passing in the desired pipeline object. Or, call this + * package's getBlobAsyncClient instead of calling this object's getBlobAsyncClient method. * + * @param blobName A {@code String} representing the name of the blob. * @return A new {@link BlobAsyncClient} object which references the blob with the specified name in this container. */ public BlobAsyncClient getBlobAsyncClient(String blobName) { @@ -199,17 +190,13 @@ public BlobAsyncClient getBlobAsyncClient(String blobName) { } /** - * Creates a new BlobAsyncClient object by concatenating blobName to the end of - * ContainerAsyncClient's URL. The new BlobAsyncClient uses the same request policy pipeline as the ContainerAsyncClient. - * To change the pipeline, create the BlobAsyncClient and then call its WithPipeline method passing in the - * desired pipeline object. Or, call this package's getBlobAsyncClient instead of calling this object's - * getBlobAsyncClient method. - * - * @param blobName - * A {@code String} representing the name of the blob. - * @param snapshot - * the snapshot identifier for the blob. + * Creates a new BlobAsyncClient object by concatenating blobName to the end of ContainerAsyncClient's URL. The new + * BlobAsyncClient uses the same request policy pipeline as the ContainerAsyncClient. To change the pipeline, create + * the BlobAsyncClient and then call its WithPipeline method passing in the desired pipeline object. Or, call this + * package's getBlobAsyncClient instead of calling this object's getBlobAsyncClient method. * + * @param blobName A {@code String} representing the name of the blob. + * @param snapshot the snapshot identifier for the blob. * @return A new {@link BlobAsyncClient} object which references the blob with the specified name in this container. */ public BlobAsyncClient getBlobAsyncClient(String blobName, String snapshot) { @@ -221,8 +208,7 @@ public BlobAsyncClient getBlobAsyncClient(String blobName, String snapshot) { /** * Initializes a {@link StorageAsyncClient} object pointing to the storage account this container is in. 
* - * @return - * A {@link StorageAsyncClient} object pointing to the specified storage account + * @return A {@link StorageAsyncClient} object pointing to the specified storage account */ public StorageAsyncClient getStorageAsyncClient() { return new StorageAsyncClient(new AzureBlobStorageBuilder() @@ -232,6 +218,7 @@ public StorageAsyncClient getStorageAsyncClient() { /** * Gets the URL of the container represented by this client. + * * @return the URL. * @throws RuntimeException If the container has a malformed URL. */ @@ -246,8 +233,7 @@ public URL getContainerUrl() { /** * Gets if the container this client represents exists in the cloud. * - * @return - * true if the container exists, false if it doesn't + * @return true if the container exists, false if it doesn't */ public Mono> exists() { return this.getProperties(null) @@ -263,8 +249,7 @@ public Mono> exists() { * fails. For more information, see the * Azure Docs. * - * @return - * A reactive response signalling completion. + * @return A reactive response signalling completion. */ public Mono create() { return this.create(null, null); @@ -275,14 +260,10 @@ public Mono create() { * fails. For more information, see the * Azure Docs. * - * @param metadata - * {@link Metadata} - * @param accessType - * Specifies how the data in this container is available to the public. See the x-ms-blob-public-access header - * in the Azure Docs for more information. Pass null for no public access. - * - * @return - * A reactive response signalling completion. + * @param metadata {@link Metadata} + * @param accessType Specifies how the data in this container is available to the public. See the + * x-ms-blob-public-access header in the Azure Docs for more information. Pass null for no public access. + * @return A reactive response signalling completion. */ public Mono create(Metadata metadata, PublicAccessType accessType) { metadata = metadata == null ? new Metadata() : metadata; @@ -293,20 +274,19 @@ public Mono create(Metadata metadata, PublicAccessType accessType) } /** - * Marks the specified container for deletion. The container and any blobs contained within it are later - * deleted during garbage collection. For more information, see the + * Marks the specified container for deletion. The container and any blobs contained within it are later deleted + * during garbage collection. For more information, see the * Azure Docs. * - * @return - * A reactive response signalling completion. + * @return A reactive response signalling completion. */ public Mono delete() { return this.delete(null); } /** - * Marks the specified container for deletion. The container and any blobs contained within it are later - * deleted during garbage collection. For more information, see the + * Marks the specified container for deletion. The container and any blobs contained within it are later deleted + * during garbage collection. For more information, see the * Azure Docs. * * @param accessConditions {@link ContainerAccessConditions} @@ -333,8 +313,7 @@ public Mono delete(ContainerAccessConditions accessConditions) { * Returns the container's metadata and system properties. For more information, see the * Azure Docs. * - * @return - * A reactive response containing the container properties. + * @return A reactive response containing the container properties. */ public Mono> getProperties() { return this.getProperties(null); @@ -344,12 +323,9 @@ public Mono> getProperties() { * Returns the container's metadata and system properties. 
For more information, see the * Azure Docs. * - * @param leaseAccessConditions - * By setting lease access conditions, requests will fail if the provided lease does not match the active - * lease on the blob. - * - * @return - * A reactive response containing the container properties. + * @param leaseAccessConditions By setting lease access conditions, requests will fail if the provided lease does + * not match the active lease on the blob. + * @return A reactive response containing the container properties. */ public Mono> getProperties(LeaseAccessConditions leaseAccessConditions) { return postProcessResponse(this.azureBlobStorage.containers() @@ -362,11 +338,8 @@ public Mono> getProperties(LeaseAccessConditions l * Sets the container's metadata. For more information, see the * Azure Docs. * - * @param metadata - * {@link Metadata} - * - * @return - * A reactive response signalling completion. + * @param metadata {@link Metadata} + * @return A reactive response signalling completion. */ public Mono setMetadata(Metadata metadata) { return this.setMetadata(metadata, null); @@ -379,11 +352,11 @@ public Mono setMetadata(Metadata metadata) { * @param metadata {@link Metadata} * @param accessConditions {@link ContainerAccessConditions} * @return A reactive response signalling completion. - * @throws UnsupportedOperationException If {@link ContainerAccessConditions#modifiedAccessConditions()} has anything - * set other than {@link ModifiedAccessConditions#ifModifiedSince()}. + * @throws UnsupportedOperationException If {@link ContainerAccessConditions#modifiedAccessConditions()} has + * anything set other than {@link ModifiedAccessConditions#ifModifiedSince()}. */ public Mono setMetadata(Metadata metadata, - ContainerAccessConditions accessConditions) { + ContainerAccessConditions accessConditions) { metadata = metadata == null ? new Metadata() : metadata; accessConditions = accessConditions == null ? new ContainerAccessConditions() : accessConditions; if (!validateNoEtag(accessConditions.modifiedAccessConditions()) @@ -405,8 +378,7 @@ public Mono setMetadata(Metadata metadata, * For more information, see the * Azure Docs. * - * @return - * A reactive response containing the container access policy. + * @return A reactive response containing the container access policy. */ public Mono> getAccessPolicy() { return this.getAccessPolicy(null); @@ -417,12 +389,9 @@ public Mono> getAccessPolicy() { * For more information, see the * Azure Docs. * - * @param leaseAccessConditions - * By setting lease access conditions, requests will fail if the provided lease does not match the active - * lease on the blob. - * - * @return - * A reactive response containing the container access policy. + * @param leaseAccessConditions By setting lease access conditions, requests will fail if the provided lease does + * not match the active lease on the blob. + * @return A reactive response containing the container access policy. */ public Mono> getAccessPolicy(LeaseAccessConditions leaseAccessConditions) { return postProcessResponse(this.azureBlobStorage.containers().getAccessPolicyWithRestResponseAsync(null, null, null, leaseAccessConditions, Context.NONE) @@ -435,19 +404,16 @@ public Mono> getAccessPolicy(LeaseAccessCondit * ensure the time formatting is compatible with the service. For more information, see the * Azure Docs. * - * @param accessType - * Specifies how the data in this container is available to the public. See the x-ms-blob-public-access header - * in the Azure Docs for more information. 
Pass null for no public access. - * @param identifiers - * A list of {@link SignedIdentifier} objects that specify the permissions for the container. Please see - * here - * for more information. Passing null will clear all access policies. - * - * @return - * A reactive response signalling completion. + * @param accessType Specifies how the data in this container is available to the public. See the + * x-ms-blob-public-access header in the Azure Docs for more information. Pass null for no public access. + * @param identifiers A list of {@link SignedIdentifier} objects that specify the permissions for the container. + * Please see + * here + * for more information. Passing null will clear all access policies. + * @return A reactive response signalling completion. */ public Mono setAccessPolicy(PublicAccessType accessType, - List identifiers) { + List identifiers) { return this.setAccessPolicy(accessType, identifiers, null); } @@ -457,22 +423,19 @@ public Mono setAccessPolicy(PublicAccessType accessType, * ensure the time formatting is compatible with the service. For more information, see the * Azure Docs. * - * @param accessType - * Specifies how the data in this container is available to the public. See the x-ms-blob-public-access header - * in the Azure Docs for more information. Pass null for no public access. - * @param identifiers - * A list of {@link SignedIdentifier} objects that specify the permissions for the container. Please see - * here - * for more information. Passing null will clear all access policies. - * @param accessConditions - * {@link ContainerAccessConditions} - * + * @param accessType Specifies how the data in this container is available to the public. See the + * x-ms-blob-public-access header in the Azure Docs for more information. Pass null for no public access. + * @param identifiers A list of {@link SignedIdentifier} objects that specify the permissions for the container. + * Please see + * here + * for more information. Passing null will clear all access policies. + * @param accessConditions {@link ContainerAccessConditions} * @return A reactive response signalling completion. * @throws UnsupportedOperationException If {@link ContainerAccessConditions#modifiedAccessConditions()} has either * {@link ModifiedAccessConditions#ifMatch()} or {@link ModifiedAccessConditions#ifNoneMatch()} set. */ public Mono setAccessPolicy(PublicAccessType accessType, - List identifiers, ContainerAccessConditions accessConditions) { + List identifiers, ContainerAccessConditions accessConditions) { accessConditions = accessConditions == null ? new ContainerAccessConditions() : accessConditions; if (!validateNoEtag(accessConditions.modifiedAccessConditions())) { @@ -508,53 +471,49 @@ OffsetDateTime.now will only give back milliseconds (more precise fields are zer } /** - * Returns a reactive Publisher emitting all the blobs in this container lazily as needed. - * The directories are flattened and only actual blobs and no directories are returned. + * Returns a reactive Publisher emitting all the blobs in this container lazily as needed. The directories are + * flattened and only actual blobs and no directories are returned. * *
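// A hedged reactive sketch for the container operations above, assuming an existing
// ContainerAsyncClient named "containerAsyncClient", a map-style Metadata type and the
// PublicAccessType.BLOB value; nothing runs until subscribe() is called.
static void createContainerWithMetadata(ContainerAsyncClient containerAsyncClient) {
    Metadata metadata = new Metadata();
    metadata.put("owner", "docs-team");
    containerAsyncClient.create(metadata, PublicAccessType.BLOB)
        .then(containerAsyncClient.getProperties())
        .subscribe(response -> System.out.println("Container created, status " + response.statusCode()));
}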

    * Blob names are returned in lexicographic order. For more information, see the * Azure Docs. * *

    - * E.g. listing a container containing a 'foo' folder, which contains blobs 'foo1' and 'foo2', and a blob - * on the root level 'bar', will return + * E.g. listing a container containing a 'foo' folder, which contains blobs 'foo1' and 'foo2', and a blob on the + * root level 'bar', will return * *

- * <ul>
- * <li>foo/foo1
- * <li>foo/foo2
- * <li>bar
+ * <ul>
+ * <li>foo/foo1
+ * <li>foo/foo2
+ * <li>bar
    * - * @return - * A reactive response emitting the flattened blobs. + * @return A reactive response emitting the flattened blobs. */ public Flux listBlobsFlat() { return this.listBlobsFlat(new ListBlobsOptions()); } /** - * Returns a reactive Publisher emitting all the blobs in this container lazily as needed. - * The directories are flattened and only actual blobs and no directories are returned. + * Returns a reactive Publisher emitting all the blobs in this container lazily as needed. The directories are + * flattened and only actual blobs and no directories are returned. * *

    * Blob names are returned in lexicographic order. For more information, see the * Azure Docs. * *

    - * E.g. listing a container containing a 'foo' folder, which contains blobs 'foo1' and 'foo2', and a blob - * on the root level 'bar', will return + * E.g. listing a container containing a 'foo' folder, which contains blobs 'foo1' and 'foo2', and a blob on the + * root level 'bar', will return * *

- * <ul>
- * <li>foo/foo1
- * <li>foo/foo2
- * <li>bar
+ * <ul>
+ * <li>foo/foo1
+ * <li>foo/foo2
+ * <li>bar
    * - * @param options - * {@link ListBlobsOptions} - * - * @return - * A reactive response emitting the listed blobs, flattened. + * @param options {@link ListBlobsOptions} + * @return A reactive response emitting the listed blobs, flattened. */ public Flux listBlobsFlat(ListBlobsOptions options) { return listBlobsFlatSegment(null, options).flatMapMany(response -> listBlobsFlatHelper(options, response)); @@ -608,72 +567,63 @@ private Flux listBlobsFlatHelper(ListBlobsOptions options, ContainersL } /** - * Returns a reactive Publisher emitting all the blobs and directories (prefixes) under - * the given directory (prefix). Directories will have {@link BlobItem#isPrefix()} set to - * true. + * Returns a reactive Publisher emitting all the blobs and directories (prefixes) under the given directory + * (prefix). Directories will have {@link BlobItem#isPrefix()} set to true. * *
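// A hedged sketch of the flat listing described above, assuming an existing ContainerAsyncClient
// named "containerAsyncClient" and a name() accessor on BlobItem; blobs are emitted lazily as the
// listing pages arrive.
static void printBlobsUnderFoo(ContainerAsyncClient containerAsyncClient) {
    containerAsyncClient.listBlobsFlat(new ListBlobsOptions().prefix("foo/"))
        .subscribe(blobItem -> System.out.println(blobItem.name()));
}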

    * Blob names are returned in lexicographic order. For more information, see the * Azure Docs. * *

    - * E.g. listing a container containing a 'foo' folder, which contains blobs 'foo1' and 'foo2', and a blob - * on the root level 'bar', will return the following results when prefix=null: + * E.g. listing a container containing a 'foo' folder, which contains blobs 'foo1' and 'foo2', and a blob on the + * root level 'bar', will return the following results when prefix=null: * *

- * <ul>
- * <li>foo/ (isPrefix = true)
- * <li>bar (isPrefix = false)
+ * <ul>
+ * <li>foo/ (isPrefix = true)
+ * <li>bar (isPrefix = false)
    *

    * will return the following results when prefix="foo/": * *

- * <ul>
- * <li>foo/foo1 (isPrefix = false)
- * <li>foo/foo2 (isPrefix = false)
+ * <ul>
+ * <li>foo/foo1 (isPrefix = false)
+ * <li>foo/foo2 (isPrefix = false)
    * - * @param directory - * The directory to list blobs underneath - * - * @return - * A reactive response emitting the prefixes and blobs. + * @param directory The directory to list blobs underneath + * @return A reactive response emitting the prefixes and blobs. */ public Flux listBlobsHierarchy(String directory) { return this.listBlobsHierarchy("/", new ListBlobsOptions().prefix(directory)); } /** - * Returns a reactive Publisher emitting all the blobs and prefixes (directories) under - * the given prefix (directory). Directories will have {@link BlobItem#isPrefix()} set to - * true. + * Returns a reactive Publisher emitting all the blobs and prefixes (directories) under the given prefix + * (directory). Directories will have {@link BlobItem#isPrefix()} set to true. * *

    * Blob names are returned in lexicographic order. For more information, see the * Azure Docs. * *

    - * E.g. listing a container containing a 'foo' folder, which contains blobs 'foo1' and 'foo2', and a blob - * on the root level 'bar', will return the following results when prefix=null: + * E.g. listing a container containing a 'foo' folder, which contains blobs 'foo1' and 'foo2', and a blob on the + * root level 'bar', will return the following results when prefix=null: * *

- * <ul>
- * <li>foo/ (isPrefix = true)
- * <li>bar (isPrefix = false)
+ * <ul>
+ * <li>foo/ (isPrefix = true)
+ * <li>bar (isPrefix = false)
    *

    * will return the following results when prefix="foo/": * *

- * <ul>
- * <li>foo/foo1 (isPrefix = false)
- * <li>foo/foo2 (isPrefix = false)
+ * <ul>
+ * <li>foo/foo1 (isPrefix = false)
+ * <li>foo/foo2 (isPrefix = false)
    * - * @param delimiter - * The delimiter for blob hierarchy, "/" for hierarchy based on directories - * @param options - * {@link ListBlobsOptions} - * - * @return - * A reactive response emitting the prefixes and blobs. + * @param delimiter The delimiter for blob hierarchy, "/" for hierarchy based on directories + * @param options {@link ListBlobsOptions} + * @return A reactive response emitting the prefixes and blobs. */ public Flux listBlobsHierarchy(String delimiter, ListBlobsOptions options) { return listBlobsHierarchySegment(null, delimiter, options) @@ -719,7 +669,7 @@ private Mono listBlobsHierarchySegme } private Flux listBlobsHierarchyHelper(String delimiter, ListBlobsOptions options, - Context context, ContainersListBlobHierarchySegmentResponse response) { + Context context, ContainersListBlobHierarchySegmentResponse response) { Flux blobs; Flux prefixes; BlobHierarchyListSegment segment = response.value().segment(); @@ -816,14 +766,10 @@ private Flux listBlobsHierarchyHelper(String delimiter, ListBlobsOptio * Acquires a lease on the blob for write and delete operations. The lease duration must be between 15 to 60 * seconds, or infinite (-1). * - * @param proposedId - * A {@code String} in any valid GUID format. May be null. - * @param duration - * The duration of the lease, in seconds, or negative one (-1) for a lease that - * never expires. A non-infinite lease can be between 15 and 60 seconds. - * - * @return - * A reactive response containing the lease ID. + * @param proposedId A {@code String} in any valid GUID format. May be null. + * @param duration The duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A + * non-infinite lease can be between 15 and 60 seconds. + * @return A reactive response containing the lease ID. */ public Mono> acquireLease(String proposedId, int duration) { return this.acquireLease(proposedId, duration, null); @@ -833,19 +779,15 @@ public Mono> acquireLease(String proposedId, int duration) { * Acquires a lease on the blob for write and delete operations. The lease duration must be between 15 to 60 * seconds, or infinite (-1). * - * @param proposedID - * A {@code String} in any valid GUID format. May be null. - * @param duration - * The duration of the lease, in seconds, or negative one (-1) for a lease that - * never expires. A non-infinite lease can be between 15 and 60 seconds. - * @param modifiedAccessConditions - * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used - * to construct conditions related to when the blob was changed relative to the given request. The request - * will fail if the specified condition is not satisfied. - * + * @param proposedID A {@code String} in any valid GUID format. May be null. + * @param duration The duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A + * non-infinite lease can be between 15 and 60 seconds. + * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. ETag and + * LastModifiedTime are used to construct conditions related to when the blob was changed relative to the given + * request. The request will fail if the specified condition is not satisfied. * @return A reactive response containing the lease ID. - * @throws UnsupportedOperationException If either {@link ModifiedAccessConditions#ifMatch()} or - * {@link ModifiedAccessConditions#ifNoneMatch()} is set. 
+ * @throws UnsupportedOperationException If either {@link ModifiedAccessConditions#ifMatch()} or {@link + * ModifiedAccessConditions#ifNoneMatch()} is set. */ public Mono> acquireLease(String proposedID, int duration, ModifiedAccessConditions modifiedAccessConditions) { if (!this.validateNoEtag(modifiedAccessConditions)) { @@ -863,11 +805,8 @@ public Mono> acquireLease(String proposedID, int duration, Modi /** * Renews the blob's previously-acquired lease. * - * @param leaseID - * The leaseId of the active lease on the blob. - * - * @return - * A reactive response containing the renewed lease ID. + * @param leaseID The leaseId of the active lease on the blob. + * @return A reactive response containing the renewed lease ID. */ public Mono> renewLease(String leaseID) { return this.renewLease(leaseID, null); @@ -876,16 +815,13 @@ public Mono> renewLease(String leaseID) { /** * Renews the blob's previously-acquired lease. * - * @param leaseID - * The leaseId of the active lease on the blob. - * @param modifiedAccessConditions - * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used - * to construct conditions related to when the blob was changed relative to the given request. The request - * will fail if the specified condition is not satisfied. - * + * @param leaseID The leaseId of the active lease on the blob. + * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. ETag and + * LastModifiedTime are used to construct conditions related to when the blob was changed relative to the given + * request. The request will fail if the specified condition is not satisfied. * @return A reactive response containing the renewed lease ID. - * @throws UnsupportedOperationException If either {@link ModifiedAccessConditions#ifMatch()} or - * {@link ModifiedAccessConditions#ifNoneMatch()} is set. + * @throws UnsupportedOperationException If either {@link ModifiedAccessConditions#ifMatch()} or {@link + * ModifiedAccessConditions#ifNoneMatch()} is set. */ public Mono> renewLease(String leaseID, ModifiedAccessConditions modifiedAccessConditions) { if (!this.validateNoEtag(modifiedAccessConditions)) { @@ -903,11 +839,8 @@ public Mono> renewLease(String leaseID, ModifiedAccessCondition /** * Releases the blob's previously-acquired lease. * - * @param leaseID - * The leaseId of the active lease on the blob. - * - * @return - * A reactive response signalling completion. + * @param leaseID The leaseId of the active lease on the blob. + * @return A reactive response signalling completion. */ public Mono releaseLease(String leaseID) { return this.releaseLease(leaseID, null); @@ -916,16 +849,13 @@ public Mono releaseLease(String leaseID) { /** * Releases the blob's previously-acquired lease. * - * @param leaseID - * The leaseId of the active lease on the blob. - * @param modifiedAccessConditions - * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used - * to construct conditions related to when the blob was changed relative to the given request. The request - * will fail if the specified condition is not satisfied. - * + * @param leaseID The leaseId of the active lease on the blob. + * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. ETag and + * LastModifiedTime are used to construct conditions related to when the blob was changed relative to the given + * request. 
The request will fail if the specified condition is not satisfied. * @return A reactive response signalling completion. - * @throws UnsupportedOperationException If either {@link ModifiedAccessConditions#ifMatch()} or - * {@link ModifiedAccessConditions#ifNoneMatch()} is set. + * @throws UnsupportedOperationException If either {@link ModifiedAccessConditions#ifMatch()} or {@link + * ModifiedAccessConditions#ifNoneMatch()} is set. */ public Mono releaseLease(String leaseID, ModifiedAccessConditions modifiedAccessConditions) { if (!this.validateNoEtag(modifiedAccessConditions)) { @@ -944,8 +874,7 @@ public Mono releaseLease(String leaseID, ModifiedAccessConditions * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) constant * to break a fixed-duration lease when it expires or an infinite lease immediately. * - * @return - * A reactive response containing the remaining time in the broken lease. + * @return A reactive response containing the remaining time in the broken lease. */ public Mono> breakLease() { return this.breakLease(null, null); @@ -955,20 +884,17 @@ public Mono> breakLease() { * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) constant * to break a fixed-duration lease when it expires or an infinite lease immediately. * - * @param breakPeriodInSeconds - * An optional {@code Integer} representing the proposed duration of seconds that the lease should continue - * before it is broken, between 0 and 60 seconds. This break period is only used if it is shorter than the - * time remaining on the lease. If longer, the time remaining on the lease is used. A new lease will not be - * available before the break period has expired, but the lease may be held for longer than the break - * period. - * @param modifiedAccessConditions - * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used - * to construct conditions related to when the blob was changed relative to the given request. The request - * will fail if the specified condition is not satisfied. - * + * @param breakPeriodInSeconds An optional {@code Integer} representing the proposed duration of seconds that the + * lease should continue before it is broken, between 0 and 60 seconds. This break period is only used if it is + * shorter than the time remaining on the lease. If longer, the time remaining on the lease is used. A new lease + * will not be available before the break period has expired, but the lease may be held for longer than the break + * period. + * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. ETag and + * LastModifiedTime are used to construct conditions related to when the blob was changed relative to the given + * request. The request will fail if the specified condition is not satisfied. * @return A reactive response containing the remaining time in the broken lease. - * @throws UnsupportedOperationException If either {@link ModifiedAccessConditions#ifMatch()} or - * {@link ModifiedAccessConditions#ifNoneMatch()} is set. + * @throws UnsupportedOperationException If either {@link ModifiedAccessConditions#ifMatch()} or {@link + * ModifiedAccessConditions#ifNoneMatch()} is set. 
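A rough usage sketch of the asynchronous lease calls documented above, assuming a ContainerAsyncClient has already been built and that the stripped generic parameters resolve to Mono<Response<String>> for the lease operations (Response#value() as the accessor is likewise an assumption, not shown in this diff):

    import com.azure.storage.blob.ContainerAsyncClient;

    import reactor.core.publisher.Mono;

    public final class ContainerLeaseSample {
        // Acquire a 30-second lease, renew it once, then release it.
        static Mono<Void> leaseRoundTrip(ContainerAsyncClient container) {
            return container.acquireLease(null, 30)           // proposedId may be null, per the Javadoc above
                .map(response -> response.value())             // assumed to unwrap the lease ID string
                .flatMap(leaseId -> container.renewLease(leaseId)
                    .then(container.releaseLease(leaseId))
                    .then());
        }
    }

Nothing runs until the returned Mono is subscribed to, which is the usual contract for these reactive overloads.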
*/ public Mono> breakLease(Integer breakPeriodInSeconds, ModifiedAccessConditions modifiedAccessConditions) { if (!this.validateNoEtag(modifiedAccessConditions)) { @@ -986,33 +912,26 @@ public Mono> breakLease(Integer breakPeriodInSeconds, Modifie /** * ChangeLease changes the blob's lease ID. * - * @param leaseId - * The leaseId of the active lease on the blob. - * @param proposedID - * A {@code String} in any valid GUID format. - * - * @return - * A reactive response containing the new lease ID. + * @param leaseId The leaseId of the active lease on the blob. + * @param proposedID A {@code String} in any valid GUID format. + * @return A reactive response containing the new lease ID. */ public Mono> changeLease(String leaseId, String proposedID) { return this.changeLease(leaseId, proposedID, null); } /** - * ChangeLease changes the blob's lease ID. For more information, see the Azure Docs. - * - * @param leaseId - * The leaseId of the active lease on the blob. - * @param proposedID - * A {@code String} in any valid GUID format. - * @param modifiedAccessConditions - * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used - * to construct conditions related to when the blob was changed relative to the given request. The request - * will fail if the specified condition is not satisfied. - * + * ChangeLease changes the blob's lease ID. For more information, see the Azure + * Docs. + * + * @param leaseId The leaseId of the active lease on the blob. + * @param proposedID A {@code String} in any valid GUID format. + * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. ETag and + * LastModifiedTime are used to construct conditions related to when the blob was changed relative to the given + * request. The request will fail if the specified condition is not satisfied. * @return A reactive response containing the new lease ID. - * @throws UnsupportedOperationException If either {@link ModifiedAccessConditions#ifMatch()} or - * {@link ModifiedAccessConditions#ifNoneMatch()} is set. + * @throws UnsupportedOperationException If either {@link ModifiedAccessConditions#ifMatch()} or {@link + * ModifiedAccessConditions#ifNoneMatch()} is set. */ public Mono> changeLease(String leaseId, String proposedID, ModifiedAccessConditions modifiedAccessConditions) { if (!this.validateNoEtag(modifiedAccessConditions)) { @@ -1031,8 +950,7 @@ public Mono> changeLease(String leaseId, String proposedID, Mod * Returns the sku name and account kind for the account. For more information, please see the * Azure Docs. * - * @return - * A reactive response containing the account info. + * @return A reactive response containing the account info. 
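A small sketch of breaking a container lease under a date-based precondition; as the @throws documentation in this file states, ifMatch/ifNoneMatch must stay unset, so only a modified-since style condition is supplied (the fluent ifUnmodifiedSince setter on ModifiedAccessConditions is assumed from the getter names shown here, as is Response#value()):

    import com.azure.storage.blob.ContainerAsyncClient;
    import com.azure.storage.blob.models.ModifiedAccessConditions;

    import java.time.OffsetDateTime;

    public final class BreakLeaseSample {
        static void breakIfUnchanged(ContainerAsyncClient container) {
            // Date-based condition only; populating ifMatch/ifNoneMatch would hit the
            // UnsupportedOperationException thrown by validateNoEtag.
            ModifiedAccessConditions conditions = new ModifiedAccessConditions()
                .ifUnmodifiedSince(OffsetDateTime.now().minusHours(1)); // assumed fluent setter

            container.breakLease(10, conditions) // give the holder up to 10 seconds before the break takes effect
                .subscribe(response -> System.out.println("Time remaining on lease: " + response.value()));
        }
    }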
*/ public Mono> getAccountInfo() { return postProcessResponse( @@ -1046,4 +964,170 @@ private boolean validateNoEtag(ModifiedAccessConditions modifiedAccessConditions } return modifiedAccessConditions.ifMatch() == null && modifiedAccessConditions.ifNoneMatch() == null; } + + /** + * Generates a user delegation SAS with the specified parameters + * + * @param userDelegationKey The {@code UserDelegationKey} user delegation key for the SAS + * @param accountName The {@code String} account name for the SAS + * @param permissions The {@code ContainerSASPermissions} permission for the SAS + * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS + * @return A string that represents the SAS token + */ + public String generateUserDelegationSAS(UserDelegationKey userDelegationKey, String accountName, + ContainerSASPermission permissions, OffsetDateTime expiryTime) { + return this.generateUserDelegationSAS(userDelegationKey, accountName, permissions, expiryTime, null, null, + null, null, null, null, null, null, null); + } + + /** + * Generates a user delegation SAS token with the specified parameters + * + * @param userDelegationKey The {@code UserDelegationKey} user delegation key for the SAS + * @param accountName The {@code String} account name for the SAS + * @param permissions The {@code ContainerSASPermissions} permission for the SAS + * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS + * @param startTime An optional {@code OffsetDateTime} start time for the SAS + * @param version An optional {@code String} version for the SAS + * @param sasProtocol An optional {@code SASProtocol} protocol for the SAS + * @param ipRange An optional {@code IPRange} ip address range for the SAS + * @return A string that represents the SAS token + */ + public String generateUserDelegationSAS(UserDelegationKey userDelegationKey, String accountName, + ContainerSASPermission permissions, OffsetDateTime expiryTime, OffsetDateTime startTime, String version, + SASProtocol sasProtocol, IPRange ipRange) { + return this.generateUserDelegationSAS(userDelegationKey, accountName, permissions, expiryTime, startTime, + version, sasProtocol, ipRange, null /* cacheControl */, null /* contentDisposition */, null /* + contentEncoding */, null /* contentLanguage */, null /* contentType */); + } + + /** + * Generates a user delegation SAS token with the specified parameters + * + * @param userDelegationKey The {@code UserDelegationKey} user delegation key for the SAS + * @param accountName The {@code String} account name for the SAS + * @param permissions The {@code ContainerSASPermissions} permission for the SAS + * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS + * @param startTime An optional {@code OffsetDateTime} start time for the SAS + * @param version An optional {@code String} version for the SAS + * @param sasProtocol An optional {@code SASProtocol} protocol for the SAS + * @param ipRange An optional {@code IPRange} ip address range for the SAS + * @param cacheControl An optional {@code String} cache-control header for the SAS. + * @param contentDisposition An optional {@code String} content-disposition header for the SAS. + * @param contentEncoding An optional {@code String} content-encoding header for the SAS. + * @param contentLanguage An optional {@code String} content-language header for the SAS. + * @param contentType An optional {@code String} content-type header for the SAS. 
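A minimal sketch of the new user delegation SAS overloads on the async client. Obtaining the UserDelegationKey from the service is outside this diff, so the key and the permission set are passed in; the ContainerSASPermission import location is inferred from ContainerClient.java needing no extra import for it:

    import com.azure.storage.blob.ContainerAsyncClient;
    import com.azure.storage.blob.ContainerSASPermission;
    import com.azure.storage.blob.models.UserDelegationKey;

    import java.time.OffsetDateTime;

    public final class UserDelegationSasSample {
        // Returns a SAS token string scoped to this container and valid for one hour.
        static String oneHourSas(ContainerAsyncClient container, UserDelegationKey key,
                String accountName, ContainerSASPermission permissions) {
            return container.generateUserDelegationSAS(key, accountName, permissions,
                OffsetDateTime.now().plusHours(1));
        }
    }

The longer overloads behave the same way; they only add the optional start time, version, protocol, IP range, and response-header overrides.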
+ * @return A string that represents the SAS token + */ + public String generateUserDelegationSAS(UserDelegationKey userDelegationKey, String accountName, + ContainerSASPermission permissions, OffsetDateTime expiryTime, OffsetDateTime startTime, String version, + SASProtocol sasProtocol, IPRange ipRange, String cacheControl, String contentDisposition, + String contentEncoding, String contentLanguage, String contentType) { + ServiceSASSignatureValues serviceSASSignatureValues = new ServiceSASSignatureValues(version, sasProtocol, + startTime, expiryTime, permissions == null ? null : permissions.toString(), ipRange, null /* identifier*/, + cacheControl, contentDisposition, contentEncoding, contentLanguage, contentType); + + ServiceSASSignatureValues values = configureServiceSASSignatureValues(serviceSASSignatureValues, accountName); + + SASQueryParameters sasQueryParameters = values.generateSASQueryParameters(userDelegationKey); + + return sasQueryParameters.encode(); + } + + /** + * Generates a SAS token with the specified parameters + * + * @param permissions The {@code ContainerSASPermissions} permission for the SAS + * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS + * @return A string that represents the SAS token + */ + public String generateSAS(ContainerSASPermission permissions, OffsetDateTime expiryTime) { + return this.generateSAS(null, permissions, /* identifier */ expiryTime, null /* startTime */, null /* version + */, null /* sasProtocol */, null /* ipRange */, null /* cacheControl */, null /* contentDisposition */, + null /* contentEncoding */, null /* contentLanguage */, null /*contentType*/); + } + + /** + * Generates a SAS token with the specified parameters + * + * @param identifier The {@code String} name of the access policy on the container this SAS references if any + * @return A string that represents the SAS token + */ + public String generateSAS(String identifier) { + return this.generateSAS(identifier, null /* permissions*/, null /* expiryTime */, null /* startTime */, null + /* version */, null /* sasProtocol */, null /* ipRange */, null /* cacheControl */, null /* + contentDisposition */, null /* contentEncoding */, null /* contentLanguage */, null /*contentType*/); + } + + /** + * Generates a SAS token with the specified parameters + * + * @param identifier The {@code String} name of the access policy on the container this SAS references if any + * @param permissions The {@code ContainerSASPermissions} permission for the SAS + * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS + * @param startTime An optional {@code OffsetDateTime} start time for the SAS + * @param version An optional {@code String} version for the SAS + * @param sasProtocol An optional {@code SASProtocol} protocol for the SAS + * @param ipRange An optional {@code IPRange} ip address range for the SAS + * @return A string that represents the SAS token + */ + public String generateSAS(String identifier, ContainerSASPermission permissions, OffsetDateTime expiryTime, + OffsetDateTime startTime, + String version, SASProtocol sasProtocol, IPRange ipRange) { + return this.generateSAS(identifier, permissions, expiryTime, startTime, version, sasProtocol, ipRange, null + /* cacheControl */, null /* contentDisposition */, null /* contentEncoding */, null /* contentLanguage */, + null /*contentType*/); + } + + /** + * Generates a SAS token with the specified parameters + * + * @param identifier The {@code String} name of the access policy on the container this SAS 
references if any + * @param permissions The {@code ContainerSASPermissions} permission for the SAS + * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS + * @param startTime An optional {@code OffsetDateTime} start time for the SAS + * @param version An optional {@code String} version for the SAS + * @param sasProtocol An optional {@code SASProtocol} protocol for the SAS + * @param ipRange An optional {@code IPRange} ip address range for the SAS + * @param cacheControl An optional {@code String} cache-control header for the SAS. + * @param contentDisposition An optional {@code String} content-disposition header for the SAS. + * @param contentEncoding An optional {@code String} content-encoding header for the SAS. + * @param contentLanguage An optional {@code String} content-language header for the SAS. + * @param contentType An optional {@code String} content-type header for the SAS. + * @return A string that represents the SAS token + */ + public String generateSAS(String identifier, ContainerSASPermission permissions, OffsetDateTime expiryTime, + OffsetDateTime startTime, String version, SASProtocol sasProtocol, IPRange ipRange, String cacheControl, + String contentDisposition, String contentEncoding, String contentLanguage, String contentType) { + ServiceSASSignatureValues serviceSASSignatureValues = new ServiceSASSignatureValues(version, sasProtocol, + startTime, expiryTime, permissions == null ? null : permissions.toString(), ipRange, identifier, + cacheControl, contentDisposition, contentEncoding, contentLanguage, contentType); + + SharedKeyCredential sharedKeyCredential = + Utility.getSharedKeyCredential(this.azureBlobStorage.httpPipeline()); + + Utility.assertNotNull("sharedKeyCredential", sharedKeyCredential); + + ServiceSASSignatureValues values = configureServiceSASSignatureValues(serviceSASSignatureValues, + sharedKeyCredential.accountName()); + + SASQueryParameters sasQueryParameters = values.generateSASQueryParameters(sharedKeyCredential); + + return sasQueryParameters.encode(); + } + + /** + * Sets serviceSASSignatureValues parameters dependent on the current blob type + */ + private ServiceSASSignatureValues configureServiceSASSignatureValues(ServiceSASSignatureValues serviceSASSignatureValues, String accountName) { + // Set canonical name + serviceSASSignatureValues.canonicalName(this.azureBlobStorage.url(), accountName); + + // Set snapshotId to null + serviceSASSignatureValues.snapshotId(null); + + // Set resource + serviceSASSignatureValues.resource(Constants.UrlConstants.SAS_CONTAINER_CONSTANT); + return serviceSASSignatureValues; + } } diff --git a/storage/client/blob/src/main/java/com/azure/storage/blob/ContainerClient.java b/storage/client/blob/src/main/java/com/azure/storage/blob/ContainerClient.java index 40ed0d25daa1f..de5d4f1431f39 100644 --- a/storage/client/blob/src/main/java/com/azure/storage/blob/ContainerClient.java +++ b/storage/client/blob/src/main/java/com/azure/storage/blob/ContainerClient.java @@ -15,26 +15,28 @@ import com.azure.storage.blob.models.PublicAccessType; import com.azure.storage.blob.models.SignedIdentifier; import com.azure.storage.blob.models.StorageAccountInfo; +import com.azure.storage.blob.models.UserDelegationKey; import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; import java.net.URL; import java.time.Duration; +import java.time.OffsetDateTime; import java.util.List; /** - * Client to a container. 
It may only be instantiated through a {@link ContainerClientBuilder} or via the method - * {@link StorageClient#getContainerClient(String)}. This class does not hold any - * state about a particular container but is instead a convenient way of sending off appropriate requests to - * the resource on the service. It may also be used to construct URLs to blobs. + * Client to a container. It may only be instantiated through a {@link ContainerClientBuilder} or via the method {@link + * StorageClient#getContainerClient(String)}. This class does not hold any state about a particular container but is + * instead a convenient way of sending off appropriate requests to the resource on the service. It may also be used to + * construct URLs to blobs. * *

    * This client contains operations on a container. Operations on a blob are available on {@link BlobClient} through * {@link #getBlobClient(String)}, and operations on the service are available on {@link StorageClient}. * *

    - * Please refer to the Azure Docs - * for more information on containers. + * Please refer to the Azure + * Docs for more information on containers. */ public final class ContainerClient { private ContainerAsyncClient containerAsyncClient; @@ -47,22 +49,21 @@ public final class ContainerClient { /** * Package-private constructor for use by {@link ContainerClientBuilder}. + * * @param containerAsyncClient the async container client */ ContainerClient(ContainerAsyncClient containerAsyncClient) { - this.containerAsyncClient = containerAsyncClient; + this.containerAsyncClient = containerAsyncClient; } /** - * Creates a new {@link BlockBlobClient} object by concatenating the blobName to the end of - * ContainerAsyncClient's URL. The new BlockBlobClient uses the same request policy pipeline as the ContainerAsyncClient. - * To change the pipeline, create the BlockBlobClient and then call its WithPipeline method passing in the - * desired pipeline object. Or, call this package's NewBlockBlobAsyncClient instead of calling this object's - * NewBlockBlobAsyncClient method. - * - * @param blobName - * A {@code String} representing the name of the blob. + * Creates a new {@link BlockBlobClient} object by concatenating the blobName to the end of ContainerAsyncClient's + * URL. The new BlockBlobClient uses the same request policy pipeline as the ContainerAsyncClient. To change the + * pipeline, create the BlockBlobClient and then call its WithPipeline method passing in the desired pipeline + * object. Or, call this package's NewBlockBlobAsyncClient instead of calling this object's NewBlockBlobAsyncClient + * method. * + * @param blobName A {@code String} representing the name of the blob. * @return A new {@link BlockBlobClient} object which references the blob with the specified name in this container. */ public BlockBlobClient getBlockBlobClient(String blobName) { @@ -70,17 +71,14 @@ public BlockBlobClient getBlockBlobClient(String blobName) { } /** - * Creates a new {@link BlockBlobClient} object by concatenating the blobName to the end of - * ContainerAsyncClient's URL. The new BlockBlobClient uses the same request policy pipeline as the ContainerAsyncClient. - * To change the pipeline, create the BlockBlobClient and then call its WithPipeline method passing in the - * desired pipeline object. Or, call this package's NewBlockBlobAsyncClient instead of calling this object's - * NewBlockBlobAsyncClient method. - * - * @param blobName - * A {@code String} representing the name of the blob. - * @param snapshot - * the snapshot identifier for the blob. + * Creates a new {@link BlockBlobClient} object by concatenating the blobName to the end of ContainerAsyncClient's + * URL. The new BlockBlobClient uses the same request policy pipeline as the ContainerAsyncClient. To change the + * pipeline, create the BlockBlobClient and then call its WithPipeline method passing in the desired pipeline + * object. Or, call this package's NewBlockBlobAsyncClient instead of calling this object's NewBlockBlobAsyncClient + * method. * + * @param blobName A {@code String} representing the name of the blob. + * @param snapshot the snapshot identifier for the blob. * @return A new {@link BlockBlobClient} object which references the blob with the specified name in this container. 
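A short sketch of how the client hierarchy described in this class Javadoc is typically navigated, assuming a StorageClient has already been built (the container, blob, and snapshot names are illustrative):

    import com.azure.storage.blob.AppendBlobClient;
    import com.azure.storage.blob.BlobClient;
    import com.azure.storage.blob.BlockBlobClient;
    import com.azure.storage.blob.ContainerClient;
    import com.azure.storage.blob.StorageClient;

    public final class ContainerNavigationSample {
        static void navigate(StorageClient storageClient) {
            // A ContainerClient comes from the account-level client (or from a ContainerClientBuilder).
            ContainerClient container = storageClient.getContainerClient("my-container");
            System.out.println("Container URL: " + container.getContainerUrl());

            // Blob-type specific clients reuse the container's request pipeline and URL.
            BlockBlobClient blockBlob = container.getBlockBlobClient("data/block.bin");
            AppendBlobClient appendLog = container.getAppendBlobClient("logs/app.log");
            BlobClient snapshotView = container.getBlobClient("data/block.bin", "<snapshot-id>");
        }
    }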
*/ public BlockBlobClient getBlockBlobClient(String blobName, String snapshot) { @@ -88,15 +86,12 @@ public BlockBlobClient getBlockBlobClient(String blobName, String snapshot) { } /** - * Creates creates a new PageBlobClient object by concatenating blobName to the end of - * ContainerAsyncClient's URL. The new PageBlobClient uses the same request policy pipeline as the ContainerAsyncClient. - * To change the pipeline, create the PageBlobClient and then call its WithPipeline method passing in the - * desired pipeline object. Or, call this package's NewPageBlobAsyncClient instead of calling this object's - * NewPageBlobAsyncClient method. - * - * @param blobName - * A {@code String} representing the name of the blob. + * Creates creates a new PageBlobClient object by concatenating blobName to the end of ContainerAsyncClient's URL. + * The new PageBlobClient uses the same request policy pipeline as the ContainerAsyncClient. To change the pipeline, + * create the PageBlobClient and then call its WithPipeline method passing in the desired pipeline object. Or, call + * this package's NewPageBlobAsyncClient instead of calling this object's NewPageBlobAsyncClient method. * + * @param blobName A {@code String} representing the name of the blob. * @return A new {@link PageBlobClient} object which references the blob with the specified name in this container. */ public PageBlobClient getPageBlobClient(String blobName) { @@ -104,17 +99,13 @@ public PageBlobClient getPageBlobClient(String blobName) { } /** - * Creates creates a new PageBlobClient object by concatenating blobName to the end of - * ContainerAsyncClient's URL. The new PageBlobClient uses the same request policy pipeline as the ContainerAsyncClient. - * To change the pipeline, create the PageBlobClient and then call its WithPipeline method passing in the - * desired pipeline object. Or, call this package's NewPageBlobAsyncClient instead of calling this object's - * NewPageBlobAsyncClient method. - * - * @param blobName - * A {@code String} representing the name of the blob. - * @param snapshot - * the snapshot identifier for the blob. + * Creates creates a new PageBlobClient object by concatenating blobName to the end of ContainerAsyncClient's URL. + * The new PageBlobClient uses the same request policy pipeline as the ContainerAsyncClient. To change the pipeline, + * create the PageBlobClient and then call its WithPipeline method passing in the desired pipeline object. Or, call + * this package's NewPageBlobAsyncClient instead of calling this object's NewPageBlobAsyncClient method. * + * @param blobName A {@code String} representing the name of the blob. + * @param snapshot the snapshot identifier for the blob. * @return A new {@link PageBlobClient} object which references the blob with the specified name in this container. */ public PageBlobClient getPageBlobClient(String blobName, String snapshot) { @@ -122,31 +113,27 @@ public PageBlobClient getPageBlobClient(String blobName, String snapshot) { } /** - * Creates creates a new AppendBlobClient object by concatenating blobName to the end of - * ContainerAsyncClient's URL. The new AppendBlobClient uses the same request policy pipeline as the ContainerAsyncClient. - * To change the pipeline, create the AppendBlobClient and then call its WithPipeline method passing in the - * desired pipeline object. 
Or, call this package's NewAppendBlobAsyncClient instead of calling this object's + * Creates creates a new AppendBlobClient object by concatenating blobName to the end of ContainerAsyncClient's URL. + * The new AppendBlobClient uses the same request policy pipeline as the ContainerAsyncClient. To change the + * pipeline, create the AppendBlobClient and then call its WithPipeline method passing in the desired pipeline + * object. Or, call this package's NewAppendBlobAsyncClient instead of calling this object's * NewAppendBlobAsyncClient method. * - * @param blobName - * A {@code String} representing the name of the blob. - * - * @return A new {@link AppendBlobClient} object which references the blob with the specified name in this container. + * @param blobName A {@code String} representing the name of the blob. + * @return A new {@link AppendBlobClient} object which references the blob with the specified name in this + * container. */ public AppendBlobClient getAppendBlobClient(String blobName) { return new AppendBlobClient(containerAsyncClient.getAppendBlobAsyncClient(blobName)); } /** - * Initializes a new BlobClient object by concatenating blobName to the end of - * ContainerAsyncClient's URL. The new BlobClient uses the same request policy pipeline as the ContainerAsyncClient. - * To change the pipeline, create the BlobClient and then call its WithPipeline method passing in the - * desired pipeline object. Or, call this package's getBlobAsyncClient instead of calling this object's - * getBlobAsyncClient method. - * - * @param blobName - * A {@code String} representing the name of the blob. + * Initializes a new BlobClient object by concatenating blobName to the end of ContainerAsyncClient's URL. The new + * BlobClient uses the same request policy pipeline as the ContainerAsyncClient. To change the pipeline, create the + * BlobClient and then call its WithPipeline method passing in the desired pipeline object. Or, call this package's + * getBlobAsyncClient instead of calling this object's getBlobAsyncClient method. * + * @param blobName A {@code String} representing the name of the blob. * @return A new {@link BlobClient} object which references the blob with the specified name in this container. */ public BlobClient getBlobClient(String blobName) { @@ -154,17 +141,13 @@ public BlobClient getBlobClient(String blobName) { } /** - * Initializes a new BlobClient object by concatenating blobName to the end of - * ContainerAsyncClient's URL. The new BlobClient uses the same request policy pipeline as the ContainerAsyncClient. - * To change the pipeline, create the BlobClient and then call its WithPipeline method passing in the - * desired pipeline object. Or, call this package's getBlobAsyncClient instead of calling this object's - * getBlobAsyncClient method. - * - * @param blobName - * A {@code String} representing the name of the blob. - * @param snapshot - * the snapshot identifier for the blob. + * Initializes a new BlobClient object by concatenating blobName to the end of ContainerAsyncClient's URL. The new + * BlobClient uses the same request policy pipeline as the ContainerAsyncClient. To change the pipeline, create the + * BlobClient and then call its WithPipeline method passing in the desired pipeline object. Or, call this package's + * getBlobAsyncClient instead of calling this object's getBlobAsyncClient method. * + * @param blobName A {@code String} representing the name of the blob. + * @param snapshot the snapshot identifier for the blob. 
* @return A new {@link BlobClient} object which references the blob with the specified name in this container. */ public BlobClient getBlobClient(String blobName, String snapshot) { @@ -174,8 +157,7 @@ public BlobClient getBlobClient(String blobName, String snapshot) { /** * Initializes a {@link StorageClient} object pointing to the storage account this container is in. * - * @return - * A {@link StorageClient} object pointing to the specified storage account + * @return A {@link StorageClient} object pointing to the specified storage account */ public StorageClient getStorageClient() { return new StorageClient(containerAsyncClient.getStorageAsyncClient()); @@ -183,6 +165,7 @@ public StorageClient getStorageClient() { /** * Gets the URL of the container represented by this client. + * * @return the URL. */ public URL getContainerUrl() { @@ -201,10 +184,8 @@ public Response exists() { /** * Gets if the container this client represents exists in the cloud. * - * @param timeout - * An optional timeout value beyond which a {@link RuntimeException} will be raised. - * @return - * true if the container exists, false if it doesn't + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @return true if the container exists, false if it doesn't */ public Response exists(Duration timeout) { Mono> response = containerAsyncClient.exists(); @@ -216,6 +197,7 @@ public Response exists(Duration timeout) { * Creates a new container within a storage account. If a container with the same name already exists, the operation * fails. For more information, see the * Azure Docs. + * * @return A response containing status code and HTTP headers */ public VoidResponse create() { @@ -227,13 +209,10 @@ public VoidResponse create() { * fails. For more information, see the * Azure Docs. * - * @param metadata - * {@link Metadata} - * @param accessType - * Specifies how the data in this container is available to the public. See the x-ms-blob-public-access header - * in the Azure Docs for more information. Pass null for no public access. - * @param timeout - * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param metadata {@link Metadata} + * @param accessType Specifies how the data in this container is available to the public. See the + * x-ms-blob-public-access header in the Azure Docs for more information. Pass null for no public access. + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @return A response containing status code and HTTP headers */ public VoidResponse create(Metadata metadata, PublicAccessType accessType, Duration timeout) { @@ -243,9 +222,10 @@ public VoidResponse create(Metadata metadata, PublicAccessType accessType, Durat } /** - * Marks the specified container for deletion. The container and any blobs contained within it are later - * deleted during garbage collection. For more information, see the + * Marks the specified container for deletion. The container and any blobs contained within it are later deleted + * during garbage collection. For more information, see the * Azure Docs. + * * @return A response containing status code and HTTP headers */ public VoidResponse delete() { @@ -253,14 +233,12 @@ public VoidResponse delete() { } /** - * Marks the specified container for deletion. The container and any blobs contained within it are later - * deleted during garbage collection. For more information, see the + * Marks the specified container for deletion. 
The container and any blobs contained within it are later deleted + * during garbage collection. For more information, see the * Azure Docs. * - * @param accessConditions - * {@link ContainerAccessConditions} - * @param timeout - * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param accessConditions {@link ContainerAccessConditions} + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @return A response containing status code and HTTP headers */ public VoidResponse delete(ContainerAccessConditions accessConditions, Duration timeout) { @@ -273,8 +251,7 @@ public VoidResponse delete(ContainerAccessConditions accessConditions, Duration * Returns the container's metadata and system properties. For more information, see the * Azure Docs. * - * @return - * The container properties. + * @return The container properties. */ public Response getProperties() { return this.getProperties(null, null); @@ -284,17 +261,13 @@ public Response getProperties() { * Returns the container's metadata and system properties. For more information, see the * Azure Docs. * - * @param leaseAccessConditions - * By setting lease access conditions, requests will fail if the provided lease does not match the active - * lease on the blob. - * @param timeout - * An optional timeout value beyond which a {@link RuntimeException} will be raised. - * - * @return - * The container properties. + * @param leaseAccessConditions By setting lease access conditions, requests will fail if the provided lease does + * not match the active lease on the blob. + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @return The container properties. */ public Response getProperties(LeaseAccessConditions leaseAccessConditions, - Duration timeout) { + Duration timeout) { Mono> response = containerAsyncClient.getProperties(leaseAccessConditions); return Utility.blockWithOptionalTimeout(response, timeout); @@ -304,8 +277,7 @@ public Response getProperties(LeaseAccessConditions leaseAc * Sets the container's metadata. For more information, see the * Azure Docs. * - * @param metadata - * {@link Metadata} + * @param metadata {@link Metadata} * @return A response containing status code and HTTP headers */ public VoidResponse setMetadata(Metadata metadata) { @@ -316,16 +288,13 @@ public VoidResponse setMetadata(Metadata metadata) { * Sets the container's metadata. For more information, see the * Azure Docs. * - * @param metadata - * {@link Metadata} - * @param accessConditions - * {@link ContainerAccessConditions} - * @param timeout - * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param metadata {@link Metadata} + * @param accessConditions {@link ContainerAccessConditions} + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @return A response containing status code and HTTP headers */ public VoidResponse setMetadata(Metadata metadata, - ContainerAccessConditions accessConditions, Duration timeout) { + ContainerAccessConditions accessConditions, Duration timeout) { Mono response = containerAsyncClient.setMetadata(metadata, accessConditions); return Utility.blockWithOptionalTimeout(response, timeout); @@ -336,8 +305,7 @@ public VoidResponse setMetadata(Metadata metadata, * For more information, see the * Azure Docs. * - * @return - * The container access policy. + * @return The container access policy. 
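A blocking create/inspect/delete round trip using the synchronous overloads above. Response#value(), Response#statusCode(), and the PublicAccessType.BLOB constant are assumptions; null is passed wherever the Javadoc allows omitting metadata or access conditions:

    import com.azure.storage.blob.ContainerClient;
    import com.azure.storage.blob.models.PublicAccessType;

    import java.time.Duration;

    public final class ContainerLifecycleSample {
        static void createAndDelete(ContainerClient container) {
            Duration timeout = Duration.ofSeconds(30);

            if (!container.exists(timeout).value()) {
                // No metadata, blob-level public read access, fail if the call exceeds the timeout.
                container.create(null, PublicAccessType.BLOB, timeout);
            }

            // Read the container's properties back; lease access conditions are optional, so null is fine.
            System.out.println("Properties status: " + container.getProperties(null, timeout).statusCode());

            container.delete(null, timeout); // null access conditions: unconditional delete
        }
    }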
*/ public Response getAccessPolicy() { return this.getAccessPolicy(null, null); @@ -348,14 +316,10 @@ public Response getAccessPolicy() { * For more information, see the * Azure Docs. * - * @param leaseAccessConditions - * By setting lease access conditions, requests will fail if the provided lease does not match the active - * lease on the blob. - * @param timeout - * An optional timeout value beyond which a {@link RuntimeException} will be raised. - * - * @return - * The container access policy. + * @param leaseAccessConditions By setting lease access conditions, requests will fail if the provided lease does + * not match the active lease on the blob. + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @return The container access policy. */ public Response getAccessPolicy(LeaseAccessConditions leaseAccessConditions, Duration timeout) { @@ -370,17 +334,16 @@ public Response getAccessPolicy(LeaseAccessConditions l * ensure the time formatting is compatible with the service. For more information, see the * Azure Docs. * - * @param accessType - * Specifies how the data in this container is available to the public. See the x-ms-blob-public-access header - * in the Azure Docs for more information. Pass null for no public access. - * @param identifiers - * A list of {@link SignedIdentifier} objects that specify the permissions for the container. Please see - * here - * for more information. Passing null will clear all access policies. + * @param accessType Specifies how the data in this container is available to the public. See the + * x-ms-blob-public-access header in the Azure Docs for more information. Pass null for no public access. + * @param identifiers A list of {@link SignedIdentifier} objects that specify the permissions for the container. + * Please see + * here + * for more information. Passing null will clear all access policies. * @return A response containing status code and HTTP headers */ public VoidResponse setAccessPolicy(PublicAccessType accessType, - List identifiers) { + List identifiers) { return this.setAccessPolicy(accessType, identifiers, null, null); } @@ -390,31 +353,27 @@ public VoidResponse setAccessPolicy(PublicAccessType accessType, * ensure the time formatting is compatible with the service. For more information, see the * Azure Docs. * - * @param accessType - * Specifies how the data in this container is available to the public. See the x-ms-blob-public-access header - * in the Azure Docs for more information. Pass null for no public access. - * @param identifiers - * A list of {@link SignedIdentifier} objects that specify the permissions for the container. Please see - * here - * for more information. Passing null will clear all access policies. - * @param accessConditions - * {@link ContainerAccessConditions} - * @param timeout - * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param accessType Specifies how the data in this container is available to the public. See the + * x-ms-blob-public-access header in the Azure Docs for more information. Pass null for no public access. + * @param identifiers A list of {@link SignedIdentifier} objects that specify the permissions for the container. + * Please see + * here + * for more information. Passing null will clear all access policies. + * @param accessConditions {@link ContainerAccessConditions} + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. 
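A sketch of resetting the container's access policy with the overloads documented here. Passing null identifiers clears stored access policies per the Javadoc; the PublicAccessType.CONTAINER constant and Response#statusCode() are assumptions:

    import com.azure.storage.blob.ContainerClient;
    import com.azure.storage.blob.models.PublicAccessType;

    import java.time.Duration;

    public final class AccessPolicySample {
        static void resetAccessPolicy(ContainerClient container) {
            Duration timeout = Duration.ofSeconds(30);

            // Clear all stored access policies and allow anonymous read/list access at container scope.
            container.setAccessPolicy(PublicAccessType.CONTAINER, null, null, timeout);

            // Confirm the call by reading the policy back; null lease access conditions.
            System.out.println("Status: " + container.getAccessPolicy(null, timeout).statusCode());
        }
    }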
* @return A response containing status code and HTTP headers */ public VoidResponse setAccessPolicy(PublicAccessType accessType, - List identifiers, ContainerAccessConditions accessConditions, - Duration timeout) { + List identifiers, ContainerAccessConditions accessConditions, + Duration timeout) { Mono response = containerAsyncClient.setAccessPolicy(accessType, identifiers, accessConditions); return Utility.blockWithOptionalTimeout(response, timeout); } /** - * Returns a lazy loaded list of blobs in this container, with folder structures flattened. - * The returned {@link Iterable} can be iterated through while new items are automatically - * retrieved as needed. + * Returns a lazy loaded list of blobs in this container, with folder structures flattened. The returned {@link + * Iterable} can be iterated through while new items are automatically retrieved as needed. * *

    * Blob names are returned in lexicographic order. @@ -423,17 +382,15 @@ public VoidResponse setAccessPolicy(PublicAccessType accessType, * For more information, see the * Azure Docs. * - * @return - * The listed blobs, flattened. + * @return The listed blobs, flattened. */ public Iterable listBlobsFlat() { return this.listBlobsFlat(new ListBlobsOptions(), null); } /** - * Returns a lazy loaded list of blobs in this container, with folder structures flattened. - * The returned {@link Iterable} can be iterated through while new items are automatically - * retrieved as needed. + * Returns a lazy loaded list of blobs in this container, with folder structures flattened. The returned {@link + * Iterable} can be iterated through while new items are automatically retrieved as needed. * *

    * Blob names are returned in lexicographic order. @@ -442,13 +399,9 @@ public Iterable listBlobsFlat() { * For more information, see the * Azure Docs. * - * @param options - * {@link ListBlobsOptions} - * @param timeout - * An optional timeout value beyond which a {@link RuntimeException} will be raised. - * - * @return - * The listed blobs, flattened. + * @param options {@link ListBlobsOptions} + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @return The listed blobs, flattened. */ public Iterable listBlobsFlat(ListBlobsOptions options, Duration timeout) { Flux response = containerAsyncClient.listBlobsFlat(options); @@ -457,74 +410,64 @@ public Iterable listBlobsFlat(ListBlobsOptions options, Duration timeo } /** - * Returns a reactive Publisher emitting all the blobs and directories (prefixes) under - * the given directory (prefix). Directories will have {@link BlobItem#isPrefix()} set to - * true. + * Returns a reactive Publisher emitting all the blobs and directories (prefixes) under the given directory + * (prefix). Directories will have {@link BlobItem#isPrefix()} set to true. * *

    * Blob names are returned in lexicographic order. For more information, see the * Azure Docs. * *

    - * E.g. listing a container containing a 'foo' folder, which contains blobs 'foo1' and 'foo2', and a blob - * on the root level 'bar', will return the following results when prefix=null: + * E.g. listing a container containing a 'foo' folder, which contains blobs 'foo1' and 'foo2', and a blob on the + * root level 'bar', will return the following results when prefix=null: * *

      - *
    • foo/ (isPrefix = true) - *
    • bar (isPrefix = false) + *
    • foo/ (isPrefix = true) + *
    • bar (isPrefix = false) *
    *

    * will return the following results when prefix="foo/": * *

      - *
    • foo/foo1 (isPrefix = false) - *
    • foo/foo2 (isPrefix = false) + *
    • foo/foo1 (isPrefix = false) + *
    • foo/foo2 (isPrefix = false) *
    * - * @param directory - * The directory to list blobs underneath - * - * @return - * A reactive response emitting the prefixes and blobs. + * @param directory The directory to list blobs underneath + * @return A reactive response emitting the prefixes and blobs. */ public Iterable listBlobsHierarchy(String directory) { return this.listBlobsHierarchy("/", new ListBlobsOptions().prefix(directory), null); } /** - * Returns a reactive Publisher emitting all the blobs and prefixes (directories) under - * the given prefix (directory). Directories will have {@link BlobItem#isPrefix()} set to - * true. + * Returns a reactive Publisher emitting all the blobs and prefixes (directories) under the given prefix + * (directory). Directories will have {@link BlobItem#isPrefix()} set to true. * *

    * Blob names are returned in lexicographic order. For more information, see the * Azure Docs. * *

    - * E.g. listing a container containing a 'foo' folder, which contains blobs 'foo1' and 'foo2', and a blob - * on the root level 'bar', will return the following results when prefix=null: + * E.g. listing a container containing a 'foo' folder, which contains blobs 'foo1' and 'foo2', and a blob on the + * root level 'bar', will return the following results when prefix=null: * *

      - *
    • foo/ (isPrefix = true) - *
    • bar (isPrefix = false) + *
    • foo/ (isPrefix = true) + *
    • bar (isPrefix = false) *
    *

    * will return the following results when prefix="foo/": * *

      - *
    • foo/foo1 (isPrefix = false) - *
    • foo/foo2 (isPrefix = false) + *
    • foo/foo1 (isPrefix = false) + *
    • foo/foo2 (isPrefix = false) *
    * - * @param delimiter - * The delimiter for blob hierarchy, "/" for hierarchy based on directories - * @param options - * {@link ListBlobsOptions} - * @param timeout - * An optional timeout value beyond which a {@link RuntimeException} will be raised. - * - * @return - * A reactive response emitting the prefixes and blobs. + * @param delimiter The delimiter for blob hierarchy, "/" for hierarchy based on directories + * @param options {@link ListBlobsOptions} + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @return A reactive response emitting the prefixes and blobs. */ public Iterable listBlobsHierarchy(String delimiter, ListBlobsOptions options, Duration timeout) { Flux response = containerAsyncClient.listBlobsHierarchy(delimiter, options); @@ -536,14 +479,10 @@ public Iterable listBlobsHierarchy(String delimiter, ListBlobsOptions * Acquires a lease on the blob for write and delete operations. The lease duration must be between 15 to 60 * seconds, or infinite (-1). * - * @param proposedId - * A {@code String} in any valid GUID format. May be null. - * @param duration - * The duration of the lease, in seconds, or negative one (-1) for a lease that - * never expires. A non-infinite lease can be between 15 and 60 seconds. - * - * @return - * The lease ID. + * @param proposedId A {@code String} in any valid GUID format. May be null. + * @param duration The duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A + * non-infinite lease can be between 15 and 60 seconds. + * @return The lease ID. */ public Response acquireLease(String proposedId, int duration) { return this.acquireLease(proposedId, duration, null, null); @@ -553,23 +492,17 @@ public Response acquireLease(String proposedId, int duration) { * Acquires a lease on the blob for write and delete operations. The lease duration must be between 15 to 60 * seconds, or infinite (-1). * - * @param proposedID - * A {@code String} in any valid GUID format. May be null. - * @param duration - * The duration of the lease, in seconds, or negative one (-1) for a lease that - * never expires. A non-infinite lease can be between 15 and 60 seconds. - * @param modifiedAccessConditions - * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used - * to construct conditions related to when the blob was changed relative to the given request. The request - * will fail if the specified condition is not satisfied. - * @param timeout - * An optional timeout value beyond which a {@link RuntimeException} will be raised. - * - * @return - * The lease ID. + * @param proposedID A {@code String} in any valid GUID format. May be null. + * @param duration The duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A + * non-infinite lease can be between 15 and 60 seconds. + * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. ETag and + * LastModifiedTime are used to construct conditions related to when the blob was changed relative to the given + * request. The request will fail if the specified condition is not satisfied. + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @return The lease ID. 
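A sketch of the two synchronous listing styles documented above; the element type of the returned Iterable, the BlobItem#name() accessor, and the models package for ListBlobsOptions are assumed from the surrounding Javadoc:

    import com.azure.storage.blob.ContainerClient;
    import com.azure.storage.blob.models.BlobItem;
    import com.azure.storage.blob.models.ListBlobsOptions;

    import java.time.Duration;

    public final class ListBlobsSample {
        static void printListing(ContainerClient container) {
            Duration timeout = Duration.ofSeconds(30);

            // Flat listing: every blob under the prefix, folder structure collapsed.
            for (BlobItem blob : container.listBlobsFlat(new ListBlobsOptions().prefix("images/"), timeout)) {
                System.out.println(blob.name());
            }

            // Hierarchical listing: one level under "images/", with directories flagged by isPrefix().
            for (BlobItem item : container.listBlobsHierarchy("/", new ListBlobsOptions().prefix("images/"), timeout)) {
                String marker = Boolean.TRUE.equals(item.isPrefix()) ? "[dir]  " : "[blob] ";
                System.out.println(marker + item.name());
            }
        }
    }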
*/ public Response acquireLease(String proposedID, int duration, - ModifiedAccessConditions modifiedAccessConditions, Duration timeout) { + ModifiedAccessConditions modifiedAccessConditions, Duration timeout) { Mono> response = containerAsyncClient .acquireLease(proposedID, duration, modifiedAccessConditions); @@ -579,11 +512,8 @@ public Response acquireLease(String proposedID, int duration, /** * Renews the blob's previously-acquired lease. * - * @param leaseID - * The leaseId of the active lease on the blob. - * - * @return - * The renewed lease ID. + * @param leaseID The leaseId of the active lease on the blob. + * @return The renewed lease ID. */ public Response renewLease(String leaseID) { return this.renewLease(leaseID, null, null); @@ -592,20 +522,15 @@ public Response renewLease(String leaseID) { /** * Renews the blob's previously-acquired lease. * - * @param leaseID - * The leaseId of the active lease on the blob. - * @param modifiedAccessConditions - * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used - * to construct conditions related to when the blob was changed relative to the given request. The request - * will fail if the specified condition is not satisfied. - * @param timeout - * An optional timeout value beyond which a {@link RuntimeException} will be raised. - * - * @return - * The renewed lease ID. + * @param leaseID The leaseId of the active lease on the blob. + * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. ETag and + * LastModifiedTime are used to construct conditions related to when the blob was changed relative to the given + * request. The request will fail if the specified condition is not satisfied. + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @return The renewed lease ID. */ public Response renewLease(String leaseID, ModifiedAccessConditions modifiedAccessConditions, - Duration timeout) { + Duration timeout) { Mono> response = containerAsyncClient .renewLease(leaseID, modifiedAccessConditions); @@ -615,8 +540,7 @@ public Response renewLease(String leaseID, ModifiedAccessConditions modi /** * Releases the blob's previously-acquired lease. * - * @param leaseID - * The leaseId of the active lease on the blob. + * @param leaseID The leaseId of the active lease on the blob. * @return A response containing status code and HTTP headers */ public VoidResponse releaseLease(String leaseID) { @@ -626,18 +550,15 @@ public VoidResponse releaseLease(String leaseID) { /** * Releases the blob's previously-acquired lease. * - * @param leaseID - * The leaseId of the active lease on the blob. - * @param modifiedAccessConditions - * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used - * to construct conditions related to when the blob was changed relative to the given request. The request - * will fail if the specified condition is not satisfied. - * @param timeout - * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param leaseID The leaseId of the active lease on the blob. + * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. ETag and + * LastModifiedTime are used to construct conditions related to when the blob was changed relative to the given + * request. The request will fail if the specified condition is not satisfied. 
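The synchronous lease overloads block the caller and accept an optional timeout; a small sketch, again assuming Response#value() unwraps the lease ID:

    import com.azure.storage.blob.ContainerClient;

    import java.time.Duration;

    public final class SyncLeaseSample {
        static String acquireInfiniteLease(ContainerClient container) {
            Duration timeout = Duration.ofSeconds(10);

            // -1 requests an infinite lease; a RuntimeException is raised if the call outlives the timeout.
            String leaseId = container.acquireLease(null, -1, null, timeout).value();

            // Keep the lease alive and hand the ID back to the caller for later release.
            container.renewLease(leaseId, null, timeout);
            return leaseId;
        }
    }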
+ * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @return A response containing status code and HTTP headers. */ public VoidResponse releaseLease(String leaseID, - ModifiedAccessConditions modifiedAccessConditions, Duration timeout) { + ModifiedAccessConditions modifiedAccessConditions, Duration timeout) { Mono response = containerAsyncClient .releaseLease(leaseID, modifiedAccessConditions); @@ -648,8 +569,7 @@ public VoidResponse releaseLease(String leaseID, * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) constant * to break a fixed-duration lease when it expires or an infinite lease immediately. * - * @return - * The remaining time in the broken lease. + * @return The remaining time in the broken lease. */ public Response breakLease() { return this.breakLease(null, null, null); @@ -659,24 +579,19 @@ public Response breakLease() { * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) constant * to break a fixed-duration lease when it expires or an infinite lease immediately. * - * @param breakPeriodInSeconds - * An optional {@code Integer} representing the proposed duration of seconds that the lease should continue - * before it is broken, between 0 and 60 seconds. This break period is only used if it is shorter than the - * time remaining on the lease. If longer, the time remaining on the lease is used. A new lease will not be - * available before the break period has expired, but the lease may be held for longer than the break - * period. - * @param modifiedAccessConditions - * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used - * to construct conditions related to when the blob was changed relative to the given request. The request - * will fail if the specified condition is not satisfied. - * @param timeout - * An optional timeout value beyond which a {@link RuntimeException} will be raised. - * - * @return - * The remaining time in the broken lease. + * @param breakPeriodInSeconds An optional {@code Integer} representing the proposed duration of seconds that the + * lease should continue before it is broken, between 0 and 60 seconds. This break period is only used if it is + * shorter than the time remaining on the lease. If longer, the time remaining on the lease is used. A new lease + * will not be available before the break period has expired, but the lease may be held for longer than the break + * period. + * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. ETag and + * LastModifiedTime are used to construct conditions related to when the blob was changed relative to the given + * request. The request will fail if the specified condition is not satisfied. + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @return The remaining time in the broken lease. */ public Response breakLease(Integer breakPeriodInSeconds, - ModifiedAccessConditions modifiedAccessConditions, Duration timeout) { + ModifiedAccessConditions modifiedAccessConditions, Duration timeout) { Mono> response = containerAsyncClient .breakLease(breakPeriodInSeconds, modifiedAccessConditions); @@ -686,36 +601,28 @@ public Response breakLease(Integer breakPeriodInSeconds, /** * ChangeLease changes the blob's lease ID. * - * @param leaseId - * The leaseId of the active lease on the blob. 
- * @param proposedID - * A {@code String} in any valid GUID format. - * - * @return - * The new lease ID. + * @param leaseId The leaseId of the active lease on the blob. + * @param proposedID A {@code String} in any valid GUID format. + * @return The new lease ID. */ public Response changeLease(String leaseId, String proposedID) { return this.changeLease(leaseId, proposedID, null, null); } /** - * ChangeLease changes the blob's lease ID. For more information, see the Azure Docs. - * - * @param leaseId - * The leaseId of the active lease on the blob. - * @param proposedID - * A {@code String} in any valid GUID format. - * @param modifiedAccessConditions - * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used - * to construct conditions related to when the blob was changed relative to the given request. The request - * will fail if the specified condition is not satisfied. - * @param timeout - * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * ChangeLease changes the blob's lease ID. For more information, see the Azure + * Docs. * + * @param leaseId The leaseId of the active lease on the blob. + * @param proposedID A {@code String} in any valid GUID format. + * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. ETag and + * LastModifiedTime are used to construct conditions related to when the blob was changed relative to the given + * request. The request will fail if the specified condition is not satisfied. + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @return The new lease ID. */ public Response changeLease(String leaseId, String proposedID, - ModifiedAccessConditions modifiedAccessConditions, Duration timeout) { + ModifiedAccessConditions modifiedAccessConditions, Duration timeout) { Mono> response = containerAsyncClient .changeLease(leaseId, proposedID, modifiedAccessConditions); @@ -726,15 +633,137 @@ public Response changeLease(String leaseId, String proposedID, * Returns the sku name and account kind for the account. For more information, please see the * Azure Docs. * - * @param timeout - * An optional timeout value beyond which a {@link RuntimeException} will be raised. - * - * @return - * The account info. + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @return The account info. 
*/ public Response getAccountInfo(Duration timeout) { Mono> response = containerAsyncClient.getAccountInfo(); return Utility.blockWithOptionalTimeout(response, timeout); } + + /** + * Generates a user delegation SAS token with the specified parameters + * + * @param userDelegationKey The {@code UserDelegationKey} user delegation key for the SAS + * @param accountName The {@code String} account name for the SAS + * @param permissions The {@code ContainerSASPermissions} permission for the SAS + * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS + * @return A string that represents the SAS token + */ + public String generateUserDelegationSAS(UserDelegationKey userDelegationKey, String accountName, + ContainerSASPermission permissions, OffsetDateTime expiryTime) { + return this.containerAsyncClient.generateUserDelegationSAS(userDelegationKey, accountName, permissions, + expiryTime); + } + + /** + * Generates a user delegation SAS token with the specified parameters + * + * @param userDelegationKey The {@code UserDelegationKey} user delegation key for the SAS + * @param accountName The {@code String} account name for the SAS + * @param permissions The {@code ContainerSASPermissions} permission for the SAS + * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS + * @param startTime An optional {@code OffsetDateTime} start time for the SAS + * @param version An optional {@code String} version for the SAS + * @param sasProtocol An optional {@code SASProtocol} protocol for the SAS + * @param ipRange An optional {@code IPRange} ip address range for the SAS + * @return A string that represents the SAS token + */ + public String generateUserDelegationSAS(UserDelegationKey userDelegationKey, String accountName, + ContainerSASPermission permissions, OffsetDateTime expiryTime, OffsetDateTime startTime, String version, + SASProtocol sasProtocol, IPRange ipRange) { + return this.containerAsyncClient.generateUserDelegationSAS(userDelegationKey, accountName, permissions, + expiryTime, startTime, version, sasProtocol, ipRange); + } + + /** + * Generates a user delegation SAS token with the specified parameters + * + * @param userDelegationKey The {@code UserDelegationKey} user delegation key for the SAS + * @param accountName The {@code String} account name for the SAS + * @param permissions The {@code ContainerSASPermissions} permission for the SAS + * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS + * @param startTime An optional {@code OffsetDateTime} start time for the SAS + * @param version An optional {@code String} version for the SAS + * @param sasProtocol An optional {@code SASProtocol} protocol for the SAS + * @param ipRange An optional {@code IPRange} ip address range for the SAS + * @param cacheControl An optional {@code String} cache-control header for the SAS. + * @param contentDisposition An optional {@code String} content-disposition header for the SAS. + * @param contentEncoding An optional {@code String} content-encoding header for the SAS. + * @param contentLanguage An optional {@code String} content-language header for the SAS. + * @param contentType An optional {@code String} content-type header for the SAS. 
+ * @return A string that represents the SAS token + */ + public String generateUserDelegationSAS(UserDelegationKey userDelegationKey, String accountName, + ContainerSASPermission permissions, OffsetDateTime expiryTime, OffsetDateTime startTime, String version, + SASProtocol sasProtocol, IPRange ipRange, String cacheControl, String contentDisposition, + String contentEncoding, String contentLanguage, String contentType) { + return this.containerAsyncClient.generateUserDelegationSAS(userDelegationKey, accountName, permissions, + expiryTime, startTime, version, sasProtocol, ipRange, cacheControl, contentDisposition, contentEncoding, + contentLanguage, contentType); + } + + /** + * Generates a SAS token with the specified parameters + * + * @param permissions The {@code ContainerSASPermissions} permission for the SAS + * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS + * @return A string that represents the SAS token + */ + public String generateSAS(ContainerSASPermission permissions, OffsetDateTime expiryTime) { + return this.containerAsyncClient.generateSAS(permissions, expiryTime); + } + + /** + * Generates a SAS token with the specified parameters + * + * @param identifier The {@code String} name of the access policy on the container this SAS references if any + * @return A string that represents the SAS token + */ + public String generateSAS(String identifier) { + return this.containerAsyncClient.generateSAS(identifier); + } + + /** + * Generates a SAS token with the specified parameters + * + * @param identifier The {@code String} name of the access policy on the container this SAS references if any + * @param permissions The {@code ContainerSASPermissions} permission for the SAS + * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS + * @param startTime An optional {@code OffsetDateTime} start time for the SAS + * @param version An optional {@code String} version for the SAS + * @param sasProtocol An optional {@code SASProtocol} protocol for the SAS + * @param ipRange An optional {@code IPRange} ip address range for the SAS + * @return A string that represents the SAS token + */ + public String generateSAS(String identifier, ContainerSASPermission permissions, OffsetDateTime expiryTime, + OffsetDateTime startTime, String version, SASProtocol sasProtocol, IPRange ipRange) { + return this.containerAsyncClient.generateSAS(identifier, permissions, expiryTime, startTime, version, + sasProtocol, ipRange); + } + + /** + * Generates a SAS token with the specified parameters + * + * @param identifier The {@code String} name of the access policy on the container this SAS references if any + * @param permissions The {@code ContainerSASPermissions} permission for the SAS + * @param expiryTime The {@code OffsetDateTime} expiry time for the SAS + * @param startTime An optional {@code OffsetDateTime} start time for the SAS + * @param version An optional {@code String} version for the SAS + * @param sasProtocol An optional {@code SASProtocol} protocol for the SAS + * @param ipRange An optional {@code IPRange} ip address range for the SAS + * @param cacheControl An optional {@code String} cache-control header for the SAS. + * @param contentDisposition An optional {@code String} content-disposition header for the SAS. + * @param contentEncoding An optional {@code String} content-encoding header for the SAS. + * @param contentLanguage An optional {@code String} content-language header for the SAS. 
+ * @param contentType An optional {@code String} content-type header for the SAS. + * @return A string that represents the SAS token + */ + public String generateSAS(String identifier, ContainerSASPermission permissions, OffsetDateTime expiryTime, + OffsetDateTime startTime, String version, SASProtocol sasProtocol, IPRange ipRange, String cacheControl, + String contentDisposition, String contentEncoding, String contentLanguage, String contentType) { + return this.containerAsyncClient.generateSAS(identifier, permissions, expiryTime, startTime, version, + sasProtocol, ipRange, cacheControl, contentDisposition, contentEncoding, contentLanguage, contentType); + } } diff --git a/storage/client/blob/src/main/java/com/azure/storage/blob/SASQueryParameters.java b/storage/client/blob/src/main/java/com/azure/storage/blob/SASQueryParameters.java index b6afd82addcab..cd918e636bbd4 100644 --- a/storage/client/blob/src/main/java/com/azure/storage/blob/SASQueryParameters.java +++ b/storage/client/blob/src/main/java/com/azure/storage/blob/SASQueryParameters.java @@ -333,9 +333,7 @@ UserDelegationKey userDelegationKey() { private void tryAppendQueryParameter(StringBuilder sb, String param, Object value) { if (value != null) { - if (sb.length() == 0) { - sb.append('?'); - } else { + if (sb.length() != 0) { sb.append('&'); } sb.append(safeURLEncode(param)).append('=').append(safeURLEncode(value.toString())); diff --git a/storage/client/blob/src/main/java/com/azure/storage/blob/ServiceSASSignatureValues.java b/storage/client/blob/src/main/java/com/azure/storage/blob/ServiceSASSignatureValues.java index 7450ae699f5a4..f6b4767cada56 100644 --- a/storage/client/blob/src/main/java/com/azure/storage/blob/ServiceSASSignatureValues.java +++ b/storage/client/blob/src/main/java/com/azure/storage/blob/ServiceSASSignatureValues.java @@ -6,14 +6,16 @@ import com.azure.storage.blob.models.UserDelegationKey; import com.azure.storage.common.credentials.SharedKeyCredential; +import java.net.MalformedURLException; +import java.net.URL; import java.security.InvalidKeyException; import java.time.OffsetDateTime; /** - * ServiceSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage service. Once - * all the values here are set appropriately, call generateSASQueryParameters to obtain a representation of the SAS - * which can actually be applied to blob urls. Note: that both this class and {@link SASQueryParameters} exist because - * the former is mutable and a logical representation while the latter is immutable and used to generate actual REST + * ServiceSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage service. Once all + * the values here are set appropriately, call generateSASQueryParameters to obtain a representation of the SAS which + * can actually be applied to blob urls. Note: that both this class and {@link SASQueryParameters} exist because the + * former is mutable and a logical representation while the latter is immutable and used to generate actual REST * requests. *
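The simplest of the generateSAS overloads added to ContainerClient above needs only a permission set and an expiry time. A minimal usage sketch, assuming an existing containerClient and that ContainerSASPermission offers a parse helper (both assumptions, not part of this change):

    // Hypothetical sketch: issue a read-only service SAS for the container, valid for one day.
    OffsetDateTime expiry = OffsetDateTime.now().plusDays(1);
    ContainerSASPermission permission = ContainerSASPermission.parse("r"); // parse(...) assumed
    String sasToken = containerClient.generateSAS(permission, expiry);     // overload introduced above
    // Append sasToken as the query string of the container URL to hand out scoped access.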

    * Please see here @@ -22,9 +24,9 @@ * Please see here for * more details on each value, including which are required. * - * @apiNote ## Sample Code \n - * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_sas "Sample code for ServiceSASSignatureValues")] \n - * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + * @apiNote ## Sample Code \n [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_sas + * "Sample code for ServiceSASSignatureValues")] \n For more samples, please see the [Samples + * file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) */ final class ServiceSASSignatureValues { @@ -40,9 +42,9 @@ final class ServiceSASSignatureValues { private IPRange ipRange; - private String containerName; + private String canonicalName; - private String blobName; + private String resource; private String snapshotId; @@ -64,6 +66,45 @@ final class ServiceSASSignatureValues { ServiceSASSignatureValues() { } + /** + * Creates an object with the specified expiry time and permissions + * + * @param expiryTime + * @param permissions + */ + ServiceSASSignatureValues(OffsetDateTime expiryTime, String permissions) { + this.expiryTime = expiryTime; + this.permissions = permissions; + } + + /** + * Creates an object with the specified identifier + * + * @param identifier + */ + ServiceSASSignatureValues(String identifier) { + this.identifier = identifier; + } + + ServiceSASSignatureValues(String version, SASProtocol sasProtocol, OffsetDateTime startTime, + OffsetDateTime expiryTime, String permission, IPRange ipRange, String identifier, String cacheControl, + String contentDisposition, String contentEncoding, String contentLanguage, String contentType) { + if (version != null) { + this.version = version; + } + this.protocol = sasProtocol; + this.startTime = startTime; + this.expiryTime = expiryTime; + this.permissions = permission; + this.ipRange = ipRange; + this.identifier = identifier; + this.cacheControl = cacheControl; + this.contentDisposition = contentDisposition; + this.contentEncoding = contentEncoding; + this.contentLanguage = contentLanguage; + this.contentType = contentType; + } + /** * The version of the service this SAS will target. If not specified, it will default to the version targeted by the * library. @@ -159,32 +200,52 @@ public ServiceSASSignatureValues ipRange(IPRange ipRange) { } /** - * The name of the container the SAS user may access. + * The resource the SAS user may access. */ - public String containerName() { - return containerName; + public String resource() { + return resource; } /** - * The name of the container the SAS user may access. + * The resource the SAS user may access. */ - public ServiceSASSignatureValues containerName(String containerName) { - this.containerName = containerName; + public ServiceSASSignatureValues resource(String resource) { + this.resource = resource; return this; } /** - * The name of the blob the SAS user may access. + * The canonical name of the object the SAS user may access. + */ + public String canonicalName() { + return canonicalName; + } + + /** + * The canonical name of the object the SAS user may access. 
*/ - public String blobName() { - return blobName; + public ServiceSASSignatureValues canonicalName(String canonicalName) { + this.canonicalName = canonicalName; + return this; } /** - * The name of the blob the SAS user may access. + * The canonical name of the object the SAS user may access. + * + * @throws RuntimeException If urlString is a malformed URL. */ - public ServiceSASSignatureValues blobName(String blobName) { - this.blobName = blobName; + public ServiceSASSignatureValues canonicalName(String urlString, String accountName) { + URL url = null; + try { + url = new URL(urlString); + } catch (MalformedURLException e) { + throw new RuntimeException(e); + } + + StringBuilder canonicalName = new StringBuilder("/blob"); + canonicalName.append('/').append(accountName).append(url.getPath()); + this.canonicalName = canonicalName.toString(); + return this; } @@ -192,7 +253,7 @@ public ServiceSASSignatureValues blobName(String blobName) { * The specific snapshot the SAS user may access. */ public String snapshotId() { - return snapshotId; + return this.snapshotId; } /** @@ -301,21 +362,16 @@ public ServiceSASSignatureValues contentType(String contentType) { * Uses an account's shared key credential to sign these signature values to produce the proper SAS query * parameters. * - * @param sharedKeyCredentials - * A {@link SharedKeyCredential} object used to sign the SAS values. - * + * @param sharedKeyCredentials A {@link SharedKeyCredential} object used to sign the SAS values. * @return {@link SASQueryParameters} * @throws Error If the accountKey is not a valid Base64-encoded string. */ public SASQueryParameters generateSASQueryParameters(SharedKeyCredential sharedKeyCredentials) { Utility.assertNotNull("sharedKeyCredentials", sharedKeyCredentials); - assertGenerateOK(); - - String resource = getResource(); - String verifiedPermissions = getVerifiedPermissions(); + assertGenerateOK(false); // Signature is generated on the un-url-encoded values. - final String stringToSign = stringToSign(verifiedPermissions, resource, sharedKeyCredentials); + final String stringToSign = stringToSign(); String signature = null; try { @@ -325,33 +381,24 @@ public SASQueryParameters generateSASQueryParameters(SharedKeyCredential sharedK } return new SASQueryParameters(this.version, null, null, - this.protocol, this.startTime, this.expiryTime, this.ipRange, this.identifier, resource, - this.permissions, signature, this.cacheControl, this.contentDisposition, this.contentEncoding, - this.contentLanguage, this.contentType, null /* delegate */); + this.protocol, this.startTime, this.expiryTime, this.ipRange, this.identifier, resource, + this.permissions, signature, this.cacheControl, this.contentDisposition, this.contentEncoding, + this.contentLanguage, this.contentType, null /* delegate */); } /** * Uses a user delegation key to sign these signature values to produce the proper SAS query parameters. * - * @param delegationKey - * A {@link UserDelegationKey} object used to sign the SAS values. - * - * @param accountName - * Name of the account holding the resource this SAS is authorizing. - * + * @param delegationKey A {@link UserDelegationKey} object used to sign the SAS values. * @return {@link SASQueryParameters} * @throws Error If the accountKey is not a valid Base64-encoded string. 
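For reference, the URL-based canonicalName overload above only joins "/blob", the account name, and the URL path. A standalone sketch with made-up values:

    import java.net.MalformedURLException;
    import java.net.URL;

    class CanonicalNameSketch {
        public static void main(String[] args) throws MalformedURLException {
            // Mirrors the string building done by canonicalName(urlString, accountName) above.
            URL url = new URL("https://myaccount.blob.core.windows.net/mycontainer/myblob");
            String canonicalName = "/blob/" + "myaccount" + url.getPath();
            System.out.println(canonicalName); // prints: /blob/myaccount/mycontainer/myblob
        }
    }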
*/ - public SASQueryParameters generateSASQueryParameters(UserDelegationKey delegationKey, String accountName) { + public SASQueryParameters generateSASQueryParameters(UserDelegationKey delegationKey) { Utility.assertNotNull("delegationKey", delegationKey); - Utility.assertNotNull("accountName", accountName); - assertGenerateOK(); - - String resource = getResource(); - String verifiedPermissions = getVerifiedPermissions(); + assertGenerateOK(true); // Signature is generated on the un-url-encoded values. - final String stringToSign = stringToSign(verifiedPermissions, resource, delegationKey, accountName); + final String stringToSign = stringToSign(delegationKey); String signature = null; try { @@ -361,110 +408,78 @@ public SASQueryParameters generateSASQueryParameters(UserDelegationKey delegatio } return new SASQueryParameters(this.version, null, null, - this.protocol, this.startTime, this.expiryTime, this.ipRange, null /* identifier */, resource, - this.permissions, signature, this.cacheControl, this.contentDisposition, this.contentEncoding, - this.contentLanguage, this.contentType, delegationKey); + this.protocol, this.startTime, this.expiryTime, this.ipRange, null /* identifier */, resource, + this.permissions, signature, this.cacheControl, this.contentDisposition, this.contentEncoding, + this.contentLanguage, this.contentType, delegationKey); } /** * Common assertions for generateSASQueryParameters overloads. */ - private void assertGenerateOK() { + private void assertGenerateOK(boolean usingUserDelegation) { Utility.assertNotNull("version", this.version); - Utility.assertNotNull("containerName", this.containerName); - if (blobName == null && snapshotId != null) { - throw new IllegalArgumentException("Cannot set a snapshotId without a blobName."); - } - } - - /** - * Gets the resource string for SAS tokens based on object state. - */ - private String getResource() { - String resource = "c"; - if (!Utility.isNullOrEmpty(this.blobName)) { - resource = snapshotId != null && !snapshotId.isEmpty() ? "bs" : "b"; - } - - return resource; - } + Utility.assertNotNull("canonicalName", this.canonicalName); - /** - * Gets the verified permissions string for SAS tokens based on object state. - */ - private String getVerifiedPermissions() { - String verifiedPermissions = null; - // Calling parse and toString guarantees the proper ordering and throws on invalid characters. 
- if (Utility.isNullOrEmpty(this.blobName)) { - if (this.permissions != null) { - verifiedPermissions = ContainerSASPermission.parse(this.permissions).toString(); + // Ensure either (expiryTime and permissions) or (identifier) is set + if (this.expiryTime == null || this.permissions == null) { + // Identifier is not required if user delegation is being used + if (!usingUserDelegation) { + Utility.assertNotNull("identifier", this.identifier); } } else { - if (this.permissions != null) { - verifiedPermissions = BlobSASPermission.parse(this.permissions).toString(); - } + Utility.assertNotNull("expiryTime", this.expiryTime); + Utility.assertNotNull("permissions", this.permissions); } - return verifiedPermissions; - } - - private String getCanonicalName(String accountName) { - // Container: "/blob/account/containername" - // Blob: "/blob/account/containername/blobname" - StringBuilder canonicalName = new StringBuilder("/blob"); - canonicalName.append('/').append(accountName).append('/').append(this.containerName); - - if (!Utility.isNullOrEmpty(this.blobName)) { - canonicalName.append("/").append(this.blobName); + if (this.resource != null && this.resource.equals(Constants.UrlConstants.SAS_CONTAINER_CONSTANT)) { + if (this.snapshotId != null) { + throw new IllegalArgumentException("Cannot set a snapshotId without resource being a blob."); + } } - - return canonicalName.toString(); } - private String stringToSign(final String verifiedPermissions, final String resource, - final SharedKeyCredential sharedKeyCredentials) { + private String stringToSign() { return String.join("\n", - verifiedPermissions == null ? "" : verifiedPermissions, - this.startTime == null ? "" : Utility.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime), - this.expiryTime == null ? "" : Utility.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime), - getCanonicalName(sharedKeyCredentials.accountName()), - this.identifier == null ? "" : this.identifier, - this.ipRange == null ? (new IPRange()).toString() : this.ipRange.toString(), - this.protocol == null ? "" : protocol.toString(), - this.version == null ? "" : this.version, - resource == null ? "" : resource, - this.snapshotId == null ? "" : this.snapshotId, - this.cacheControl == null ? "" : this.cacheControl, - this.contentDisposition == null ? "" : this.contentDisposition, - this.contentEncoding == null ? "" : this.contentEncoding, - this.contentLanguage == null ? "" : this.contentLanguage, - this.contentType == null ? "" : this.contentType + this.permissions == null ? "" : this.permissions, + this.startTime == null ? "" : Utility.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime), + this.expiryTime == null ? "" : Utility.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime), + this.canonicalName == null ? "" : this.canonicalName, + this.identifier == null ? "" : this.identifier, + this.ipRange == null ? (new IPRange()).toString() : this.ipRange.toString(), + this.protocol == null ? "" : protocol.toString(), + this.version == null ? "" : this.version, + this.resource == null ? "" : this.resource, + this.snapshotId == null ? "" : this.snapshotId, + this.cacheControl == null ? "" : this.cacheControl, + this.contentDisposition == null ? "" : this.contentDisposition, + this.contentEncoding == null ? "" : this.contentEncoding, + this.contentLanguage == null ? "" : this.contentLanguage, + this.contentType == null ? 
"" : this.contentType ); } - private String stringToSign(final String verifiedPermissions, final String resource, - final UserDelegationKey key, final String accountName) { + private String stringToSign(final UserDelegationKey key) { return String.join("\n", - verifiedPermissions == null ? "" : verifiedPermissions, - this.startTime == null ? "" : Utility.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime), - this.expiryTime == null ? "" : Utility.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime), - getCanonicalName(accountName), - key.signedOid() == null ? "" : key.signedOid(), - key.signedTid() == null ? "" : key.signedTid(), - key.signedStart() == null ? "" : Utility.ISO_8601_UTC_DATE_FORMATTER.format(key.signedStart()), - key.signedExpiry() == null ? "" : Utility.ISO_8601_UTC_DATE_FORMATTER.format(key.signedExpiry()), - key.signedService() == null ? "" : key.signedService(), - key.signedVersion() == null ? "" : key.signedVersion(), - this.ipRange == null ? new IPRange().toString() : this.ipRange.toString(), - this.protocol == null ? "" : this.protocol.toString(), - this.version == null ? "" : this.version, - resource == null ? "" : resource, - this.snapshotId == null ? "" : this.snapshotId, - this.cacheControl == null ? "" : this.cacheControl, - this.contentDisposition == null ? "" : this.contentDisposition, - this.contentEncoding == null ? "" : this.contentEncoding, - this.contentLanguage == null ? "" : this.contentLanguage, - this.contentType == null ? "" : this.contentType + this.permissions == null ? "" : this.permissions, + this.startTime == null ? "" : Utility.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime), + this.expiryTime == null ? "" : Utility.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime), + this.canonicalName == null ? "" : this.canonicalName, + key.signedOid() == null ? "" : key.signedOid(), + key.signedTid() == null ? "" : key.signedTid(), + key.signedStart() == null ? "" : Utility.ISO_8601_UTC_DATE_FORMATTER.format(key.signedStart()), + key.signedExpiry() == null ? "" : Utility.ISO_8601_UTC_DATE_FORMATTER.format(key.signedExpiry()), + key.signedService() == null ? "" : key.signedService(), + key.signedVersion() == null ? "" : key.signedVersion(), + this.ipRange == null ? new IPRange().toString() : this.ipRange.toString(), + this.protocol == null ? "" : this.protocol.toString(), + this.version == null ? "" : this.version, + this.resource == null ? "" : this.resource, + this.snapshotId == null ? "" : this.snapshotId, + this.cacheControl == null ? "" : this.cacheControl, + this.contentDisposition == null ? "" : this.contentDisposition, + this.contentEncoding == null ? "" : this.contentEncoding, + this.contentLanguage == null ? "" : this.contentLanguage, + this.contentType == null ? 
"" : this.contentType ); } } diff --git a/storage/client/blob/src/main/java/com/azure/storage/blob/StorageAsyncClient.java b/storage/client/blob/src/main/java/com/azure/storage/blob/StorageAsyncClient.java index cbb5a91600702..b93ee7bd579e7 100644 --- a/storage/client/blob/src/main/java/com/azure/storage/blob/StorageAsyncClient.java +++ b/storage/client/blob/src/main/java/com/azure/storage/blob/StorageAsyncClient.java @@ -21,6 +21,7 @@ import com.azure.storage.blob.models.StorageServiceProperties; import com.azure.storage.blob.models.StorageServiceStats; import com.azure.storage.blob.models.UserDelegationKey; +import com.azure.storage.common.credentials.SharedKeyCredential; import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; @@ -31,10 +32,9 @@ import static com.azure.storage.blob.Utility.postProcessResponse; /** - * Client to a storage account. It may only be instantiated through a {@link StorageClientBuilder}. - * This class does not hold any state about a particular storage account but is - * instead a convenient way of sending off appropriate requests to the resource on the service. - * It may also be used to construct URLs to blobs and containers. + * Client to a storage account. It may only be instantiated through a {@link StorageClientBuilder}. This class does not + * hold any state about a particular storage account but is instead a convenient way of sending off appropriate requests + * to the resource on the service. It may also be used to construct URLs to blobs and containers. * *

    * This client contains operations on a blob. Operations on a container are available on {@link ContainerAsyncClient} @@ -45,17 +45,17 @@ * information on containers. * *

    - * Note this client is an async client that returns reactive responses from Spring Reactor Core - * project (https://projectreactor.io/). Calling the methods in this client will NOT - * start the actual network operation, until {@code .subscribe()} is called on the reactive response. - * You can simply convert one of these responses to a {@link java.util.concurrent.CompletableFuture} - * object through {@link Mono#toFuture()}. + * Note this client is an async client that returns reactive responses from Spring Reactor Core project + * (https://projectreactor.io/). Calling the methods in this client will NOT start the actual network + * operation, until {@code .subscribe()} is called on the reactive response. You can simply convert one of these + * responses to a {@link java.util.concurrent.CompletableFuture} object through {@link Mono#toFuture()}. */ public final class StorageAsyncClient { private final AzureBlobStorageImpl azureBlobStorage; /** * Package-private constructor for use by {@link StorageClientBuilder}. + * * @param azureBlobStorageBuilder the API client builder for blob storage API */ StorageAsyncClient(AzureBlobStorageBuilder azureBlobStorageBuilder) { @@ -63,13 +63,12 @@ public final class StorageAsyncClient { } /** - * Initializes a {@link ContainerAsyncClient} object pointing to the specified container. This method does not create a - * container. It simply constructs the URL to the container and offers access to methods relevant to containers. + * Initializes a {@link ContainerAsyncClient} object pointing to the specified container. This method does not + * create a container. It simply constructs the URL to the container and offers access to methods relevant to + * containers. * - * @param containerName - * The name of the container to point to. - * @return - * A {@link ContainerAsyncClient} object pointing to the specified container + * @param containerName The name of the container to point to. + * @return A {@link ContainerAsyncClient} object pointing to the specified container */ public ContainerAsyncClient getContainerAsyncClient(String containerName) { return new ContainerAsyncClient(new AzureBlobStorageBuilder() @@ -95,11 +94,9 @@ public Mono> createContainer(String containerName * Azure Docs. * * @param containerName Name of the container to create - * @param metadata - * {@link Metadata} - * @param accessType - * Specifies how the data in this container is available to the public. See the x-ms-blob-public-access header - * in the Azure Docs for more information. Pass null for no public access. + * @param metadata {@link Metadata} + * @param accessType Specifies how the data in this container is available to the public. See the + * x-ms-blob-public-access header in the Azure Docs for more information. Pass null for no public access. * @return A response containing a {@link ContainerAsyncClient} used to interact with the container created. */ public Mono> createContainer(String containerName, Metadata metadata, PublicAccessType accessType) { @@ -111,6 +108,7 @@ public Mono> createContainer(String containerName /** * Gets the URL of the storage account represented by this client. + * * @return the URL. * @throws RuntimeException If the account URL is malformed. */ @@ -123,25 +121,21 @@ public URL getAccountUrl() { } /** - * Returns a reactive Publisher emitting all the containers in this account lazily as needed. For more information, see - * the Azure Docs. + * Returns a reactive Publisher emitting all the containers in this account lazily as needed. 
For more information, + * see the Azure Docs. * - * @return - * A reactive response emitting the list of containers. + * @return A reactive response emitting the list of containers. */ public Flux listContainers() { return this.listContainers(new ListContainersOptions()); } /** - * Returns a reactive Publisher emitting all the containers in this account lazily as needed. For more information, see - * the Azure Docs. - * - * @param options - * A {@link ListContainersOptions} which specifies what data should be returned by the service. + * Returns a reactive Publisher emitting all the containers in this account lazily as needed. For more information, + * see the Azure Docs. * - * @return - * A reactive response emitting the list of containers. + * @param options A {@link ListContainersOptions} which specifies what data should be returned by the service. + * @return A reactive response emitting the list of containers. */ public Flux listContainers(ListContainersOptions options) { return listContainersSegment(null, options) @@ -179,7 +173,7 @@ private Mono listContainersSegment(String } private Flux listContainersHelper(String marker, ListContainersOptions options, - ServicesListContainersSegmentResponse response) { + ServicesListContainersSegmentResponse response) { Flux result = Flux.fromIterable(response.value().containerItems()); if (response.value().nextMarker() != null) { // Recursively add the continuation items to the observable. @@ -195,8 +189,7 @@ private Flux listContainersHelper(String marker, ListContainersOp * Gets the properties of a storage account’s Blob service. For more information, see the * Azure Docs. * - * @return - * A reactive response containing the storage account properties. + * @return A reactive response containing the storage account properties. */ public Mono> getProperties() { return postProcessResponse( @@ -210,11 +203,8 @@ public Mono> getProperties() { * Note that setting the default service version has no effect when using this client because this client explicitly * sets the version header on each request, overriding the default. * - * @param properties - * Configures the service. - * - * @return - * A reactive response containing the storage account properties. + * @param properties Configures the service. + * @return A reactive response containing the storage account properties. */ public Mono setProperties(StorageServiceProperties properties) { return postProcessResponse( @@ -223,14 +213,11 @@ public Mono setProperties(StorageServiceProperties properties) { } /** - * Gets a user delegation key for use with this account's blob storage. - * Note: This method call is only valid when using {@link TokenCredential} in this object's {@link HttpPipeline}. - * - * @param start - * Start time for the key's validity. Null indicates immediate start. - * @param expiry - * Expiration of the key's validity. + * Gets a user delegation key for use with this account's blob storage. Note: This method call is only valid when + * using {@link TokenCredential} in this object's {@link HttpPipeline}. * + * @param start Start time for the key's validity. Null indicates immediate start. + * @param expiry Expiration of the key's validity. * @return A reactive response containing the user delegation key. * @throws IllegalArgumentException If {@code start} isn't null and is after {@code expiry}. */ @@ -250,13 +237,12 @@ public Mono> getUserDelegationKey(OffsetDateTime sta } /** - * Retrieves statistics related to replication for the Blob service. 
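As the class-level note above says, the async client issues no network call until the returned Mono or Flux is subscribed to. A hedged sketch (an existing storageAsyncClient is assumed, and the name() accessor on the emitted container items is an assumption):

    // Hypothetical sketch: the listContainers request is only sent once subscribe() is called.
    storageAsyncClient.listContainers()
        .map(containerItem -> containerItem.name()) // name() accessor assumed
        .subscribe(System.out::println);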
It is only available on the secondary - * location endpoint when read-access geo-redundant replication is enabled for the storage account. For more - * information, see the + * Retrieves statistics related to replication for the Blob service. It is only available on the secondary location + * endpoint when read-access geo-redundant replication is enabled for the storage account. For more information, see + * the * Azure Docs. * - * @return - * A reactive response containing the storage account statistics. + * @return A reactive response containing the storage account statistics. */ public Mono> getStatistics() { return postProcessResponse( @@ -268,11 +254,63 @@ public Mono> getStatistics() { * Returns the sku name and account kind for the account. For more information, please see the * Azure Docs. * - * @return - * A reactive response containing the storage account info. + * @return A reactive response containing the storage account info. */ public Mono> getAccountInfo() { return postProcessResponse(this.azureBlobStorage.services().getAccountInfoWithRestResponseAsync(Context.NONE)) .map(rb -> new SimpleResponse<>(rb, new StorageAccountInfo(rb.deserializedHeaders()))); } + + /** + * Generates an account SAS token with the specified parameters + * + * @param accountSASService The {@code AccountSASService} services for the account SAS + * @param accountSASResourceType An optional {@code AccountSASResourceType} resources for the account SAS + * @param accountSASPermission The {@code AccountSASPermission} permission for the account SAS + * @param expiryTime The {@code OffsetDateTime} expiry time for the account SAS + * @return A string that represents the SAS token + */ + public String generateAccountSAS(AccountSASService accountSASService, AccountSASResourceType accountSASResourceType, + AccountSASPermission accountSASPermission, OffsetDateTime expiryTime) { + return this.generateAccountSAS(accountSASService, accountSASResourceType, accountSASPermission, expiryTime, + null /* startTime */, null /* version */, null /* ipRange */, null /* sasProtocol */); + } + + /** + * Generates an account SAS token with the specified parameters + * + * @param accountSASService The {@code AccountSASService} services for the account SAS + * @param accountSASResourceType An optional {@code AccountSASResourceType} resources for the account SAS + * @param accountSASPermission The {@code AccountSASPermission} permission for the account SAS + * @param expiryTime The {@code OffsetDateTime} expiry time for the account SAS + * @param startTime The {@code OffsetDateTime} start time for the account SAS + * @param version The {@code String} version for the account SAS + * @param ipRange An optional {@code IPRange} ip address range for the SAS + * @param sasProtocol An optional {@code SASProtocol} protocol for the SAS + * @return A string that represents the SAS token + */ + public String generateAccountSAS(AccountSASService accountSASService, AccountSASResourceType accountSASResourceType, + AccountSASPermission accountSASPermission, OffsetDateTime expiryTime, OffsetDateTime startTime, String version, IPRange ipRange, + SASProtocol sasProtocol) { + + AccountSASSignatureValues accountSASSignatureValues = new AccountSASSignatureValues(); + accountSASSignatureValues.services(accountSASService == null ? null : accountSASService.toString()); + accountSASSignatureValues.resourceTypes(accountSASResourceType == null ? null : accountSASResourceType.toString()); + accountSASSignatureValues.permissions(accountSASPermission == null ? 
null : accountSASPermission.toString()); + accountSASSignatureValues.expiryTime(expiryTime); + accountSASSignatureValues.startTime(startTime); + + if (version != null) { + accountSASSignatureValues.version(version); + } + + accountSASSignatureValues.ipRange(ipRange); + accountSASSignatureValues.protocol(sasProtocol); + + SharedKeyCredential sharedKeyCredential = Utility.getSharedKeyCredential(this.azureBlobStorage.httpPipeline()); + + SASQueryParameters sasQueryParameters = accountSASSignatureValues.generateSASQueryParameters(sharedKeyCredential); + + return sasQueryParameters.encode(); + } } diff --git a/storage/client/blob/src/main/java/com/azure/storage/blob/StorageClient.java b/storage/client/blob/src/main/java/com/azure/storage/blob/StorageClient.java index cc4c6b71c56fa..1511726d653bb 100644 --- a/storage/client/blob/src/main/java/com/azure/storage/blob/StorageClient.java +++ b/storage/client/blob/src/main/java/com/azure/storage/blob/StorageClient.java @@ -24,14 +24,13 @@ import java.time.OffsetDateTime; /** - * Client to a storage account. It may only be instantiated through a {@link StorageClientBuilder}. - * This class does not hold any state about a particular storage account but is - * instead a convenient way of sending off appropriate requests to the resource on the service. - * It may also be used to construct URLs to blobs and containers. + * Client to a storage account. It may only be instantiated through a {@link StorageClientBuilder}. This class does not + * hold any state about a particular storage account but is instead a convenient way of sending off appropriate requests + * to the resource on the service. It may also be used to construct URLs to blobs and containers. * *
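The generateAccountSAS additions above build an AccountSASSignatureValues from the supplied pieces and sign it with the pipeline's SharedKeyCredential. A hedged calling sketch (an existing client is assumed, and the fluent setters on the AccountSAS* helper types are assumptions, not taken from this change):

    // Hypothetical sketch: a two-hour, read-only account SAS scoped to the blob service.
    AccountSASService service = new AccountSASService().blob(true);                   // setter assumed
    AccountSASResourceType resourceTypes = new AccountSASResourceType().object(true); // setter assumed
    AccountSASPermission permission = new AccountSASPermission().read(true);          // setter assumed
    OffsetDateTime expiry = OffsetDateTime.now().plusHours(2);
    String accountSas = storageClient.generateAccountSAS(service, resourceTypes, permission, expiry);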

    - * This client contains operations on a blob. Operations on a container are available on {@link ContainerClient} - * through {@link #getContainerClient(String)}, and operations on a blob are available on {@link BlobClient}. + * This client contains operations on a blob. Operations on a container are available on {@link ContainerClient} through + * {@link #getContainerClient(String)}, and operations on a blob are available on {@link BlobClient}. * *

    * Please see here for more @@ -42,6 +41,7 @@ public final class StorageClient { /** * Package-private constructor for use by {@link StorageClientBuilder}. + * * @param storageAsyncClient the async storage account client */ StorageClient(StorageAsyncClient storageAsyncClient) { @@ -52,10 +52,8 @@ public final class StorageClient { * Initializes a {@link ContainerClient} object pointing to the specified container. This method does not create a * container. It simply constructs the URL to the container and offers access to methods relevant to containers. * - * @param containerName - * The name of the container to point to. - * @return - * A {@link ContainerClient} object pointing to the specified container + * @param containerName The name of the container to point to. + * @return A {@link ContainerClient} object pointing to the specified container */ public ContainerClient getContainerClient(String containerName) { return new ContainerClient(storageAsyncClient.getContainerAsyncClient(containerName)); @@ -79,11 +77,9 @@ public Response createContainer(String containerName) { * Azure Docs. * * @param containerName Name of the container to create - * @param metadata - * {@link Metadata} - * @param accessType - * Specifies how the data in this container is available to the public. See the x-ms-blob-public-access header - * in the Azure Docs for more information. Pass null for no public access. + * @param metadata {@link Metadata} + * @param accessType Specifies how the data in this container is available to the public. See the + * x-ms-blob-public-access header in the Azure Docs for more information. Pass null for no public access. * @return A response containing a {@link ContainerClient} used to interact with the container created. */ public Response createContainer(String containerName, Metadata metadata, PublicAccessType accessType) { @@ -94,6 +90,7 @@ public Response createContainer(String containerName, Metadata /** * Gets the URL of the storage account represented by this client. + * * @return the URL. */ public URL getAccountUrl() { @@ -101,29 +98,24 @@ public URL getAccountUrl() { } /** - * Returns a lazy loaded list of containers in this account. The returned {@link Iterable} can be iterated - * through while new items are automatically retrieved as needed. For more information, see - * the Azure Docs. + * Returns a lazy loaded list of containers in this account. The returned {@link Iterable} can be iterated through + * while new items are automatically retrieved as needed. For more information, see the Azure Docs. * - * @return - * The list of containers. + * @return The list of containers. */ public Iterable listContainers() { return this.listContainers(new ListContainersOptions(), null); } /** - * Returns a lazy loaded list of containers in this account. The returned {@link Iterable} can be iterated - * through while new items are automatically retrieved as needed. For more information, see - * the Azure Docs. - * - * @param options - * A {@link ListContainersOptions} which specifies what data should be returned by the service. - * @param timeout - * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * Returns a lazy loaded list of containers in this account. The returned {@link Iterable} can be iterated through + * while new items are automatically retrieved as needed. For more information, see the Azure Docs. * - * @return - * The list of containers. 
+ * @param options A {@link ListContainersOptions} which specifies what data should be returned by the service. + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @return The list of containers. */ public Iterable listContainers(ListContainersOptions options, Duration timeout) { Flux response = storageAsyncClient.listContainers(options); @@ -135,8 +127,7 @@ public Iterable listContainers(ListContainersOptions options, Dur * Gets the properties of a storage account’s Blob service. For more information, see the * Azure Docs. * - * @return - * The storage account properties. + * @return The storage account properties. */ public Response getProperties() { return this.getProperties(null); @@ -146,11 +137,8 @@ public Response getProperties() { * Gets the properties of a storage account’s Blob service. For more information, see the * Azure Docs. * - * @param timeout - * An optional timeout value beyond which a {@link RuntimeException} will be raised. - * - * @return - * The storage account properties. + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @return The storage account properties. */ public Response getProperties(Duration timeout) { @@ -165,11 +153,8 @@ public Response getProperties(Duration timeout) { * Note that setting the default service version has no effect when using this client because this client explicitly * sets the version header on each request, overriding the default. * - * @param properties - * Configures the service. - * - * @return - * The storage account properties. + * @param properties Configures the service. + * @return The storage account properties. */ public VoidResponse setProperties(StorageServiceProperties properties) { return this.setProperties(properties, null); @@ -181,13 +166,9 @@ public VoidResponse setProperties(StorageServiceProperties properties) { * Note that setting the default service version has no effect when using this client because this client explicitly * sets the version header on each request, overriding the default. * - * @param properties - * Configures the service. - * @param timeout - * An optional timeout value beyond which a {@link RuntimeException} will be raised. - * - * @return - * The storage account properties. + * @param properties Configures the service. + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @return The storage account properties. */ public VoidResponse setProperties(StorageServiceProperties properties, Duration timeout) { Mono response = storageAsyncClient.setProperties(properties); @@ -196,66 +177,53 @@ public VoidResponse setProperties(StorageServiceProperties properties, Duration } /** - * Gets a user delegation key for use with this account's blob storage. - * Note: This method call is only valid when using {@link TokenCredential} in this object's {@link HttpPipeline}. - * - * @param start - * Start time for the key's validity. Null indicates immediate start. - * @param expiry - * Expiration of the key's validity. + * Gets a user delegation key for use with this account's blob storage. Note: This method call is only valid when + * using {@link TokenCredential} in this object's {@link HttpPipeline}. * - * @return - * The user delegation key. + * @param start Start time for the key's validity. Null indicates immediate start. + * @param expiry Expiration of the key's validity. + * @return The user delegation key. 
*/ public Response getUserDelegationKey(OffsetDateTime start, OffsetDateTime expiry) { return this.getUserDelegationKey(start, expiry, null); } /** - * Gets a user delegation key for use with this account's blob storage. - * Note: This method call is only valid when using {@link TokenCredential} in this object's {@link HttpPipeline}. - * - * @param start - * Start time for the key's validity. Null indicates immediate start. - * @param expiry - * Expiration of the key's validity. - * @param timeout - * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * Gets a user delegation key for use with this account's blob storage. Note: This method call is only valid when + * using {@link TokenCredential} in this object's {@link HttpPipeline}. * - * @return - * The user delegation key. + * @param start Start time for the key's validity. Null indicates immediate start. + * @param expiry Expiration of the key's validity. + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @return The user delegation key. */ public Response getUserDelegationKey(OffsetDateTime start, OffsetDateTime expiry, - Duration timeout) { + Duration timeout) { Mono> response = storageAsyncClient.getUserDelegationKey(start, expiry); return Utility.blockWithOptionalTimeout(response, timeout); } /** - * Retrieves statistics related to replication for the Blob service. It is only available on the secondary - * location endpoint when read-access geo-redundant replication is enabled for the storage account. For more - * information, see the + * Retrieves statistics related to replication for the Blob service. It is only available on the secondary location + * endpoint when read-access geo-redundant replication is enabled for the storage account. For more information, see + * the * Azure Docs. * - * @return - * The storage account statistics. + * @return The storage account statistics. */ public Response getStatistics() { return this.getStatistics(null); } /** - * Retrieves statistics related to replication for the Blob service. It is only available on the secondary - * location endpoint when read-access geo-redundant replication is enabled for the storage account. For more - * information, see the + * Retrieves statistics related to replication for the Blob service. It is only available on the secondary location + * endpoint when read-access geo-redundant replication is enabled for the storage account. For more information, see + * the * Azure Docs. * - * @param timeout - * An optional timeout value beyond which a {@link RuntimeException} will be raised. - * - * @return - * The storage account statistics. + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @return The storage account statistics. */ public Response getStatistics(Duration timeout) { Mono> response = storageAsyncClient.getStatistics(); @@ -267,8 +235,7 @@ public Response getStatistics(Duration timeout) { * Returns the sku name and account kind for the account. For more information, please see the * Azure Docs. * - * @return - * The storage account info. + * @return The storage account info. */ public Response getAccountInfo() { return this.getAccountInfo(null); @@ -278,15 +245,45 @@ public Response getAccountInfo() { * Returns the sku name and account kind for the account. For more information, please see the * Azure Docs. * - * @param timeout - * An optional timeout value beyond which a {@link RuntimeException} will be raised. 
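Together with the generateUserDelegationSAS overloads added to ContainerClient earlier in this change, the delegation key can be used end to end. A hedged sketch (a TokenCredential-backed pipeline, existing clients, the Response value() accessor, and ContainerSASPermission.parse are all assumptions):

    // Hypothetical sketch: fetch a key valid for one day, then sign a read-only container SAS with it.
    OffsetDateTime keyStart = OffsetDateTime.now();
    OffsetDateTime keyExpiry = keyStart.plusDays(1);
    UserDelegationKey key = storageClient.getUserDelegationKey(keyStart, keyExpiry).value(); // value() assumed
    String userDelegationSas = containerClient.generateUserDelegationSAS(
        key, "myaccount", ContainerSASPermission.parse("r"), keyExpiry);                     // account name illustrative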
- * - * @return - * The storage account info. + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @return The storage account info. */ public Response getAccountInfo(Duration timeout) { Mono> response = storageAsyncClient.getAccountInfo(); return Utility.blockWithOptionalTimeout(response, timeout); } + + /** + * Generates an account SAS token with the specified parameters + * + * @param accountSASService The {@code AccountSASService} services for the account SAS + * @param accountSASResourceType An optional {@code AccountSASResourceType} resources for the account SAS + * @param accountSASPermission The {@code AccountSASPermission} permission for the account SAS + * @param expiryTime The {@code OffsetDateTime} expiry time for the account SAS + * @return A string that represents the SAS token + */ + public String generateAccountSAS(AccountSASService accountSASService, AccountSASResourceType accountSASResourceType, + AccountSASPermission accountSASPermission, OffsetDateTime expiryTime) { + return this.storageAsyncClient.generateAccountSAS(accountSASService, accountSASResourceType, accountSASPermission, expiryTime); + } + + /** + * Generates an account SAS token with the specified parameters + * + * @param accountSASService The {@code AccountSASService} services for the account SAS + * @param accountSASResourceType An optional {@code AccountSASResourceType} resources for the account SAS + * @param accountSASPermission The {@code AccountSASPermission} permission for the account SAS + * @param expiryTime The {@code OffsetDateTime} expiry time for the account SAS + * @param startTime The {@code OffsetDateTime} start time for the account SAS + * @param version The {@code String} version for the account SAS + * @param ipRange An optional {@code IPRange} ip address range for the SAS + * @param sasProtocol An optional {@code SASProtocol} protocol for the SAS + * @return A string that represents the SAS token + */ + public String generateAccountSAS(AccountSASService accountSASService, AccountSASResourceType accountSASResourceType, + AccountSASPermission accountSASPermission, OffsetDateTime expiryTime, OffsetDateTime startTime, String version, IPRange ipRange, + SASProtocol sasProtocol) { + return this.storageAsyncClient.generateAccountSAS(accountSASService, accountSASResourceType, accountSASPermission, expiryTime, startTime, version, ipRange, sasProtocol); + } } diff --git a/storage/client/blob/src/main/java/com/azure/storage/blob/Utility.java b/storage/client/blob/src/main/java/com/azure/storage/blob/Utility.java index 3b5d073c4023e..252a5e83bbaf2 100644 --- a/storage/client/blob/src/main/java/com/azure/storage/blob/Utility.java +++ b/storage/client/blob/src/main/java/com/azure/storage/blob/Utility.java @@ -5,9 +5,13 @@ import com.azure.core.http.HttpHeader; import com.azure.core.http.HttpHeaders; +import com.azure.core.http.HttpPipeline; +import com.azure.core.http.policy.HttpPipelinePolicy; import com.azure.core.implementation.http.UrlBuilder; import com.azure.storage.blob.models.StorageErrorException; import com.azure.storage.blob.models.UserDelegationKey; +import com.azure.storage.common.credentials.SharedKeyCredential; +import com.azure.storage.common.policy.SharedKeyCredentialPolicy; import reactor.core.publisher.Mono; import reactor.util.annotation.Nullable; @@ -387,4 +391,23 @@ static T blockWithOptionalTimeout(Mono response, @Nullable Duration timeo return response.block(timeout); } } + + /** + * Gets the SharedKeyCredential from the 
HttpPipeline + * + * @param httpPipeline + * The {@code HttpPipeline} httpPipeline from which a sharedKeyCredential will be extracted + * + * @return The {@code SharedKeyCredential} sharedKeyCredential in the httpPipeline + */ + static SharedKeyCredential getSharedKeyCredential(HttpPipeline httpPipeline) { + for (int i = 0; i < httpPipeline.getPolicyCount(); i++) { + HttpPipelinePolicy httpPipelinePolicy = httpPipeline.getPolicy(i); + if (httpPipelinePolicy instanceof SharedKeyCredentialPolicy) { + SharedKeyCredentialPolicy sharedKeyCredentialPolicy = (SharedKeyCredentialPolicy) httpPipelinePolicy; + return sharedKeyCredentialPolicy.sharedKeyCredential(); + } + } + return null; + } } diff --git a/storage/client/blob/src/main/java/com/azure/storage/blob/implementation/AppendBlobsImpl.java b/storage/client/blob/src/main/java/com/azure/storage/blob/implementation/AppendBlobsImpl.java index 77a1c79599e38..a6f486ff075f7 100644 --- a/storage/client/blob/src/main/java/com/azure/storage/blob/implementation/AppendBlobsImpl.java +++ b/storage/client/blob/src/main/java/com/azure/storage/blob/implementation/AppendBlobsImpl.java @@ -29,12 +29,11 @@ import com.azure.storage.blob.models.SourceModifiedAccessConditions; import com.azure.storage.blob.models.StorageErrorException; import io.netty.buffer.ByteBuf; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; - import java.net.URL; import java.time.OffsetDateTime; import java.util.Map; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; /** * An instance of this class provides access to all the operations defined in diff --git a/storage/client/blob/src/main/java/com/azure/storage/blob/implementation/BlobsImpl.java b/storage/client/blob/src/main/java/com/azure/storage/blob/implementation/BlobsImpl.java index bcdb0de6a2d7f..e89e8972aed35 100644 --- a/storage/client/blob/src/main/java/com/azure/storage/blob/implementation/BlobsImpl.java +++ b/storage/client/blob/src/main/java/com/azure/storage/blob/implementation/BlobsImpl.java @@ -45,11 +45,10 @@ import com.azure.storage.blob.models.ModifiedAccessConditions; import com.azure.storage.blob.models.SourceModifiedAccessConditions; import com.azure.storage.blob.models.StorageErrorException; -import reactor.core.publisher.Mono; - import java.net.URL; import java.time.OffsetDateTime; import java.util.Map; +import reactor.core.publisher.Mono; /** * An instance of this class provides access to all the operations defined in diff --git a/storage/client/blob/src/main/java/com/azure/storage/blob/models/package-info.java b/storage/client/blob/src/main/java/com/azure/storage/blob/models/package-info.java index 653e8a46d1d67..a078397789b87 100644 --- a/storage/client/blob/src/main/java/com/azure/storage/blob/models/package-info.java +++ b/storage/client/blob/src/main/java/com/azure/storage/blob/models/package-info.java @@ -3,6 +3,6 @@ // Code generated by Microsoft (R) AutoRest Code Generator. /** - * This package contains the data models for AzureBlobStorage. + * Package containing the data models for AzureBlobStorage. 
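This helper is what the generateAccountSAS path shown earlier uses to locate its signing key; conceptually:

    // Conceptual sketch mirroring generateAccountSAS above: walk the pipeline policies for the shared key.
    SharedKeyCredential sharedKeyCredential = Utility.getSharedKeyCredential(this.azureBlobStorage.httpPipeline());
    if (sharedKeyCredential == null) {
        // No SharedKeyCredentialPolicy was registered; the client was built with SAS or OAuth credentials instead.
    }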
*/ package com.azure.storage.blob.models; diff --git a/storage/client/blob/src/main/java/com/azure/storage/blob/package-info.java b/storage/client/blob/src/main/java/com/azure/storage/blob/package-info.java index 77e28d3d565ae..6048911e2d091 100644 --- a/storage/client/blob/src/main/java/com/azure/storage/blob/package-info.java +++ b/storage/client/blob/src/main/java/com/azure/storage/blob/package-info.java @@ -3,7 +3,6 @@ // Code generated by Microsoft (R) AutoRest Code Generator /** - * This package contains the classes for StorageClient. - * Storage Client. + * Package containing the classes for StorageClient. */ package com.azure.storage.blob; diff --git a/storage/client/blob/src/main/java/com/azure/storage/common/credentials/SASTokenCredential.java b/storage/client/blob/src/main/java/com/azure/storage/common/credentials/SASTokenCredential.java index fe93273e3f64e..9e7d5f2691e9c 100644 --- a/storage/client/blob/src/main/java/com/azure/storage/common/credentials/SASTokenCredential.java +++ b/storage/client/blob/src/main/java/com/azure/storage/common/credentials/SASTokenCredential.java @@ -18,16 +18,32 @@ public final class SASTokenCredential { private static final String SIGNED_PERMISSIONS = "sp"; private static final String SIGNED_EXPIRY = "se"; private static final String SIGNATURE = "sig"; + private static final String SIGNED_RESOURCE = "sr"; // Optional SAS token pieces private static final String SIGNED_START = "st"; private static final String SIGNED_PROTOCOL = "spr"; private static final String SIGNED_IP = "sip"; + private static final String CACHE_CONTROL = "rscc"; + private static final String CONTENT_DISPOSITION = "rscd"; + private static final String CONTENT_ENCODING = "rsce"; + private static final String CONTENT_LANGUAGE = "rscl"; + private static final String CONTENT_TYPE = "rsct"; + + // Possible User Delegation Key pieces + private static final String SIGNED_KEY_O_ID = "skoid"; + private static final String SIGNED_KEY_T_ID = "sktid"; + private static final String SIGNED_KEY_START = "skt"; + private static final String SIGNED_KEY_EXPIRY = "ske"; + private static final String SIGNED_KEY_SERVICE = "sks"; + private static final String SIGNED_KEY_VERSION = "skv"; + private final String sasToken; /** * Creates a SAS token credential from the passed SAS token. + * * @param sasToken SAS token used to authenticate requests with the service. */ public SASTokenCredential(String sasToken) { @@ -43,6 +59,7 @@ public String sasToken() { /** * Creates a SAS token credential from the passed URL query string + * * @param query URL query used to build the SAS token * @return a SAS token credential if the query param contains all the necessary pieces */ @@ -57,20 +74,36 @@ public static SASTokenCredential fromQuery(String query) { queryParams.put(key, queryParam); } - if (queryParams.size() < 6 - || !queryParams.containsKey(SIGNED_VERSION) - || !queryParams.containsKey(SIGNED_SERVICES) - || !queryParams.containsKey(SIGNED_RESOURCE_TYPES) - || !queryParams.containsKey(SIGNED_PERMISSIONS) - || !queryParams.containsKey(SIGNED_EXPIRY) - || !queryParams.containsKey(SIGNATURE)) { + /* Because ServiceSAS only requires expiry and permissions, both of which could be on the container + acl, the only guaranteed indication of a SAS is the signature. We'll let the service validate + the other query parameters. 
+         */
+        if (!queryParams.containsKey(SIGNATURE)) {
             return null;
         }
 
-        StringBuilder sasTokenBuilder = new StringBuilder(queryParams.get(SIGNED_VERSION))
-            .append("&").append(queryParams.get(SIGNED_SERVICES))
-            .append("&").append(queryParams.get(SIGNED_RESOURCE_TYPES))
-            .append("&").append(queryParams.get(SIGNED_PERMISSIONS));
+        StringBuilder sasTokenBuilder = new StringBuilder();
+
+        if (queryParams.containsKey(SIGNED_VERSION)) {
+            sasTokenBuilder.append(queryParams.get(SIGNED_VERSION));
+        }
+
+        if (queryParams.containsKey(SIGNED_SERVICES)) {
+            sasTokenBuilder.append("&").append(queryParams.get(SIGNED_SERVICES));
+        }
+
+        if (queryParams.containsKey(SIGNED_RESOURCE_TYPES)) {
+            sasTokenBuilder.append("&").append(queryParams.get(SIGNED_RESOURCE_TYPES));
+        }
+
+        if (queryParams.containsKey(SIGNED_PERMISSIONS)) {
+            sasTokenBuilder.append("&").append(queryParams.get(SIGNED_PERMISSIONS));
+        }
+
+        if (queryParams.containsKey(SIGNED_RESOURCE)) {
+            sasTokenBuilder.append("&").append(queryParams.get(SIGNED_RESOURCE));
+        }
 
         // SIGNED_START is optional
         if (queryParams.containsKey(SIGNED_START)) {
@@ -89,6 +122,51 @@ public static SASTokenCredential fromQuery(String query) {
             sasTokenBuilder.append("&").append(queryParams.get(SIGNED_PROTOCOL));
         }
 
+        if (queryParams.containsKey(CACHE_CONTROL)) {
+            sasTokenBuilder.append("&").append(queryParams.get(CACHE_CONTROL));
+        }
+
+        if (queryParams.containsKey(CONTENT_DISPOSITION)) {
+            sasTokenBuilder.append("&").append(queryParams.get(CONTENT_DISPOSITION));
+        }
+
+        if (queryParams.containsKey(CONTENT_ENCODING)) {
+            sasTokenBuilder.append("&").append(queryParams.get(CONTENT_ENCODING));
+        }
+
+        if (queryParams.containsKey(CONTENT_LANGUAGE)) {
+            sasTokenBuilder.append("&").append(queryParams.get(CONTENT_LANGUAGE));
+        }
+
+        if (queryParams.containsKey(CONTENT_TYPE)) {
+            sasTokenBuilder.append("&").append(queryParams.get(CONTENT_TYPE));
+        }
+
+        // User Delegation Key Parameters
+        if (queryParams.containsKey(SIGNED_KEY_O_ID)) {
+            sasTokenBuilder.append("&").append(queryParams.get(SIGNED_KEY_O_ID));
+        }
+
+        if (queryParams.containsKey(SIGNED_KEY_T_ID)) {
+            sasTokenBuilder.append("&").append(queryParams.get(SIGNED_KEY_T_ID));
+        }
+
+        if (queryParams.containsKey(SIGNED_KEY_START)) {
+            sasTokenBuilder.append("&").append(queryParams.get(SIGNED_KEY_START));
+        }
+
+        if (queryParams.containsKey(SIGNED_KEY_EXPIRY)) {
+            sasTokenBuilder.append("&").append(queryParams.get(SIGNED_KEY_EXPIRY));
+        }
+
+        if (queryParams.containsKey(SIGNED_KEY_SERVICE)) {
+            sasTokenBuilder.append("&").append(queryParams.get(SIGNED_KEY_SERVICE));
+        }
+
+        if (queryParams.containsKey(SIGNED_KEY_VERSION)) {
+            sasTokenBuilder.append("&").append(queryParams.get(SIGNED_KEY_VERSION));
+        }
+
         sasTokenBuilder.append("&").append(queryParams.get(SIGNATURE));
 
         return new SASTokenCredential(sasTokenBuilder.toString());
diff --git a/storage/client/blob/src/main/java/com/azure/storage/common/credentials/package-info.java b/storage/client/blob/src/main/java/com/azure/storage/common/credentials/package-info.java
index b03314b4cc0b0..b7e3c73a85a80 100644
--- a/storage/client/blob/src/main/java/com/azure/storage/common/credentials/package-info.java
+++ b/storage/client/blob/src/main/java/com/azure/storage/common/credentials/package-info.java
@@ -2,6 +2,6 @@
 // Licensed under the MIT License.
 
 /**
- * This package contains credentials used by Azure Storage services.
+ * Package containing credentials used by Azure Storage services.
 */
 package com.azure.storage.common.credentials;
diff --git a/storage/client/blob/src/main/java/com/azure/storage/common/policy/SharedKeyCredentialPolicy.java b/storage/client/blob/src/main/java/com/azure/storage/common/policy/SharedKeyCredentialPolicy.java
index 8ee1284591dd2..e8b54914cb762 100644
--- a/storage/client/blob/src/main/java/com/azure/storage/common/policy/SharedKeyCredentialPolicy.java
+++ b/storage/client/blob/src/main/java/com/azure/storage/common/policy/SharedKeyCredentialPolicy.java
@@ -18,12 +18,20 @@ public final class SharedKeyCredentialPolicy implements HttpPipelinePolicy {
 
     /**
      * Creates a SharedKey pipeline policy that adds the SharedKey into the request's authorization header.
+     *
      * @param credential the SharedKey credential used to create the policy.
      */
     public SharedKeyCredentialPolicy(SharedKeyCredential credential) {
         this.credential = credential;
     }
 
+    /**
+     * @return the {@link SharedKeyCredential} linked to the policy.
+     */
+    public SharedKeyCredential sharedKeyCredential() {
+        return this.credential;
+    }
+
     @Override
     public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
         String authorizationValue = credential.generateAuthorizationHeader(context.httpRequest().url(),
diff --git a/storage/client/blob/src/main/java/com/azure/storage/common/policy/package-info.java b/storage/client/blob/src/main/java/com/azure/storage/common/policy/package-info.java
index 6f36065ea580b..35661c94f0454 100644
--- a/storage/client/blob/src/main/java/com/azure/storage/common/policy/package-info.java
+++ b/storage/client/blob/src/main/java/com/azure/storage/common/policy/package-info.java
@@ -2,6 +2,6 @@
 // Licensed under the MIT License.
 
 /**
- * This package contains policies used by Azure Storage services.
+ * Package containing policies used by Azure Storage services.
  */
 package com.azure.storage.common.policy;
diff --git a/storage/client/blob/src/test/java/com/azure/storage/blob/SASTest.groovy b/storage/client/blob/src/test/java/com/azure/storage/blob/SASTest.groovy
new file mode 100644
index 0000000000000..1ebf9860f2ac3
--- /dev/null
+++ b/storage/client/blob/src/test/java/com/azure/storage/blob/SASTest.groovy
@@ -0,0 +1,1079 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package com.azure.storage.blob
+
+import com.azure.storage.blob.models.AccessPolicy
+import com.azure.storage.blob.models.BlobRange
+import com.azure.storage.blob.models.SignedIdentifier
+import com.azure.storage.blob.models.StorageErrorCode
+import com.azure.storage.blob.models.UserDelegationKey
+import com.azure.storage.common.credentials.SASTokenCredential
+import com.azure.storage.common.credentials.SharedKeyCredential
+import spock.lang.Unroll
+
+import java.time.LocalDateTime
+import java.time.OffsetDateTime
+import java.time.ZoneOffset
+
+class SASTest extends APISpec {
+
+    def "responseError"() {
+        when:
+        cu.listBlobsFlat()
+
+        then:
+        def e = thrown(StorageException)
+        e.errorCode() == StorageErrorCode.INVALID_QUERY_PARAMETER_VALUE
+        e.statusCode() == 400
+        e.message().contains("Value for one of the query parameters specified in the request URI is invalid.")
+        e.getMessage().contains("