public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
- HttpRequest httpRequest = context.httpRequest();
- boolean considerSecondary = (httpRequest.httpMethod().equals(HttpMethod.GET)
- || httpRequest.httpMethod().equals(HttpMethod.HEAD))
- && (this.requestRetryOptions.secondaryHost() != null);
+ boolean considerSecondary = (this.requestRetryOptions.secondaryHost() != null)
+ && (HttpMethod.GET.equals(context.httpRequest().httpMethod()) || HttpMethod.HEAD.equals(context.httpRequest().httpMethod()));
- return this.attemptAsync(httpRequest, next, 1, considerSecondary, 1);
+ return attemptAsync(context, next, context.httpRequest(), considerSecondary, 1, 1);
}
/**
- * This method actually attempts to send the request and determines if we should attempt again and, if so, how
- * long to wait before sending out the next request.
+ * This method actually attempts to send the request and determines if we should attempt again and, if so, how long
+ * to wait before sending out the next request.
*
- * Exponential retry algorithm: ((2 ^ attempt) - 1) * delay * random(0.8, 1.2) When to retry: connection failure
- * or an HTTP status code of 500 or greater, except 501 and 505 If using a secondary: Odd tries go against
- * primary; even tries go against the secondary For a primary wait ((2 ^ primaryTries - 1) * delay * random(0.8,
- * 1.2) If secondary gets a 404, don't fail, retry but future retries are only against the primary When retrying
- * against a secondary, ignore the retry count and wait (.1 second * random(0.8, 1.2))
+ * Exponential retry algorithm: ((2 ^ attempt) - 1) * delay * random(0.8, 1.2)
+ * When to retry: connection failure or an HTTP status code of 500 or greater, except 501 and 505
+ * If using a secondary: odd tries go against the primary; even tries go against the secondary
+ * For a primary, wait ((2 ^ primaryTries) - 1) * delay * random(0.8, 1.2)
+ * If the secondary gets a 404, don't fail; retry, but future retries are only against the primary
+ * When retrying against a secondary, ignore the retry count and wait (.1 second * random(0.8, 1.2))
*
- * @param httpRequest
- * The request to try.
- * @param primaryTry
- * This indicates how man tries we've attempted against the primary DC.
- * @param considerSecondary
- * Before each try, we'll select either the primary or secondary URL if appropriate.
- * @param attempt
- * This indicates the total number of attempts to send the request.
- *
- * @return A single containing either the successful response or an error that was not retryable because either
- * the maxTries was exceeded or retries will not mitigate the issue.
+ * @param context The context containing the request to try.
+ * @param next The next policy to apply to the request.
+ * @param originalRequest The unmodified original request.
+ * @param considerSecondary Before each try, we'll select either the primary or secondary URL if appropriate.
+ * @param primaryTry This indicates how many tries we've attempted against the primary DC.
+ * @param attempt This indicates the total number of attempts to send the request.
+ * @return A single containing either the successful response or an error that was not retryable because either the
+ * maxTries was exceeded or retries will not mitigate the issue.
*/
- private Mono<HttpResponse> attemptAsync(final HttpRequest httpRequest, HttpPipelineNextPolicy next, final int primaryTry,
- final boolean considerSecondary,
- final int attempt) {
-
+ private Mono<HttpResponse> attemptAsync(final HttpPipelineCallContext context, HttpPipelineNextPolicy next, final HttpRequest originalRequest,
+ boolean considerSecondary, final int primaryTry, final int attempt) {
// Determine which endpoint to try. It's primary if there is no secondary or if it is an odd number attempt.
final boolean tryingPrimary = !considerSecondary || (attempt % 2 != 0);
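Note: the delay computation itself falls in the lines this hunk skips. Below is a minimal sketch of the backoff the javadoc above describes; the `calculateDelayMs` name and the `RETRY_DELAY_MS`/`MAX_RETRY_DELAY_MS` defaults are illustrative assumptions, not the SDK's actual code.

```java
import java.util.concurrent.ThreadLocalRandom;

final class RetryDelaySketch {
    // Hypothetical defaults standing in for RequestRetryOptions values.
    private static final long RETRY_DELAY_MS = 4_000;
    private static final long MAX_RETRY_DELAY_MS = 120_000;

    // Primary: ((2 ^ primaryTry) - 1) * delay, jittered by random(0.8, 1.2) and capped.
    // Secondary: a flat ~100 ms with the same jitter, regardless of the try count.
    static long calculateDelayMs(boolean tryingPrimary, int primaryTry) {
        double jitter = ThreadLocalRandom.current().nextDouble(0.8, 1.2);
        long baseMs = tryingPrimary
            ? (long) ((Math.pow(2, primaryTry) - 1) * RETRY_DELAY_MS)
            : 100;
        return Math.min((long) (baseMs * jitter), MAX_RETRY_DELAY_MS);
    }

    public static void main(String[] args) {
        for (int primaryTry = 1; primaryTry <= 4; primaryTry++) {
            System.out.printf("primary try %d -> ~%d ms%n", primaryTry, calculateDelayMs(true, primaryTry));
        }
        System.out.printf("secondary try -> ~%d ms%n", calculateDelayMs(false, 1));
    }
}
```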
@@ -96,14 +88,14 @@ stream, the buffers that were emitted will have already been consumed (their pos
ByteBuffers downstream will only actually consume a duplicate so the original is preserved. This only
duplicates the ByteBuffer object, not the underlying data.
*/
- Flux<ByteBuf> bufferedBody = httpRequest.body() == null
- ? null : httpRequest.body().map(ByteBuf::duplicate);
- httpRequest.body(bufferedBody);
+ context.httpRequest(originalRequest.buffer());
+ Flux<ByteBuf> bufferedBody = (context.httpRequest().body() == null) ? null : context.httpRequest().body().map(ByteBuf::duplicate);
+ context.httpRequest().body(bufferedBody);
if (!tryingPrimary) {
- UrlBuilder builder = UrlBuilder.parse(httpRequest.url());
+ UrlBuilder builder = UrlBuilder.parse(context.httpRequest().url());
builder.host(this.requestRetryOptions.secondaryHost());
try {
- httpRequest.url(builder.toURL());
+ context.httpRequest().url(builder.toURL());
} catch (MalformedURLException e) {
return Mono.error(e);
}
@@ -114,55 +106,53 @@ stream, the buffers that were emitted will have already been consumed (their pos
until after the retry backoff delay, so we call delaySubscription.
*/
return next.clone().process()
- .timeout(Duration.ofSeconds(this.requestRetryOptions.tryTimeout()))
- .delaySubscription(Duration.ofMillis(delayMs))
- .flatMap(response -> {
- boolean newConsiderSecondary = considerSecondary;
- String action;
- int statusCode = response.statusCode();
+ .timeout(Duration.ofSeconds(this.requestRetryOptions.tryTimeout()))
+ .delaySubscription(Duration.ofMillis(delayMs))
+ .flatMap(response -> {
+ boolean newConsiderSecondary = considerSecondary;
+ String action;
+ int statusCode = response.statusCode();
/*
If attempt was against the secondary & it returned a StatusNotFound (404), then the
resource was not found. This may be due to replication delay. So, in this case,
we'll never try the secondary again for this operation.
*/
- if (!tryingPrimary && statusCode == 404) {
- newConsiderSecondary = false;
- action = "Retry: Secondary URL returned 404";
- } else if (statusCode == 503 || statusCode == 500) {
- action = "Retry: Temporary error or server timeout";
- } else {
- action = "NoRetry: Successful HTTP request";
- }
-
- if (action.charAt(0) == 'R' && attempt < requestRetryOptions.maxTries()) {
+ if (!tryingPrimary && statusCode == 404) {
+ newConsiderSecondary = false;
+ action = "Retry: Secondary URL returned 404";
+ } else if (statusCode == 503 || statusCode == 500) {
+ action = "Retry: Temporary error or server timeout";
+ } else {
+ action = "NoRetry: Successful HTTP request";
+ }
+
+ if (action.charAt(0) == 'R' && attempt < requestRetryOptions.maxTries()) {
/*
We increment primaryTry if we are about to try the primary again (which is when we
consider the secondary and tried the secondary this time (tryingPrimary==false) or
we do not consider the secondary at all (considerSecondary==false)). This will
ensure primaryTry is correct when passed to calculate the delay.
*/
- int newPrimaryTry = !tryingPrimary || !considerSecondary
- ? primaryTry + 1 : primaryTry;
- return attemptAsync(httpRequest, next, newPrimaryTry, newConsiderSecondary,
- attempt + 1);
- }
- return Mono.just(response);
- })
- .onErrorResume(throwable -> {
+ int newPrimaryTry = (!tryingPrimary || !considerSecondary) ? primaryTry + 1 : primaryTry;
+ return attemptAsync(context, next, originalRequest, newConsiderSecondary, newPrimaryTry, attempt + 1);
+ }
+
+ return Mono.just(response);
+ }).onErrorResume(throwable -> {
/*
It is likely that many users will not realize that their Flux must be replayable and
get an error upon retries when the provided data length does not match the length of the exact
data. We cannot enforce the desired Flux behavior, so we provide a hint when this is likely
the root cause.
*/
- if (throwable instanceof IllegalStateException && attempt > 1) {
- return Mono.error(new IllegalStateException("The request failed because the "
- + "size of the contents of the provided Flux did not match the provided "
- + "data size upon attempting to retry. This is likely caused by the Flux "
- + "not being replayable. To support retries, all Fluxes must produce the "
- + "same data for each subscriber. Please ensure this behavior.", throwable));
- }
+ if (throwable instanceof IllegalStateException && attempt > 1) {
+ return Mono.error(new IllegalStateException("The request failed because the "
+ + "size of the contents of the provided Flux did not match the provided "
+ + "data size upon attempting to retry. This is likely caused by the Flux "
+ + "not being replayable. To support retries, all Fluxes must produce the "
+ + "same data for each subscriber. Please ensure this behavior.", throwable));
+ }
/*
IOException is a catch-all for IO related errors. Technically it includes many types which may
@@ -170,28 +160,27 @@ we do not consider the secondary at all (considerSecondary==false)). This will
either case, it is better to optimistically retry instead of failing too soon.
A Timeout Exception is a client-side timeout coming from Rx.
*/
- String action;
- if (throwable instanceof IOException) {
- action = "Retry: Network error";
- } else if (throwable instanceof TimeoutException) {
- action = "Retry: Client timeout";
- } else {
- action = "NoRetry: Unknown error";
- }
-
- if (action.charAt(0) == 'R' && attempt < requestRetryOptions.maxTries()) {
+ String action;
+ if (throwable instanceof IOException) {
+ action = "Retry: Network error";
+ } else if (throwable instanceof TimeoutException) {
+ action = "Retry: Client timeout";
+ } else {
+ action = "NoRetry: Unknown error";
+ }
+
+ if (action.charAt(0) == 'R' && attempt < requestRetryOptions.maxTries()) {
/*
We increment primaryTry if we are about to try the primary again (which is when we
consider the secondary and tried the secondary this time (tryingPrimary==false) or
we do not consider the secondary at all (considerSecondary==false)). This will
ensure primaryTry is correct when passed to calculate the delay.
*/
- int newPrimaryTry = !tryingPrimary || !considerSecondary
- ? primaryTry + 1 : primaryTry;
- return attemptAsync(httpRequest, next, newPrimaryTry, considerSecondary,
- attempt + 1);
- }
- return Mono.error(throwable);
- });
+ int newPrimaryTry = (!tryingPrimary || !considerSecondary) ? primaryTry + 1 : primaryTry;
+ return attemptAsync(context, next, originalRequest, considerSecondary, newPrimaryTry, attempt + 1);
+ }
+
+ return Mono.error(throwable);
+ });
}
}
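The IllegalStateException hint above exists because retries re-subscribe to the request body. A small, self-contained illustration of replayable versus one-shot bodies, using Reactor and Netty directly (not SDK code):

```java
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import reactor.core.publisher.Flux;

import java.nio.charset.StandardCharsets;

public final class ReplayableBodySketch {
    public static void main(String[] args) {
        byte[] data = "payload".getBytes(StandardCharsets.UTF_8);

        // Replayable: a fresh ByteBuf is built per subscription, so the policy's
        // next.clone().process() re-subscription sees all seven bytes again.
        Flux<ByteBuf> replayable = Flux.defer(() -> Flux.just(Unpooled.wrappedBuffer(data)));
        System.out.println(replayable.blockFirst().readableBytes()); // 7
        System.out.println(replayable.blockFirst().readableBytes()); // 7

        // Not replayable: every subscription hands out the same ByteBuf instance.
        // Once the first attempt reads it, the reader index sits at the end, so a
        // retry would send 0 bytes and the declared content length would mismatch.
        ByteBuf shared = Unpooled.wrappedBuffer(data);
        Flux<ByteBuf> oneShot = Flux.just(shared);
        oneShot.blockFirst().readBytes(new byte[data.length]); // first attempt consumes it
        System.out.println(oneShot.blockFirst().readableBytes()); // 0
    }
}
```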
diff --git a/sdk/storage/azure-storage-blob/src/samples/java/com/azure/storage/blob/BlobAsyncClientJavaDocCodeSnippets.java b/sdk/storage/azure-storage-blob/src/samples/java/com/azure/storage/blob/BlobAsyncClientJavaDocCodeSnippets.java
index 3b82eabc7bcfc..7f0e2ca99eca7 100644
--- a/sdk/storage/azure-storage-blob/src/samples/java/com/azure/storage/blob/BlobAsyncClientJavaDocCodeSnippets.java
+++ b/sdk/storage/azure-storage-blob/src/samples/java/com/azure/storage/blob/BlobAsyncClientJavaDocCodeSnippets.java
@@ -121,7 +121,7 @@ public void download() {
// END: com.azure.storage.blob.BlobAsyncClient.download
// BEGIN: com.azure.storage.blob.BlobAsyncClient.download#BlobRange-ReliableDownloadOptions-BlobAccessConditions-boolean
- BlobRange range = new BlobRange(1024, 2048);
+ BlobRange range = new BlobRange(1024, 2048L);
ReliableDownloadOptions options = new ReliableDownloadOptions().maxRetryRequests(5);
client.download(range, options, null, false).subscribe(response -> {
@@ -147,7 +147,7 @@ public void downloadToFile() {
// END: com.azure.storage.blob.BlobAsyncClient.downloadToFile#String
// BEGIN: com.azure.storage.blob.BlobAsyncClient.downloadToFile#String-BlobRange-Integer-ReliableDownloadOptions-BlobAccessConditions-boolean
- BlobRange range = new BlobRange(1024, 2048);
+ BlobRange range = new BlobRange(1024, 2048L);
ReliableDownloadOptions options = new ReliableDownloadOptions().maxRetryRequests(5);
client.downloadToFile(file, range, null, options, null, false)
diff --git a/sdk/storage/azure-storage-blob/src/samples/java/com/azure/storage/blob/BlobClientJavaDocCodeSnippets.java b/sdk/storage/azure-storage-blob/src/samples/java/com/azure/storage/blob/BlobClientJavaDocCodeSnippets.java
index d99b83f910ad2..e19f92dc1a6b9 100644
--- a/sdk/storage/azure-storage-blob/src/samples/java/com/azure/storage/blob/BlobClientJavaDocCodeSnippets.java
+++ b/sdk/storage/azure-storage-blob/src/samples/java/com/azure/storage/blob/BlobClientJavaDocCodeSnippets.java
@@ -115,7 +115,7 @@ public void download() {
// END: com.azure.storage.blob.BlobClient.download#OutputStream
// BEGIN: com.azure.storage.blob.BlobClient.download#OutputStream-BlobRange-ReliableDownloadOptions-BlobAccessConditions-boolean-Duration
- BlobRange range = new BlobRange(1024, 2048);
+ BlobRange range = new BlobRange(1024, 2048L);
ReliableDownloadOptions options = new ReliableDownloadOptions().maxRetryRequests(5);
System.out.printf("Download completed with status %d%n",
@@ -134,7 +134,7 @@ public void downloadToFile() {
// END: com.azure.storage.blob.BlobClient.downloadToFile#String
// BEGIN: com.azure.storage.blob.BlobClient.downloadToFile#String-BlobRange-Integer-ReliableDownloadOptions-BlobAccessConditions-boolean-Duration
- BlobRange range = new BlobRange(1024, 2048);
+ BlobRange range = new BlobRange(1024, 2048L);
ReliableDownloadOptions options = new ReliableDownloadOptions().maxRetryRequests(5);
client.downloadToFile(file, range, null, options, null, false, timeout);
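The `2048` to `2048L` changes in these snippets reflect the range count being a boxed Long (where null means "to the end of the blob"), so a bare int literal no longer matches the overload. A hedged sketch, with the constructor shapes assumed from the snippets rather than verified against the release:

```java
import com.azure.storage.blob.models.BlobRange;

public final class BlobRangeSketch {
    public static void main(String[] args) {
        // Count is Long so that "no count" can be expressed; the literal must be
        // widened to a long (2048L) to pick the (long, Long) overload.
        BlobRange window = new BlobRange(1024, 2048L); // bytes 1024..3071
        BlobRange tail = new BlobRange(1024);          // bytes 1024..end of blob
        System.out.println(window + " / " + tail);
    }
}
```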
diff --git a/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/APISpec.groovy b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/APISpec.groovy
index abac9b167e18f..1768e02fab744 100644
--- a/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/APISpec.groovy
+++ b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/APISpec.groovy
@@ -17,6 +17,7 @@ import com.azure.core.util.configuration.ConfigurationManager
import com.azure.identity.credential.EnvironmentCredential
import com.azure.storage.blob.models.*
import com.azure.storage.common.credentials.SharedKeyCredential
+import io.netty.buffer.ByteBuf
import org.junit.Assume
import org.spockframework.lang.ISpecificationContext
import reactor.core.publisher.Flux
@@ -409,7 +410,7 @@ class APISpec extends Specification {
response.value().contentEncoding() == contentEncoding &&
response.value().contentLanguage() == contentLanguage &&
response.value().contentMD5() == contentMD5 &&
- response.headers().value("Content-Type") == (contentType == null ? "application/octet-stream" : contentType)
+ response.headers().value("Content-Type") == contentType
}
static Metadata getMetadataFromHeaders(HttpHeaders headers) {
@@ -502,43 +503,51 @@ class APISpec extends Specification {
to play too nicely with mocked objects and the complex reflection stuff on both ends made it more difficult to work
with than was worth it. Because this type is just for BlobDownload, we don't need to accept a header type.
*/
- def getStubResponseForBlobDownload(int code, Flux<ByteBuf> body, String etag) {
- return new HttpResponse() {
+ static class MockDownloadHttpResponse extends HttpResponse {
+ private final int statusCode
+ private final HttpHeaders headers
+ private final Flux<ByteBuf> body
+
+ MockDownloadHttpResponse(HttpResponse response, int statusCode, Flux<ByteBuf> body) {
+ this.request(response.request())
+ this.statusCode = statusCode
+ this.headers = response.headers()
+ this.body = body
+ }
- @Override
- int statusCode() {
- return code
- }
+ @Override
+ int statusCode() {
+ return statusCode
+ }
- @Override
- String headerValue(String s) {
- return null
- }
+ @Override
+ String headerValue(String s) {
+ return headers.value(s)
+ }
- @Override
- HttpHeaders headers() {
- return new HttpHeaders()
- }
+ @Override
+ HttpHeaders headers() {
+ return headers
+ }
- @Override
- Flux<ByteBuf> body() {
- return body
- }
+ @Override
+ Flux<ByteBuf> body() {
+ return body
+ }
- @Override
- Mono<byte[]> bodyAsByteArray() {
- return null
- }
+ @Override
+ Mono<byte[]> bodyAsByteArray() {
+ return Mono.error(new IOException())
+ }
- @Override
- Mono<String> bodyAsString() {
- return null
- }
+ @Override
+ Mono<String> bodyAsString() {
+ return Mono.error(new IOException())
+ }
- @Override
- Mono<String> bodyAsString(Charset charset) {
- return null
- }
+ @Override
+ Mono<String> bodyAsString(Charset charset) {
+ return Mono.error(new IOException())
}
}
@@ -559,6 +568,7 @@ class APISpec extends Specification {
return new BlobServiceClientBuilder()
.endpoint(String.format("https://%s.blob.core.windows.net/", primaryCreds.accountName()))
.credential(new EnvironmentCredential()) // AZURE_TENANT_ID, AZURE_CLIENT_ID, AZURE_CLIENT_SECRET
+ .httpLogDetailLevel(HttpLogDetailLevel.BODY_AND_HEADERS)
.buildClient()
}
diff --git a/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/AadLoginTest.java b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/AadLoginTest.java
deleted file mode 100644
index b284e65beb8ce..0000000000000
--- a/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/AadLoginTest.java
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package com.azure.storage.blob;
-
-import com.azure.identity.credential.EnvironmentCredential;
-import com.azure.storage.blob.models.ContainerItem;
-import org.junit.BeforeClass;
-
-import java.util.Random;
-
-public class AadLoginTest {
- private static final Random RANDOM = new Random();
- private static BlobServiceClient storageClient;
-
- @BeforeClass
- public static void setup() {
- storageClient = new BlobServiceClientBuilder()
- .endpoint("https://" + System.getenv("ACCOUNT_NAME") + ".blob.core.windows.net")
- .credential(new EnvironmentCredential())
-// .httpClient(HttpClient.createDefault().proxy(() -> new ProxyOptions(Type.HTTP, new InetSocketAddress("localhost", 8888))))
- .buildClient();
- }
-
- //@Test
- public void listContainers() {
- for (ContainerItem item : storageClient.listContainers()) {
- System.out.println(item.name());
- }
- }
-}
diff --git a/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/AppendBlobAPITest.groovy b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/AppendBlobAPITest.groovy
index b295c16c976d9..5d6f364f7e5e7 100644
--- a/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/AppendBlobAPITest.groovy
+++ b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/AppendBlobAPITest.groovy
@@ -57,6 +57,9 @@ class AppendBlobAPITest extends APISpec {
bu.create(headers, null, null, null)
Response<BlobProperties> response = bu.getProperties()
+ // If the value isn't set, the service will automatically set it
+ contentType = (contentType == null) ? "application/octet-stream" : contentType
+
then:
validateBlobProperties(response, cacheControl, contentDisposition, contentEncoding, contentLanguage, contentMD5, contentType)
diff --git a/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/BlobAPITest.groovy b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/BlobAPITest.groovy
index 5c1bd11787368..ec294bee15419 100644
--- a/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/BlobAPITest.groovy
+++ b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/BlobAPITest.groovy
@@ -4,10 +4,15 @@
package com.azure.storage.blob
import com.azure.core.http.HttpHeaders
+import com.azure.core.http.HttpPipelineCallContext
+import com.azure.core.http.HttpPipelineNextPolicy
+import com.azure.core.http.policy.HttpPipelinePolicy
import com.azure.core.http.rest.Response
import com.azure.core.http.rest.VoidResponse
import com.azure.core.implementation.util.ImplUtils
import com.azure.storage.blob.models.*
+import reactor.core.publisher.Flux
+import reactor.core.publisher.Mono
import spock.lang.Unroll
import java.nio.ByteBuffer
@@ -76,50 +81,50 @@ class BlobAPITest extends APISpec {
This is to test the appropriate integration of DownloadResponse, including setting the correct range values on
HTTPGetterInfo.
*/
-// def "Download with retry range"() {
-// /*
-// We are going to make a request for some range on a blob. The Flux returned will throw an exception, forcing
-// a retry per the ReliableDownloadOptions. The next request should have the same range header, which was generated
-// from the count and offset values in HTTPGetterInfo that was constructed on the initial call to download. We
-// don't need to check the data here, but we want to ensure that the correct range is set each time. This will
-// test the correction of a bug that was found which caused HTTPGetterInfo to have an incorrect offset when it was
-// constructed in BlobClient.download().
-// */
-// setup:
-// HttpPipelinePolicy mockPolicy = Mock(HttpPipelinePolicy) {
-// process(_ as HttpPipelineCallContext, _ as HttpPipelineNextPolicy) >> {
-// HttpPipelineCallContext context, HttpPipelineNextPolicy next ->
-// HttpRequest request = context.httpRequest()
-// if (request.headers().value("x-ms-range") != "bytes=2-6") {
-// return Mono.error(new IllegalArgumentException("The range header was not set correctly on retry."))
-// }
-// else {
-// // ETag can be a dummy value. It's not validated, but DownloadResponse requires one
-// // TODO stub responses failing azure.core.implementation checks; too many nulls
-// return Mono.just(getStubResponseForBlobDownload(206, Flux.error(new IOException()), "etag"))
-// }
-// }
-// }
-//
-// BlobClient bu2 = new BlobClientBuilder()
-// .endpoint(bu.getBlobUrl().toString())
-// .credential(primaryCreds)
-// .addPolicy(mockPolicy)
-// .buildClient()
-//
-// when:
-// def range = new BlobRange(2, 5L)
-// def options = new ReliableDownloadOptions().maxRetryRequests(3)
-// bu2.download(new ByteArrayOutputStream(), options, range, null, false, null)
-//
-// then:
-// /*
-// Because the dummy Flux always throws an error. This will also validate that an IllegalArgumentException is
-// NOT thrown because the types would not match.
-// */
-// def e = thrown(RuntimeException)
-// e.getCause() instanceof IOException
-// }
+ def "Download with retry range"() {
+ /*
+ We are going to make a request for some range on a blob. The Flux returned will throw an exception, forcing
+ a retry per the ReliableDownloadOptions. The next request should have the same range header, which was generated
+ from the count and offset values in HTTPGetterInfo that was constructed on the initial call to download. We
+ don't need to check the data here, but we want to ensure that the correct range is set each time. This will
+ test the correction of a bug that was found which caused HTTPGetterInfo to have an incorrect offset when it was
+ constructed in BlobClient.download().
+ */
+ setup:
+ HttpPipelinePolicy mockPolicy = Mock(HttpPipelinePolicy) {
+ process(_ as HttpPipelineCallContext, _ as HttpPipelineNextPolicy) >> {
+ HttpPipelineCallContext context, HttpPipelineNextPolicy next ->
+ return next.process()
+ .flatMap {
+ if (it.request().headers().value("x-ms-range") != "bytes=2-6") {
+ return Mono.error(new IllegalArgumentException("The range header was not set correctly on retry."))
+ } else {
+ // ETag can be a dummy value. It's not validated, but DownloadResponse requires one
+ return Mono.just(new MockDownloadHttpResponse(it, 206, Flux.error(new IOException())))
+ }
+ }
+ }
+ }
+
+ BlobClient bu2 = new BlobClientBuilder()
+ .endpoint(bu.getBlobUrl().toString())
+ .credential(primaryCreds)
+ .addPolicy(mockPolicy)
+ .buildBlobClient()
+
+ when:
+ BlobRange range = new BlobRange(2, 5L)
+ ReliableDownloadOptions options = new ReliableDownloadOptions().maxRetryRequests(3)
+ bu2.download(new ByteArrayOutputStream(), range, options, null, false, null)
+
+ then:
+ /*
+ Because the dummy Flux always throws an error. This will also validate that an IllegalArgumentException is
+ NOT thrown because the types would not match.
+ */
+ def e = thrown(RuntimeException)
+ e.getCause() instanceof IOException
+ }
def "Download min"() {
when:
@@ -358,8 +363,7 @@ class BlobAPITest extends APISpec {
validateBasicHeaders(response.headers())
}
- // TODO (alzimmer): Figure out why getProperties returns null after setHTTPHeaders
- /*def "Set HTTP headers min"() {
+ def "Set HTTP headers min"() {
when:
BlobProperties properties = bu.getProperties().value()
BlobHTTPHeaders headers = new BlobHTTPHeaders()
@@ -368,15 +372,15 @@ class BlobAPITest extends APISpec {
.blobContentType("type")
.blobCacheControl(properties.cacheControl())
.blobContentLanguage(properties.contentLanguage())
- .blobContentMD5(Base64.getDecoder().decode(properties.contentMD5()))
+ .blobContentMD5(Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(defaultData.array())))
bu.setHTTPHeaders(headers)
then:
- bu.getProperties().headers().value("x-ms-blob-content-type") == "type"
- }*/
+ bu.getProperties().headers().value("Content-Type") == "type"
+ }
- /*@Unroll
+ @Unroll
def "Set HTTP headers headers"() {
setup:
BlobHTTPHeaders putHeaders = new BlobHTTPHeaders().blobCacheControl(cacheControl)
@@ -397,7 +401,7 @@ class BlobAPITest extends APISpec {
cacheControl | contentDisposition | contentEncoding | contentLanguage | contentMD5 | contentType
null | null | null | null | null | null
"control" | "disposition" | "encoding" | "language" | Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(defaultData.array())) | "type"
- }*/
+ }
@Unroll
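The MD5 values these re-enabled tests pass and validate are Base64-encoded digests. A standalone sketch of that computation, using only the JDK:

```java
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Base64;

public final class ContentMd5Sketch {
    public static void main(String[] args) throws NoSuchAlgorithmException {
        byte[] content = "default data".getBytes(StandardCharsets.UTF_8);

        // The service stores and returns Content-MD5 as Base64 text, so the tests
        // compare against the Base64-encoded digest rather than the raw 16 bytes.
        byte[] rawDigest = MessageDigest.getInstance("MD5").digest(content);
        String base64Digest = Base64.getEncoder().encodeToString(rawDigest);

        System.out.println(base64Digest);
    }
}
```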
diff --git a/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/BlockBlobAPITest.groovy b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/BlockBlobAPITest.groovy
index 63ec8837697b1..d5aaa8a65dcd5 100644
--- a/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/BlockBlobAPITest.groovy
+++ b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/BlockBlobAPITest.groovy
@@ -349,13 +349,16 @@ class BlockBlobAPITest extends APISpec {
bu.commitBlockList(ids, headers, null, null, null)
Response<BlobProperties> response = bu.getProperties()
+ // If the value isn't set, the service will automatically set it
+ contentType = (contentType == null) ? "application/octet-stream" : contentType
+
then:
validateBlobProperties(response, cacheControl, contentDisposition, contentEncoding, contentLanguage, contentMD5, contentType)
where:
cacheControl | contentDisposition | contentEncoding | contentLanguage | contentMD5 | contentType
null | null | null | null | null | null
- // "control" | "disposition" | "encoding" | "language" | MessageDigest.getInstance("MD5").digest(defaultData.array()) | "type" TODO (alzimmer): Figure out why getProperties returns null for this one
+ "control" | "disposition" | "encoding" | "language" | MessageDigest.getInstance("MD5").digest(defaultData.array()) | "type"
}
@Unroll
@@ -614,8 +617,7 @@ class BlockBlobAPITest extends APISpec {
bu.upload(null, 0).statusCode() == 201
}
- // TODO (alzimmer): Determine why getProperties returns null
- /*@Unroll
+ @Unroll
def "Upload headers"() {
setup:
BlobHTTPHeaders headers = new BlobHTTPHeaders().blobCacheControl(cacheControl)
@@ -629,14 +631,18 @@ class BlockBlobAPITest extends APISpec {
bu.upload(defaultInputStream.get(), defaultDataSize, headers, null, null, null)
Response<BlobProperties> response = bu.getProperties()
+ // If the value isn't set, the service will automatically set it
+ contentMD5 = (contentMD5 == null) ? MessageDigest.getInstance("MD5").digest(defaultData.array()) : contentMD5
+ contentType = (contentType == null) ? "application/octet-stream" : contentType
+
then:
validateBlobProperties(response, cacheControl, contentDisposition, contentEncoding, contentLanguage, contentMD5, contentType)
where:
- cacheControl | contentDisposition | contentEncoding | contentLanguage | contentMD5 | contentType
- null | null | null | null | null | null
- "control" | "disposition" | "encoding" | "language" | MessageDigest.getInstance("MD5").digest(defaultData.array()) | "type"
- }*/
+ cacheControl | contentDisposition | contentEncoding | contentLanguage | contentMD5 | contentType
+ null | null | null | null | null | null
+ "control" | "disposition" | "encoding" | "language" | MessageDigest.getInstance("MD5").digest(defaultData.array()) | "type"
+ }
@Unroll
def "Upload metadata"() {
diff --git a/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/DownloadResponseMockFlux.java b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/DownloadResponseMockFlux.java
new file mode 100644
index 0000000000000..864e23a7a0825
--- /dev/null
+++ b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/DownloadResponseMockFlux.java
@@ -0,0 +1,214 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package com.azure.storage.blob;
+
+import com.azure.core.http.HttpHeaders;
+import com.azure.core.http.HttpResponse;
+import com.azure.storage.blob.models.BlobDownloadHeaders;
+import com.azure.storage.blob.models.BlobsDownloadResponse;
+import com.azure.storage.blob.models.StorageErrorException;
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.Unpooled;
+import reactor.core.CoreSubscriber;
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Mono;
+import reactor.core.publisher.Operators;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.charset.Charset;
+
+class DownloadResponseMockFlux extends Flux<ByteBuf> {
+ static final int DR_TEST_SCENARIO_SUCCESSFUL_ONE_CHUNK = 0;
+ static final int DR_TEST_SCENARIO_SUCCESSFUL_MULTI_CHUNK = 1;
+ static final int DR_TEST_SCENARIO_SUCCESSFUL_STREAM_FAILURES = 2;
+ static final int DR_TEST_SCENARIO_MAX_RETRIES_EXCEEDED = 3;
+ static final int DR_TEST_SCENARIO_NON_RETRYABLE_ERROR = 4;
+ static final int DR_TEST_SCENARIO_ERROR_GETTER_MIDDLE = 6;
+ static final int DR_TEST_SCENARIO_INFO_TEST = 8;
+
+ private int scenario;
+ private int tryNumber;
+ private HTTPGetterInfo info;
+ private ByteBuffer scenarioData;
+
+ DownloadResponseMockFlux(int scenario) {
+ this.scenario = scenario;
+ switch (this.scenario) {
+ case DR_TEST_SCENARIO_SUCCESSFUL_ONE_CHUNK:
+ this.scenarioData = APISpec.getRandomData(512 * 1024);
+ break;
+ case DR_TEST_SCENARIO_SUCCESSFUL_MULTI_CHUNK:
+ case DR_TEST_SCENARIO_SUCCESSFUL_STREAM_FAILURES:
+ this.scenarioData = APISpec.getRandomData(1024);
+ break;
+ case DR_TEST_SCENARIO_MAX_RETRIES_EXCEEDED:
+ case DR_TEST_SCENARIO_NON_RETRYABLE_ERROR:
+ case DR_TEST_SCENARIO_ERROR_GETTER_MIDDLE:
+ case DR_TEST_SCENARIO_INFO_TEST:
+ break;
+ default:
+ throw new IllegalArgumentException("Invalid download resource test scenario.");
+ }
+ }
+
+ ByteBuffer getScenarioData() {
+ return this.scenarioData;
+ }
+
+ int getTryNumber() {
+ return this.tryNumber;
+ }
+
+ @Override
+ public void subscribe(CoreSubscriber<? super ByteBuf> subscriber) {
+ switch (this.scenario) {
+ case DR_TEST_SCENARIO_SUCCESSFUL_ONE_CHUNK:
+ subscriber.onNext(Unpooled.wrappedBuffer(this.scenarioData.duplicate()));
+ Operators.complete(subscriber);
+ break;
+
+ case DR_TEST_SCENARIO_SUCCESSFUL_MULTI_CHUNK:
+ for (int i = 0; i < 4; i++) {
+ ByteBuffer toSend = this.scenarioData.duplicate();
+ toSend.position(i * 256);
+ toSend.limit((i + 1) * 256);
+ subscriber.onNext(Unpooled.wrappedBuffer(toSend));
+ }
+ Operators.complete(subscriber);
+ break;
+
+ case DR_TEST_SCENARIO_SUCCESSFUL_STREAM_FAILURES:
+ if (this.tryNumber <= 3) {
+ // tryNumber is 1-indexed, so we have to subtract 1.
+ if (this.info.offset() != (this.tryNumber - 1) * 256
+ || this.info.count() != this.scenarioData.remaining() - (this.tryNumber - 1) * 256) {
+ Operators.error(subscriber, new IllegalArgumentException("Info values are incorrect."));
+ return;
+ }
+ ByteBuffer toSend = this.scenarioData.duplicate();
+ toSend.position((this.tryNumber - 1) * 256);
+ toSend.limit(this.tryNumber * 256);
+ subscriber.onNext(Unpooled.wrappedBuffer(toSend));
+ Operators.error(subscriber, new IOException());
+ break;
+ }
+ if (this.info.offset() != (this.tryNumber - 1) * 256
+ || this.info.count() != this.scenarioData.remaining() - (this.tryNumber - 1) * 256) {
+ Operators.error(subscriber, new IllegalArgumentException("Info values are incorrect."));
+ return;
+ }
+ ByteBuffer toSend = this.scenarioData.duplicate();
+ toSend.position((this.tryNumber - 1) * 256);
+ toSend.limit(this.tryNumber * 256);
+ subscriber.onNext(Unpooled.wrappedBuffer(toSend));
+ Operators.complete(subscriber);
+ break;
+
+ case DR_TEST_SCENARIO_MAX_RETRIES_EXCEEDED:
+ Operators.error(subscriber, new IOException());
+ break;
+
+ case DR_TEST_SCENARIO_NON_RETRYABLE_ERROR:
+ Operators.error(subscriber, new Exception());
+ break;
+
+ case DR_TEST_SCENARIO_ERROR_GETTER_MIDDLE:
+ if (this.tryNumber == 1) {
+ /*
+ We return a retryable error here so we have to invoke the getter, which will throw an error in
+ this case.
+ */
+ Operators.error(subscriber, new IOException());
+ } else {
+ Operators.error(subscriber, new IllegalArgumentException("Retried after getter error."));
+ }
+ break;
+
+ case DR_TEST_SCENARIO_INFO_TEST:
+ switch (this.tryNumber) {
+ case 1: // Test the value of info when getting the initial response.
+ case 2: // Test the value of info when getting an intermediate response.
+ Operators.error(subscriber, new IOException());
+ break;
+ case 3:
+ // All calls to getter checked. Exit. This test does not check for data.
+ Operators.complete(subscriber);
+ break;
+ default:
+ throw new IllegalArgumentException("Invalid try number.");
+ }
+ break;
+
+ default:
+ Operators.error(subscriber, new IllegalArgumentException("Invalid test case"));
+ }
+ }
+
+ Mono<DownloadAsyncResponse> getter(HTTPGetterInfo info) {
+ this.tryNumber++;
+ this.info = info;
+ BlobsDownloadResponse rawResponse = new BlobsDownloadResponse(null, 200, new HttpHeaders(), this, new BlobDownloadHeaders());
+ DownloadAsyncResponse response = new DownloadAsyncResponse(rawResponse, info, this::getter);
+
+ switch (this.scenario) {
+ case DR_TEST_SCENARIO_ERROR_GETTER_MIDDLE:
+ switch (this.tryNumber) {
+ case 1:
+ return Mono.just(response);
+ case 2:
+ /*
+ This validates that we don't retry in the getter even if it's a retryable error from the
+ service.
+ */
+ throw new StorageErrorException("Message", new HttpResponse() {
+ @Override
+ public int statusCode() {
+ return 500;
+ }
+
+ @Override
+ public String headerValue(String s) {
+ return null;
+ }
+
+ @Override
+ public HttpHeaders headers() {
+ return null;
+ }
+
+ @Override
+ public Flux<ByteBuf> body() {
+ return null;
+ }
+
+ @Override
+ public Mono<byte[]> bodyAsByteArray() {
+ return null;
+ }
+
+ @Override
+ public Mono<String> bodyAsString() {
+ return null;
+ }
+
+ @Override
+ public Mono<String> bodyAsString(Charset charset) {
+ return null;
+ }
+ });
+ default:
+ throw new IllegalArgumentException("Retried after error in getter");
+ }
+ case DR_TEST_SCENARIO_INFO_TEST:
+ // We also test that the info is updated in DR_TEST_SCENARIO_SUCCESSFUL_STREAM_FAILURES.
+ if (info.count() != 10 || info.offset() != 20 || !info.eTag().equals("etag")) {
+ throw new IllegalArgumentException("Info values incorrect");
+ }
+ return Mono.just(response);
+ default:
+ return Mono.just(response);
+ }
+ }
+}
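The stream-failure scenario above asserts that each retry's HTTPGetterInfo has advanced past the bytes already delivered. A plain-Java sketch of that bookkeeping (the 256-byte chunks mirror the mock; the offset/count update rule is inferred from the assertions, not copied from the SDK):

```java
public final class ResumeInfoSketch {
    public static void main(String[] args) {
        long offset = 0;   // what the mock checks via info.offset()
        long count = 1024; // what the mock checks via info.count(): bytes still outstanding

        // Each failed try in DR_TEST_SCENARIO_SUCCESSFUL_STREAM_FAILURES delivers
        // 256 bytes before erroring, so the resumed GET must skip past them.
        for (int tryNumber = 1; tryNumber <= 4; tryNumber++) {
            System.out.printf("try %d: GET offset=%d count=%d%n", tryNumber, offset, count);
            long received = 256;
            offset += received;
            count -= received;
        }
    }
}
```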
diff --git a/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/DownloadResponseTest.groovy b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/DownloadResponseTest.groovy
new file mode 100644
index 0000000000000..f784283d4de6c
--- /dev/null
+++ b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/DownloadResponseTest.groovy
@@ -0,0 +1,152 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package com.azure.storage.blob
+
+
+import com.azure.core.implementation.util.FluxUtil
+import com.azure.storage.blob.models.ReliableDownloadOptions
+import com.azure.storage.blob.models.StorageErrorException
+import spock.lang.Unroll
+
+class DownloadResponseTest extends APISpec {
+ BlockBlobClient bu
+
+ def setup() {
+ bu = cu.getBlockBlobClient(generateBlobName())
+ bu.upload(defaultInputStream.get(), defaultText.length())
+ }
+
+ /*
+ This shouldn't really be different from anything else we're doing in the other tests. Just a sanity check against
+ a real use case.
+ */
+
+ def "Network call"() {
+ expect:
+ OutputStream outputStream = new ByteArrayOutputStream()
+ bu.download(outputStream)
+ outputStream.toByteArray() == defaultData.array()
+ }
+
+ @Unroll
+ def "Successful"() {
+ setup:
+ DownloadResponseMockFlux flux = new DownloadResponseMockFlux(scenario)
+
+ HTTPGetterInfo info = new HTTPGetterInfo()
+ .offset(0)
+ .count(flux.getScenarioData().remaining())
+ .eTag("etag")
+
+ ReliableDownloadOptions options = new ReliableDownloadOptions().maxRetryRequests(5)
+
+ when:
+ DownloadAsyncResponse response = flux.getter(info).block()
+
+ then:
+ FluxUtil.collectByteBufStream(response.body(options), false).block().nioBuffer() == flux.getScenarioData()
+ flux.getTryNumber() == tryNumber
+
+
+ where:
+ scenario | tryNumber
+ DownloadResponseMockFlux.DR_TEST_SCENARIO_SUCCESSFUL_ONE_CHUNK | 1
+ DownloadResponseMockFlux.DR_TEST_SCENARIO_SUCCESSFUL_MULTI_CHUNK | 1
+ DownloadResponseMockFlux.DR_TEST_SCENARIO_SUCCESSFUL_STREAM_FAILURES | 4
+ }
+
+ @Unroll
+ def "Failure"() {
+ setup:
+ DownloadResponseMockFlux flux = new DownloadResponseMockFlux(scenario)
+ ReliableDownloadOptions options = new ReliableDownloadOptions().maxRetryRequests(5)
+ HTTPGetterInfo info = new HTTPGetterInfo().eTag("etag")
+
+ when:
+ DownloadAsyncResponse response = flux.getter(info).block()
+ response.body(options).blockFirst()
+
+ then:
+ def e = thrown(Throwable) // Blocking subscribe will sometimes wrap the IOException in a RuntimeException.
+ if (e.getCause() != null) {
+ e = e.getCause()
+ }
+ exceptionType.isInstance(e)
+ flux.getTryNumber() == tryNumber
+
+ /*
+ tryNumber is 7 because the initial request is the first try, then it will fail when retryCount>maxRetryCount,
+ which is when retryCount=6 and therefore tryNumber=7
+ */
+ where:
+ scenario | exceptionType | tryNumber
+ DownloadResponseMockFlux.DR_TEST_SCENARIO_MAX_RETRIES_EXCEEDED | IOException | 7
+ DownloadResponseMockFlux.DR_TEST_SCENARIO_NON_RETRYABLE_ERROR | Exception | 1
+ DownloadResponseMockFlux.DR_TEST_SCENARIO_ERROR_GETTER_MIDDLE | StorageErrorException | 2
+ }
+
+ @Unroll
+ def "Info null IA"() {
+ setup:
+ DownloadResponseMockFlux flux = new DownloadResponseMockFlux(DownloadResponseMockFlux.DR_TEST_SCENARIO_SUCCESSFUL_ONE_CHUNK)
+
+ when:
+ new DownloadAsyncResponse(flux.getter(info).block().rawResponse(), info, { HTTPGetterInfo newInfo -> flux.getter(newInfo) })
+
+ then:
+ thrown(IllegalArgumentException)
+
+ where:
+ info | _
+ null | _
+ new HTTPGetterInfo().eTag(null) | _
+ }
+
+ def "Options IA"() {
+ when:
+ new ReliableDownloadOptions().maxRetryRequests(-1)
+
+ then:
+ thrown(IllegalArgumentException)
+ }
+
+ def "Getter IA"() {
+ setup:
+ DownloadResponseMockFlux flux = new DownloadResponseMockFlux(DownloadResponseMockFlux.DR_TEST_SCENARIO_SUCCESSFUL_ONE_CHUNK)
+
+ when:
+ DownloadAsyncResponse response = new DownloadAsyncResponse(flux.getter(new HTTPGetterInfo()).block()
+ .rawResponse(), new HTTPGetterInfo().eTag("etag"), null)
+ response.body(null).blockFirst()
+
+ then:
+ thrown(IllegalArgumentException)
+ }
+
+ def "Info"() {
+ setup:
+ DownloadResponseMockFlux flux = new DownloadResponseMockFlux(DownloadResponseMockFlux.DR_TEST_SCENARIO_INFO_TEST)
+ HTTPGetterInfo info = new HTTPGetterInfo()
+ .offset(20)
+ .count(10)
+ .eTag("etag")
+
+ ReliableDownloadOptions options = new ReliableDownloadOptions().maxRetryRequests(5)
+
+ when:
+ DownloadAsyncResponse response = flux.getter(info).block()
+ response.body(options).blockFirst()
+
+ then:
+ flux.getTryNumber() == 3
+ }
+
+ def "Info count IA"() {
+ when:
+ new HTTPGetterInfo().count(-1)
+
+ then:
+ thrown(IllegalArgumentException)
+ }
+}
diff --git a/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/HelperTest.groovy b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/HelperTest.groovy
new file mode 100644
index 0000000000000..0142f21c9bd41
--- /dev/null
+++ b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/HelperTest.groovy
@@ -0,0 +1,730 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package com.azure.storage.blob
+
+import com.azure.core.http.rest.Response
+import com.azure.core.http.rest.VoidResponse
+import com.azure.storage.blob.models.BlobRange
+import com.azure.storage.blob.models.UserDelegationKey
+import com.azure.storage.common.credentials.SASTokenCredential
+import com.azure.storage.common.credentials.SharedKeyCredential
+import spock.lang.Unroll
+
+import java.time.LocalDateTime
+import java.time.OffsetDateTime
+import java.time.ZoneOffset
+
+class HelperTest extends APISpec {
+
+ // TODO (alzimmer): Turn this on when nextPageLink can be passed into listing
+ /*def "responseError"() {
+ when:
+ cu.listBlobsFlat().iterator().hasNext()
+
+ then:
+ def e = thrown(StorageException)
+ e.errorCode() == StorageErrorCode.INVALID_QUERY_PARAMETER_VALUE
+ e.statusCode() == 400
+ e.message().contains("Value for one of the query parameters specified in the request URI is invalid.")
+    e.getMessage().contains("<?xml")
+    }*/
+
+        Response<BlobProperties> properties = bsu.getProperties()
+
+ then:
+ properties.value().cacheControl() == "cache"
+ properties.value().contentDisposition() == "disposition"
+ properties.value().contentEncoding() == "encoding"
+ properties.value().contentLanguage() == "language"
+ properties.headers().value("Content-Type") == "type"
+ }
+
+ /*
+ This test will ensure that each field gets placed into the proper location within the string to sign and that null
+ values are handled correctly. We will validate the whole SAS with service calls as well as correct serialization of
+ individual parts later.
+ */
+
+ @Unroll
+ def "serviceSasSignatures string to sign"() {
+ when:
+ ServiceSASSignatureValues v = new ServiceSASSignatureValues()
+ if (permissions != null) {
+ v.permissions(new BlobSASPermission().read(true).toString())
+ } else {
+ v.permissions("")
+ }
+
+ if (snapId != null) {
+ v.resource(Constants.UrlConstants.SAS_BLOB_SNAPSHOT_CONSTANT)
+ } else {
+ v.resource(Constants.UrlConstants.SAS_BLOB_CONSTANT)
+ }
+
+ v.startTime(startTime)
+ .expiryTime(expiryTime)
+ .canonicalName(String.format("/blob/%s/containerName/blobName", primaryCreds.accountName()))
+ .snapshotId(snapId)
+
+ if (ipRange != null) {
+ v.ipRange(new IPRange().ipMin("ip"))
+ }
+
+ v.identifier(identifier)
+ .protocol(protocol)
+ .cacheControl(cacheControl)
+ .contentDisposition(disposition)
+ .contentEncoding(encoding)
+ .contentLanguage(language)
+ .contentType(type)
+
+ SASQueryParameters token = v.generateSASQueryParameters(primaryCreds)
+
+ if (startTime != null) {
+ expectedStringToSign = String.format(expectedStringToSign,
+ Utility.ISO_8601_UTC_DATE_FORMATTER.format(startTime),
+ Utility.ISO_8601_UTC_DATE_FORMATTER.format(expiryTime),
+ primaryCreds.accountName())
+ } else {
+ expectedStringToSign = String.format(expectedStringToSign,
+ Utility.ISO_8601_UTC_DATE_FORMATTER.format(expiryTime),
+ primaryCreds.accountName())
+ }
+
+ then:
+ token.signature() == primaryCreds.computeHmac256(expectedStringToSign)
+
+ /*
+ We don't test the blob or containerName properties because canonicalized resource is always added as at least
+ /blob/accountName. We test canonicalization of resources later. Again, this is not to test a fully functional
+ sas but the construction of the string to sign.
+ Signed resource is tested elsewhere, as we work some minor magic in choosing which value to use.
+ */
+ where:
+ permissions | startTime | expiryTime | identifier | ipRange | protocol | snapId | cacheControl | disposition | encoding | language | type || expectedStringToSign
+ new BlobSASPermission() | null | OffsetDateTime.now(ZoneOffset.UTC).plusDays(1) | null | null | null | null | null | null | null | null | null || "r\n\n%s\n" + "/blob/%s/containerName/blobName\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\n\n\n\n"
+ null | OffsetDateTime.now(ZoneOffset.UTC).minusDays(1) | OffsetDateTime.now(ZoneOffset.UTC).plusDays(1) | null | null | null | null | null | null | null | null | null || "\n%s\n%s\n/blob/%s/containerName/blobName\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\n\n\n\n"
+ null | null | OffsetDateTime.now(ZoneOffset.UTC).plusDays(1) | null | null | null | null | null | null | null | null | null || "\n\n%s\n/blob/%s/containerName/blobName\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\n\n\n\n"
+ null | null | OffsetDateTime.now(ZoneOffset.UTC).plusDays(1) | "id" | null | null | null | null | null | null | null | null || "\n\n%s\n/blob/%s/containerName/blobName\nid\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\n\n\n\n"
+ null | null | OffsetDateTime.now(ZoneOffset.UTC).plusDays(1) | null | new IPRange() | null | null | null | null | null | null | null || "\n\n%s\n/blob/%s/containerName/blobName\n\nip\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\n\n\n\n"
+ null | null | OffsetDateTime.now(ZoneOffset.UTC).plusDays(1) | null | null | SASProtocol.HTTPS_ONLY | null | null | null | null | null | null || "\n\n%s\n/blob/%s/containerName/blobName\n\n\n" + SASProtocol.HTTPS_ONLY + "\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\n\n\n\n"
+ null | null | OffsetDateTime.now(ZoneOffset.UTC).plusDays(1) | null | null | null | "snapId" | null | null | null | null | null || "\n\n%s\n/blob/%s/containerName/blobName\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nbs\nsnapId\n\n\n\n\n"
+ null | null | OffsetDateTime.now(ZoneOffset.UTC).plusDays(1) | null | null | null | null | "control" | null | null | null | null || "\n\n%s\n/blob/%s/containerName/blobName\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\ncontrol\n\n\n\n"
+ null | null | OffsetDateTime.now(ZoneOffset.UTC).plusDays(1) | null | null | null | null | null | "disposition" | null | null | null || "\n\n%s\n/blob/%s/containerName/blobName\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\ndisposition\n\n\n"
+ null | null | OffsetDateTime.now(ZoneOffset.UTC).plusDays(1) | null | null | null | null | null | null | "encoding" | null | null || "\n\n%s\n/blob/%s/containerName/blobName\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\n\nencoding\n\n"
+ null | null | OffsetDateTime.now(ZoneOffset.UTC).plusDays(1) | null | null | null | null | null | null | null | "language" | null || "\n\n%s\n/blob/%s/containerName/blobName\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\n\n\nlanguage\n"
+ null | null | OffsetDateTime.now(ZoneOffset.UTC).plusDays(1) | null | null | null | null | null | null | null | null | "type" || "\n\n%s\n/blob/%s/containerName/blobName\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\n\n\n\ntype"
+ }
+
+ @Unroll
+ def "serviceSasSignatures string to sign user delegation key"() {
+ when:
+ ServiceSASSignatureValues v = new ServiceSASSignatureValues()
+ if (permissions != null) {
+ v.permissions(new BlobSASPermission().read(true).toString())
+ }
+
+ v.startTime(startTime)
+ .canonicalName(String.format("/blob/%s/containerName/blobName", primaryCreds.accountName()))
+ .snapshotId(snapId)
+
+ if (expiryTime == null) {
+ v.expiryTime(OffsetDateTime.now())
+ } else {
+ v.expiryTime(expiryTime)
+ }
+
+ if (snapId != null) {
+ v.resource(Constants.UrlConstants.SAS_BLOB_SNAPSHOT_CONSTANT)
+ } else {
+ v.resource(Constants.UrlConstants.SAS_BLOB_CONSTANT)
+ }
+
+ if (ipRange != null) {
+ v.ipRange(new IPRange().ipMin("ip"))
+ }
+
+ v.protocol(protocol)
+ .cacheControl(cacheControl)
+ .contentDisposition(disposition)
+ .contentEncoding(encoding)
+ .contentLanguage(language)
+ .contentType(type)
+
+ UserDelegationKey key = new UserDelegationKey()
+ .signedOid(keyOid)
+ .signedTid(keyTid)
+ .signedStart(keyStart)
+ .signedExpiry(keyExpiry)
+ .signedService(keyService)
+ .signedVersion(keyVersion)
+ .value(keyValue)
+
+ SASQueryParameters token = v.generateSASQueryParameters(key)
+
+ expectedStringToSign = String.format(expectedStringToSign, primaryCreds.accountName())
+
+ then:
+ token.signature() == Utility.delegateComputeHmac256(key, expectedStringToSign)
+
+ /*
+ We test string to sign functionality directly related to user delegation sas specific parameters
+ */
+ where:
+ permissions | startTime | expiryTime | keyOid | keyTid | keyStart | keyExpiry | keyService | keyVersion | keyValue | ipRange | protocol | snapId | cacheControl | disposition | encoding | language | type || expectedStringToSign
+ new BlobSASPermission() | null | null | null | null | null | null | null | null | "3hd4LRwrARVGbeMRQRfTLIsGMkCPuZJnvxZDU7Gak8c=" | null | null | null | null | null | null | null | null || "r\n\n\n" + "/blob/%s/containerName/blobName\n\n\n\n\n\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\n\n\n\n"
+ null | OffsetDateTime.of(2017, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC) | null | null | null | null | null | null | null | "3hd4LRwrARVGbeMRQRfTLIsGMkCPuZJnvxZDU7Gak8c=" | null | null | null | null | null | null | null | null || "\n" + Utility.ISO_8601_UTC_DATE_FORMATTER.format(OffsetDateTime.of(2017, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC)) + "\n\n/blob/%s/containerName/blobName\n\n\n\n\n\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\n\n\n\n"
+ null | null | OffsetDateTime.of(2017, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC) | null | null | null | null | null | null | "3hd4LRwrARVGbeMRQRfTLIsGMkCPuZJnvxZDU7Gak8c=" | null | null | null | null | null | null | null | null || "\n\n" + Utility.ISO_8601_UTC_DATE_FORMATTER.format(OffsetDateTime.of(2017, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC)) + "\n/blob/%s/containerName/blobName\n\n\n\n\n\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\n\n\n\n"
+ null | null | null | "11111111-1111-1111-1111-111111111111" | null | null | null | null | null | "3hd4LRwrARVGbeMRQRfTLIsGMkCPuZJnvxZDU7Gak8c=" | null | null | null | null | null | null | null | null || "\n\n\n/blob/%s/containerName/blobName\n11111111-1111-1111-1111-111111111111\n\n\n\n\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\n\n\n\n"
+ null | null | null | null | "22222222-2222-2222-2222-222222222222" | null | null | null | null | "3hd4LRwrARVGbeMRQRfTLIsGMkCPuZJnvxZDU7Gak8c=" | null | null | null | null | null | null | null | null || "\n\n\n/blob/%s/containerName/blobName\n\n22222222-2222-2222-2222-222222222222\n\n\n\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\n\n\n\n"
+ null | null | null | null | null | OffsetDateTime.of(LocalDateTime.of(2018, 1, 1, 0, 0), ZoneOffset.UTC) | null | null | null | "3hd4LRwrARVGbeMRQRfTLIsGMkCPuZJnvxZDU7Gak8c=" | null | null | null | null | null | null | null | null || "\n\n\n/blob/%s/containerName/blobName\n\n\n2018-01-01T00:00:00Z\n\n\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\n\n\n\n"
+ null | null | null | null | null | null | OffsetDateTime.of(LocalDateTime.of(2018, 1, 1, 0, 0), ZoneOffset.UTC) | null | null | "3hd4LRwrARVGbeMRQRfTLIsGMkCPuZJnvxZDU7Gak8c=" | null | null | null | null | null | null | null | null || "\n\n\n/blob/%s/containerName/blobName\n\n\n\n2018-01-01T00:00:00Z\n\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\n\n\n\n"
+ null | null | null | null | null | null | null | "b" | null | "3hd4LRwrARVGbeMRQRfTLIsGMkCPuZJnvxZDU7Gak8c=" | null | null | null | null | null | null | null | null || "\n\n\n/blob/%s/containerName/blobName\n\n\n\n\nb\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\n\n\n\n"
+ null | null | null | null | null | null | null | null | "2018-06-17" | "3hd4LRwrARVGbeMRQRfTLIsGMkCPuZJnvxZDU7Gak8c=" | null | null | null | null | null | null | null | null || "\n\n\n/blob/%s/containerName/blobName\n\n\n\n\n\n2018-06-17\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\n\n\n\n"
+ null | null | null | null | null | null | null | null | null | "3hd4LRwrARVGbeMRQRfTLIsGMkCPuZJnvxZDU7Gak8c=" | new IPRange() | null | null | null | null | null | null | null || "\n\n\n/blob/%s/containerName/blobName\n\n\n\n\n\n\nip\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\n\n\n\n"
+ null | null | null | null | null | null | null | null | null | "3hd4LRwrARVGbeMRQRfTLIsGMkCPuZJnvxZDU7Gak8c=" | null | SASProtocol.HTTPS_ONLY | null | null | null | null | null | null || "\n\n\n/blob/%s/containerName/blobName\n\n\n\n\n\n\n\n" + SASProtocol.HTTPS_ONLY + "\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\n\n\n\n"
+ null | null | null | null | null | null | null | null | null | "3hd4LRwrARVGbeMRQRfTLIsGMkCPuZJnvxZDU7Gak8c=" | null | null | "snapId" | null | null | null | null | null || "\n\n\n/blob/%s/containerName/blobName\n\n\n\n\n\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nbs\nsnapId\n\n\n\n\n"
+ null | null | null | null | null | null | null | null | null | "3hd4LRwrARVGbeMRQRfTLIsGMkCPuZJnvxZDU7Gak8c=" | null | null | null | "control" | null | null | null | null || "\n\n\n/blob/%s/containerName/blobName\n\n\n\n\n\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\ncontrol\n\n\n\n"
+ null | null | null | null | null | null | null | null | null | "3hd4LRwrARVGbeMRQRfTLIsGMkCPuZJnvxZDU7Gak8c=" | null | null | null | null | "disposition" | null | null | null || "\n\n\n/blob/%s/containerName/blobName\n\n\n\n\n\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\ndisposition\n\n\n"
+ null | null | null | null | null | null | null | null | null | "3hd4LRwrARVGbeMRQRfTLIsGMkCPuZJnvxZDU7Gak8c=" | null | null | null | null | null | "encoding" | null | null || "\n\n\n/blob/%s/containerName/blobName\n\n\n\n\n\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\n\nencoding\n\n"
+ null | null | null | null | null | null | null | null | null | "3hd4LRwrARVGbeMRQRfTLIsGMkCPuZJnvxZDU7Gak8c=" | null | null | null | null | null | null | "language" | null || "\n\n\n/blob/%s/containerName/blobName\n\n\n\n\n\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\n\n\nlanguage\n"
+ null | null | null | null | null | null | null | null | null | "3hd4LRwrARVGbeMRQRfTLIsGMkCPuZJnvxZDU7Gak8c=" | null | null | null | null | null | null | null | "type" || "\n\n\n/blob/%s/containerName/blobName\n\n\n\n\n\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\n\n\n\ntype"
+ }
+
+ @Unroll
+ def "serviceSASSignatureValues canonicalizedResource"() {
+ setup:
+ ServiceSASSignatureValues v = new ServiceSASSignatureValues()
+ .expiryTime(expiryTime)
+ .permissions(new BlobSASPermission().toString())
+ .resource(expectedResource)
+ .canonicalName(String.format("/blob/%s/%s", primaryCreds.accountName(), containerName))
+ .snapshotId(snapId)
+
+ if (blobName != null) {
+ v.canonicalName(v.canonicalName() + "/" + blobName)
+ }
+
+ expectedStringToSign = String.format(expectedStringToSign,
+ Utility.ISO_8601_UTC_DATE_FORMATTER.format(expiryTime),
+ primaryCreds.accountName())
+
+ when:
+ SASQueryParameters token = v.generateSASQueryParameters(primaryCreds)
+
+ then:
+ token.signature() == primaryCreds.computeHmac256(expectedStringToSign)
+ token.resource() == expectedResource
+
+ where:
+ containerName | blobName | snapId | expiryTime || expectedResource | expectedStringToSign
+ "c" | "b" | "id" | OffsetDateTime.now() || "bs" | "\n\n%s\n" + "/blob/%s/c/b\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nbs\nid\n\n\n\n\n"
+ "c" | "b" | null | OffsetDateTime.now() || "b" | "\n\n%s\n" + "/blob/%s/c/b\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nb\n\n\n\n\n\n"
+ "c" | null | null | OffsetDateTime.now() || "c" | "\n\n%s\n" + "/blob/%s/c\n\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\nc\n\n\n\n\n\n"
+
+ }
+
+ @Unroll
+ def "serviceSasSignatureValues IA"() {
+ setup:
+ ServiceSASSignatureValues v = new ServiceSASSignatureValues()
+ .permissions(new AccountSASPermission().toString())
+ .expiryTime(OffsetDateTime.now())
+ .resource(containerName)
+ .canonicalName(blobName)
+ .snapshotId("2018-01-01T00:00:00.0000000Z")
+ .version(version)
+
+ when:
+ v.generateSASQueryParameters((SharedKeyCredential)creds)
+
+ then:
+ def e = thrown(IllegalArgumentException)
+ e.getMessage().contains(parameter)
+
+ where:
+ containerName | version | creds | blobName || parameter
+ "c" | null | primaryCreds | "b" | "version"
+ "c" | "v" | null | "b" | "sharedKeyCredentials"
+ "c" | "v" | primaryCreds | null | "canonicalName"
+ }
+
+ @Unroll
+ def "BlobSASPermissions toString"() {
+ setup:
+ BlobSASPermission perms = new BlobSASPermission()
+ .read(read)
+ .write(write)
+ .delete(delete)
+ .create(create)
+ .add(add)
+
+ expect:
+ perms.toString() == expectedString
+
+ where:
+ read | write | delete | create | add || expectedString
+ true | false | false | false | false || "r"
+ false | true | false | false | false || "w"
+ false | false | true | false | false || "d"
+ false | false | false | true | false || "c"
+ false | false | false | false | true || "a"
+ true | true | true | true | true || "racwd"
+ }
+
+ @Unroll
+ def "BlobSASPermissions parse"() {
+ when:
+ BlobSASPermission perms = BlobSASPermission.parse(permString)
+
+ then:
+ perms.read() == read
+ perms.write() == write
+ perms.delete() == delete
+ perms.create() == create
+ perms.add() == add
+
+ where:
+ permString || read | write | delete | create | add
+ "r" || true | false | false | false | false
+ "w" || false | true | false | false | false
+ "d" || false | false | true | false | false
+ "c" || false | false | false | true | false
+ "a" || false | false | false | false | true
+ "racwd" || true | true | true | true | true
+ "dcwra" || true | true | true | true | true
+ }
+
+ def "BlobSASPermissions parse IA"() {
+ when:
+ BlobSASPermission.parse("rwaq")
+
+ then:
+ thrown(IllegalArgumentException)
+ }
+
+ @Unroll
+ def "ContainerSASPermissions toString"() {
+ setup:
+ ContainerSASPermission perms = new ContainerSASPermission()
+ .read(read)
+ .write(write)
+ .delete(delete)
+ .create(create)
+ .add(add)
+ .list(list)
+
+ expect:
+ perms.toString() == expectedString
+
+ where:
+ read | write | delete | create | add | list || expectedString
+ true | false | false | false | false | false || "r"
+ false | true | false | false | false | false || "w"
+ false | false | true | false | false | false || "d"
+ false | false | false | true | false | false || "c"
+ false | false | false | false | true | false || "a"
+ false | false | false | false | false | true || "l"
+ true | true | true | true | true | true || "racwdl"
+ }
+
+ @Unroll
+ def "ContainerSASPermissions parse"() {
+ when:
+ ContainerSASPermission perms = ContainerSASPermission.parse(permString)
+
+ then:
+ perms.read() == read
+ perms.write() == write
+ perms.delete() == delete
+ perms.create() == create
+ perms.add() == add
+ perms.list() == list
+
+ where:
+ permString || read | write | delete | create | add | list
+ "r" || true | false | false | false | false | false
+ "w" || false | true | false | false | false | false
+ "d" || false | false | true | false | false | false
+ "c" || false | false | false | true | false | false
+ "a" || false | false | false | false | true | false
+ "l" || false | false | false | false | false | true
+ "racwdl" || true | true | true | true | true | true
+ "dcwrla" || true | true | true | true | true | true
+ }
+
+ def "ContainerSASPermissions parse IA"() {
+ when:
+ ContainerSASPermission.parse("rwaq")
+
+ then:
+ thrown(IllegalArgumentException)
+ }
+
+ @Unroll
+ def "IPRange toString"() {
+ setup:
+ def ip = new IPRange()
+ .ipMin(min)
+ .ipMax(max)
+
+ expect:
+ ip.toString() == expectedString
+
+ where:
+ min | max || expectedString
+ "a" | "b" || "a-b"
+ "a" | null || "a"
+ null | "b" || ""
+ }
+
+ @Unroll
+ def "IPRange parse"() {
+ when:
+ IPRange ip = IPRange.parse(rangeStr)
+
+ then:
+ ip.ipMin() == min
+ ip.ipMax() == max
+
+ where:
+ rangeStr || min | max
+ "a-b" || "a" | "b"
+ "a" || "a" | null
+ "" || "" | null
+ }
+
+ @Unroll
+ def "SASProtocol parse"() {
+ expect:
+ SASProtocol.parse(protocolStr) == protocol
+
+ where:
+ protocolStr || protocol
+ "https" || SASProtocol.HTTPS_ONLY
+ "https,http" || SASProtocol.HTTPS_HTTP
+ }
+
+ /*
+ This test will ensure that each field gets placed into the proper location within the string to sign and that null
+ values are handled correctly. We will validate the whole SAS with service calls as well as correct serialization of
+ individual parts later.
+ */
+
+ @Unroll
+ def "accountSasSignatures string to sign"() {
+ when:
+ AccountSASSignatureValues v = new AccountSASSignatureValues()
+ .permissions(new AccountSASPermission().read(true).toString())
+ .services("b")
+ .resourceTypes("o")
+ .startTime(startTime)
+ .expiryTime(OffsetDateTime.of(2017, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC))
+ .protocol(protocol)
+
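+ // The where-block ipRange only signals presence; when non-null, a min-only range ("ip") is what actually gets serialized.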
+ if (ipRange != null) {
+ v.ipRange(new IPRange().ipMin("ip"))
+ }
+
+ def token = v.generateSASQueryParameters(primaryCreds)
+
+ expectedStringToSign = String.format(expectedStringToSign, primaryCreds.accountName())
+
+ then:
+ token.signature() == primaryCreds.computeHmac256(expectedStringToSign)
+
+ where:
+ startTime | ipRange | protocol || expectedStringToSign
+ OffsetDateTime.of(2017, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC) | null | null || "%s\nr\nb\no\n" + Utility.ISO_8601_UTC_DATE_FORMATTER.format(OffsetDateTime.of(2017, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC)) + "\n" + Utility.ISO_8601_UTC_DATE_FORMATTER.format(OffsetDateTime.of(2017, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC)) + "\n\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\n"
+ null | new IPRange() | null || "%s\nr\nb\no\n\n" + Utility.ISO_8601_UTC_DATE_FORMATTER.format(OffsetDateTime.of(2017, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC)) + "\nip\n\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\n"
+ null | null | SASProtocol.HTTPS_ONLY || "%s\nr\nb\no\n\n" + Utility.ISO_8601_UTC_DATE_FORMATTER.format(OffsetDateTime.of(2017, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC)) + "\n\n" + SASProtocol.HTTPS_ONLY + "\n" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "\n"
+ }
+
+ @Unroll
+ def "accountSasSignatureValues IA"() {
+ setup:
+ AccountSASSignatureValues v = new AccountSASSignatureValues()
+ .permissions(permissions)
+ .services(service)
+ .resourceTypes(resourceType)
+ .expiryTime(expiryTime)
+ .version(version)
+
+ when:
+ v.generateSASQueryParameters(creds)
+
+ then:
+ def e = thrown(IllegalArgumentException)
+ e.getMessage().contains(parameter)
+
+ where:
+ permissions | service | resourceType | expiryTime | version | creds || parameter
+ null | "b" | "c" | OffsetDateTime.now() | "v" | primaryCreds || "permissions"
+ "c" | null | "c" | OffsetDateTime.now() | "v" | primaryCreds || "services"
+ "c" | "b" | null | OffsetDateTime.now() | "v" | primaryCreds || "resourceTypes"
+ "c" | "b" | "c" | null | "v" | primaryCreds || "expiryTime"
+ "c" | "b" | "c" | OffsetDateTime.now() | null | primaryCreds || "version"
+ "c" | "b" | "c" | OffsetDateTime.now() | "v" | null || "SharedKeyCredential"
+ }
+
+ @Unroll
+ def "AccountSASPermissions toString"() {
+ setup:
+ AccountSASPermission perms = new AccountSASPermission()
+ .read(read)
+ .write(write)
+ .delete(delete)
+ .list(list)
+ .add(add)
+ .create(create)
+ .update(update)
+ .processMessages(process)
+
+ expect:
+ perms.toString() == expectedString
+
+ where:
+ read | write | delete | list | add | create | update | process || expectedString
+ true | false | false | false | false | false | false | false || "r"
+ false | true | false | false | false | false | false | false || "w"
+ false | false | true | false | false | false | false | false || "d"
+ false | false | false | true | false | false | false | false || "l"
+ false | false | false | false | true | false | false | false || "a"
+ false | false | false | false | false | true | false | false || "c"
+ false | false | false | false | false | false | true | false || "u"
+ false | false | false | false | false | false | false | true || "p"
+ true | true | true | true | true | true | true | true || "rwdlacup"
+ }
+
+ @Unroll
+ def "AccountSASPermissions parse"() {
+ when:
+ AccountSASPermission perms = AccountSASPermission.parse(permString)
+
+ then:
+ perms.read() == read
+ perms.write() == write
+ perms.delete() == delete
+ perms.list() == list
+ perms.add() == add
+ perms.create() == create
+ perms.update() == update
+ perms.processMessages() == process
+
+ where:
+ permString || read | write | delete | list | add | create | update | process
+ "r" || true | false | false | false | false | false | false | false
+ "w" || false | true | false | false | false | false | false | false
+ "d" || false | false | true | false | false | false | false | false
+ "l" || false | false | false | true | false | false | false | false
+ "a" || false | false | false | false | true | false | false | false
+ "c" || false | false | false | false | false | true | false | false
+ "u" || false | false | false | false | false | false | true | false
+ "p" || false | false | false | false | false | false | false | true
+ "rwdlacup" || true | true | true | true | true | true | true | true
+ "lwrupcad" || true | true | true | true | true | true | true | true
+ }
+
+ def "AccountSASPermissions parse IA"() {
+ when:
+ AccountSASPermission.parse("rwaq")
+
+ then:
+ thrown(IllegalArgumentException)
+ }
+
+ @Unroll
+ def "AccountSASResourceType toString"() {
+ setup:
+ AccountSASResourceType resourceTypes = new AccountSASResourceType()
+ .service(service)
+ .container(container)
+ .object(object)
+
+ expect:
+ resourceTypes.toString() == expectedString
+
+ where:
+ service | container | object || expectedString
+ true | false | false || "s"
+ false | true | false || "c"
+ false | false | true || "o"
+ true | true | true || "sco"
+ }
+
+ @Unroll
+ def "AccountSASResourceType parse"() {
+ when:
+ AccountSASResourceType resourceTypes = AccountSASResourceType.parse(resourceTypeString)
+
+ then:
+ resourceTypes.service() == service
+ resourceTypes.container() == container
+ resourceTypes.object() == object
+
+ where:
+ resourceTypeString || service | container | object
+ "s" || true | false | false
+ "c" || false | true | false
+ "o" || false | false | true
+ "sco" || true | true | true
+ }
+
+ @Unroll
+ def "AccountSASResourceType IA"() {
+ when:
+ AccountSASResourceType.parse("scq")
+
+ then:
+ thrown(IllegalArgumentException)
+ }
+
+ def "BlobURLParts"() {
+ setup:
+ BlobURLParts parts = new BlobURLParts()
+ .scheme("http")
+ .host("host")
+ .containerName("container")
+ .blobName("blob")
+ .snapshot("snapshot")
+
+ ServiceSASSignatureValues sasValues = new ServiceSASSignatureValues()
+ .expiryTime(OffsetDateTime.now(ZoneOffset.UTC).plusDays(1))
+ .permissions("r")
+ .canonicalName(String.format("/blob/%s/container/blob", primaryCreds.accountName()))
+ .resource(Constants.UrlConstants.SAS_BLOB_SNAPSHOT_CONSTANT)
+
+ parts.sasQueryParameters(sasValues.generateSASQueryParameters(primaryCreds))
+
+ when:
+ String[] splitParts = parts.toURL().toString().split("\\?")
+
+ then:
+ splitParts.size() == 2 // Ensure that there is only one question mark even when sas and snapshot are present
+ splitParts[0] == "http://host/container/blob"
+ splitParts[1].contains("snapshot=snapshot")
+ splitParts[1].contains("sp=r")
+ splitParts[1].contains("sig=")
+ splitParts[1].split("&").size() == 6 // snapshot & sv & sr & sp & sig & se
+ }
+
+ def "URLParser"() {
+ when:
+ BlobURLParts parts = URLParser.parse(new URL("http://host/container/blob?snapshot=snapshot&sv=" + Constants.HeaderConstants.TARGET_STORAGE_VERSION + "&sr=c&sp=r&sig=Ee%2BSodSXamKSzivSdRTqYGh7AeMVEk3wEoRZ1yzkpSc%3D"))
+
+ then:
+ parts.scheme() == "http"
+ parts.host() == "host"
+ parts.containerName() == "container"
+ parts.blobName() == "blob"
+ parts.snapshot() == "snapshot"
+ parts.sasQueryParameters().permissions() == "r"
+ parts.sasQueryParameters().version() == Constants.HeaderConstants.TARGET_STORAGE_VERSION
+ parts.sasQueryParameters().resource() == "c"
+ parts.sasQueryParameters().signature() == Utility.safeURLDecode("Ee%2BSodSXamKSzivSdRTqYGh7AeMVEk3wEoRZ1yzkpSc%3D")
+ }
+}
diff --git a/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/LargeFileTest.java b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/LargeFileTest.java
deleted file mode 100644
index 8bff57d559513..0000000000000
--- a/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/LargeFileTest.java
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package com.azure.storage.blob;
-
-import com.azure.storage.common.credentials.SharedKeyCredential;
-
-import java.io.File;
-import java.time.Duration;
-import java.time.OffsetDateTime;
-import java.util.Random;
-
-public class LargeFileTest {
- private static final Random RANDOM = new Random();
- private static final String FILE_PATH = "C:\\Users\\jianghlu\\10g.dat";
- private static BlobServiceClient storageClient;
- private static ContainerClient containerClient;
-
- //@BeforeClass
- public static void setup() {
- storageClient = new BlobServiceClientBuilder()
- .credential(new SharedKeyCredential(System.getenv("ACCOUNT_NAME"), System.getenv("ACCOUNT_KEY")))
- .endpoint("https://" + System.getenv("ACCOUNT_NAME") + ".blob.core.windows.net")
-// .httpClient(HttpClient.createDefault().proxy(() -> new ProxyOptions(Type.HTTP, new InetSocketAddress("localhost", 8888))))
- .buildClient();
-
- containerClient = storageClient.getContainerClient("testcontainer-10g");
- if (!containerClient.exists().value()) {
- containerClient.create();
- }
- }
-
- //@Test
- public void uploadLargeBlockBlob() throws Exception {
- BlockBlobClient blockBlobClient = containerClient.getBlockBlobClient("testblob" + RANDOM.nextInt(1000));
- blockBlobClient.uploadFromFile(FILE_PATH);
- }
-
- //@Test
- public void downloadLargeBlockBlob() throws Exception {
- OffsetDateTime start = OffsetDateTime.now();
- BlockBlobClient blockBlobClient = containerClient.getBlockBlobClient("testblob-10g");
-// blockBlobClient.uploadFromFile(filePath);
-// File uploadedFile = new File(filePath);
-// System.out.println("Upload " + uploadedFile.length() + " bytes took " + Duration.between(start, OffsetDateTime.now()).getSeconds() + " seconds");
-// start = OffsetDateTime.now();
- String downloadName = "D:\\10g-downloaded.dat";
- File downloaded = new File(downloadName);
- downloaded.createNewFile();
- blockBlobClient.downloadToFile(downloadName);
- System.out.println("Download " + downloaded.length() + " bytes took " + Duration.between(start, OffsetDateTime.now()).getSeconds() + " seconds");
- downloaded.delete();
- }
-}
diff --git a/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/PageBlobAPITest.groovy b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/PageBlobAPITest.groovy
index eee346b007288..d0d26275228fa 100644
--- a/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/PageBlobAPITest.groovy
+++ b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/PageBlobAPITest.groovy
@@ -40,7 +40,7 @@ class PageBlobAPITest extends APISpec {
def "Create sequence number"() {
when:
bu.create(PageBlobClient.PAGE_BYTES, 2, null, null,
- null, null)
+ null, null)
then:
Integer.parseInt(bu.getProperties().headers().value("x-ms-blob-sequence-number")) == 2
@@ -50,24 +50,27 @@ class PageBlobAPITest extends APISpec {
def "Create headers"() {
setup:
BlobHTTPHeaders headers = new BlobHTTPHeaders().blobCacheControl(cacheControl)
- .blobContentDisposition(contentDisposition)
- .blobContentEncoding(contentEncoding)
- .blobContentLanguage(contentLanguage)
- .blobContentMD5(contentMD5)
- .blobContentType(contentType)
+ .blobContentDisposition(contentDisposition)
+ .blobContentEncoding(contentEncoding)
+ .blobContentLanguage(contentLanguage)
+ .blobContentMD5(contentMD5)
+ .blobContentType(contentType)
when:
bu.create(PageBlobClient.PAGE_BYTES, null, headers, null, null, null)
Response response = bu.getProperties(null, null)
+ // If the value isn't set the service will automatically set it
+ contentType = (contentType == null) ? "application/octet-stream" : contentType
+
then:
validateBlobProperties(response, cacheControl, contentDisposition, contentEncoding, contentLanguage, contentMD5, contentType)
where:
cacheControl | contentDisposition | contentEncoding | contentLanguage | contentMD5 | contentType
null | null | null | null | null | null
- // "control" | "disposition" | "encoding" | "language" | Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(defaultData.array())) | "type" TODO (alzimmer): Determine why getProperties returns null
+ "control" | "disposition" | "encoding" | "language" | Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(defaultData.array())) | "type"
}
@Unroll
@@ -109,7 +112,7 @@ class PageBlobAPITest extends APISpec {
expect:
bu.create(PageBlobClient.PAGE_BYTES, null, null, null, bac, null)
- .statusCode() == 201
+ .statusCode() == 201
where:
modified | unmodified | match | noneMatch | leaseID
@@ -159,7 +162,7 @@ class PageBlobAPITest extends APISpec {
def "Upload page"() {
when:
Response response = bu.uploadPages(new PageRange().start(0).end(PageBlobClient.PAGE_BYTES - 1),
- new ByteArrayInputStream(getRandomByteArray(PageBlobClient.PAGE_BYTES)))
+ new ByteArrayInputStream(getRandomByteArray(PageBlobClient.PAGE_BYTES)))
then:
response.statusCode() == 201
@@ -306,7 +309,7 @@ class PageBlobAPITest extends APISpec {
when:
destURL.uploadPagesFromURL(new PageRange().start(0).end(PageBlobClient.PAGE_BYTES * 2 - 1),
- sourceURL.getBlobUrl(), PageBlobClient.PAGE_BYTES * 2)
+ sourceURL.getBlobUrl(), PageBlobClient.PAGE_BYTES * 2)
then:
ByteArrayOutputStream outputStream = new ByteArrayOutputStream()
@@ -323,8 +326,8 @@ class PageBlobAPITest extends APISpec {
thrown(IllegalArgumentException)
where:
- sourceOffset | range
- (Long)PageBlobClient.PAGE_BYTES | null
+ sourceOffset | range
+ (Long) PageBlobClient.PAGE_BYTES | null
}
def "Upload page from URL MD5"() {
@@ -354,14 +357,14 @@ class PageBlobAPITest extends APISpec {
when:
destURL.uploadPagesFromURL(pageRange, bu.getBlobUrl(), null,
- MessageDigest.getInstance("MD5").digest("garbage".getBytes()), null, null, null)
+ MessageDigest.getInstance("MD5").digest("garbage".getBytes()), null, null, null)
then:
thrown(StorageException)
}
@Unroll
- def "Upload page from URL destination AC"() {
+ def "Upload page from URL destination AC"() {
setup:
cu.setAccessPolicy(PublicAccessType.CONTAINER, null, null, null)
def sourceURL = cu.getPageBlobClient(generateBlobName())
@@ -504,7 +507,7 @@ class PageBlobAPITest extends APISpec {
Response response = bu.clearPages(new PageRange().start(0).end(PageBlobClient.PAGE_BYTES - 1))
then:
- !bu.getPageRanges(new BlobRange(0)).iterator().hasNext()
+ bu.getPageRanges(new BlobRange(0)).value().pageRange().size() == 0
validateBasicHeaders(response.headers())
response.value().contentMD5() == null
response.value().blobSequenceNumber() == 0
@@ -536,7 +539,7 @@ class PageBlobAPITest extends APISpec {
expect:
bu.clearPages(new PageRange().start(0).end(PageBlobClient.PAGE_BYTES - 1), pac, null)
- .statusCode() == 201
+ .statusCode() == 201
where:
modified | unmodified | match | noneMatch | leaseID | sequenceNumberLT | sequenceNumberLTE | sequenceNumberEqual
@@ -600,21 +603,20 @@ class PageBlobAPITest extends APISpec {
thrown(StorageException)
}
- // TODO (alzimmer): Turn this on once paged responses become available
- /*def "Get page ranges"() {
+ def "Get page ranges"() {
setup:
bu.uploadPages(new PageRange().start(0).end(PageBlobClient.PAGE_BYTES - 1),
new ByteArrayInputStream(getRandomByteArray(PageBlobClient.PAGE_BYTES)))
when:
- Iterable response = bu.getPageRanges(new BlobRange(0, PageBlobClient.PAGE_BYTES))
+ Response response = bu.getPageRanges(new BlobRange(0, PageBlobClient.PAGE_BYTES))
then:
response.statusCode() == 200
- response.body().pageRange().size() == 1
- validateBasicHeaders(headers)
- headers.blobContentLength() == (long) PageBlobClient.PAGE_BYTES
- }*/
+ response.value().pageRange().size() == 1
+ validateBasicHeaders(response.headers())
+ Integer.parseInt(response.headers().value("x-ms-blob-content-length")) == PageBlobClient.PAGE_BYTES
+ }
def "Get page ranges min"() {
when:
@@ -639,7 +641,7 @@ class PageBlobAPITest extends APISpec {
when:
- bu.getPageRanges(new BlobRange(0, PageBlobClient.PAGE_BYTES), bac, null).iterator().hasNext()
+ bu.getPageRanges(new BlobRange(0, PageBlobClient.PAGE_BYTES), bac, null)
then:
notThrown(StorageException)
@@ -666,7 +668,7 @@ class PageBlobAPITest extends APISpec {
.ifNoneMatch(setupBlobMatchCondition(bu, noneMatch)))
when:
- bu.getPageRanges(new BlobRange(0, PageBlobClient.PAGE_BYTES), bac, null).iterator().hasNext()
+ bu.getPageRanges(new BlobRange(0, PageBlobClient.PAGE_BYTES), bac, null)
then:
thrown(StorageException)
@@ -685,14 +687,13 @@ class PageBlobAPITest extends APISpec {
bu = cu.getPageBlobClient(generateBlobName())
when:
- bu.getPageRanges(null).iterator().hasNext()
+ bu.getPageRanges(null)
then:
thrown(StorageException)
}
- // TODO (alzimmer): This test needs to be restructured to support the Iterable T return
- /*def "Get page ranges diff"() {
+ def "Get page ranges diff"() {
setup:
bu.create(PageBlobClient.PAGE_BYTES * 2)
@@ -707,19 +708,18 @@ class PageBlobAPITest extends APISpec {
bu.clearPages(new PageRange().start(PageBlobClient.PAGE_BYTES).end(PageBlobClient.PAGE_BYTES * 2 - 1))
when:
- Iterable response = bu.getPageRangesDiff(new BlobRange(0, PageBlobClient.PAGE_BYTES * 2), snapshot)
- PageBlobGetPageRangesDiffHeaders headers = response.headers()
+ Response response = bu.getPageRangesDiff(new BlobRange(0, PageBlobClient.PAGE_BYTES * 2), snapshot)
then:
- response.body().pageRange().size() == 1
- response.body().pageRange().get(0).start() == 0
- response.body().pageRange().get(0).end() == PageBlobClient.PAGE_BYTES - 1
- response.body().clearRange().size() == 1
- response.body().clearRange().get(0).start() == PageBlobClient.PAGE_BYTES
- response.body().clearRange().get(0).end() == PageBlobClient.PAGE_BYTES * 2 - 1
- validateBasicHeaders(headers)
- headers.blobContentLength() == PageBlobClient.PAGE_BYTES * 2
- }*/
+ response.value().pageRange().size() == 1
+ response.value().pageRange().get(0).start() == 0
+ response.value().pageRange().get(0).end() == PageBlobClient.PAGE_BYTES - 1
+ response.value().clearRange().size() == 1
+ response.value().clearRange().get(0).start() == PageBlobClient.PAGE_BYTES
+ response.value().clearRange().get(0).end() == PageBlobClient.PAGE_BYTES * 2 - 1
+ validateBasicHeaders(response.headers())
+ Integer.parseInt(response.headers().value("x-ms-blob-content-length")) == PageBlobClient.PAGE_BYTES * 2
+ }
def "Get page ranges diff min"() {
setup:
@@ -745,7 +745,7 @@ class PageBlobAPITest extends APISpec {
.ifNoneMatch(noneMatch))
when:
- bu.getPageRangesDiff(new BlobRange(0, PageBlobClient.PAGE_BYTES), snapshot, bac, null).iterator().hasNext()
+ bu.getPageRangesDiff(new BlobRange(0, PageBlobClient.PAGE_BYTES), snapshot, bac, null)
then:
notThrown(StorageException)
@@ -774,7 +774,7 @@ class PageBlobAPITest extends APISpec {
.ifNoneMatch(setupBlobMatchCondition(bu, noneMatch)))
when:
- bu.getPageRangesDiff(new BlobRange(0, PageBlobClient.PAGE_BYTES), snapshot, bac, null).iterator().hasNext()
+ bu.getPageRangesDiff(new BlobRange(0, PageBlobClient.PAGE_BYTES), snapshot, bac, null)
then:
thrown(StorageException)
@@ -793,7 +793,7 @@ class PageBlobAPITest extends APISpec {
bu = cu.getPageBlobClient(generateBlobName())
when:
- bu.getPageRangesDiff(null, "snapshot").iterator().hasNext()
+ bu.getPageRangesDiff(null, "snapshot")
then:
thrown(StorageException)
@@ -811,13 +811,13 @@ class PageBlobAPITest extends APISpec {
thrown(IllegalArgumentException)
where:
- start | end
- 1 | 1
- -PageBlobClient.PAGE_BYTES | PageBlobClient.PAGE_BYTES - 1
- 0 | 0
- 1 | PageBlobClient.PAGE_BYTES - 1
- 0 | PageBlobClient.PAGE_BYTES
- PageBlobClient.PAGE_BYTES * 2 | PageBlobClient.PAGE_BYTES - 1
+ start | end
+ 1 | 1
+ -PageBlobClient.PAGE_BYTES | PageBlobClient.PAGE_BYTES - 1
+ 0 | 0
+ 1 | PageBlobClient.PAGE_BYTES - 1
+ 0 | PageBlobClient.PAGE_BYTES
+ PageBlobClient.PAGE_BYTES * 2 | PageBlobClient.PAGE_BYTES - 1
}
def "Resize"() {
@@ -931,7 +931,7 @@ class PageBlobAPITest extends APISpec {
expect:
bu.updateSequenceNumber(SequenceNumberActionType.UPDATE, 1, bac, null)
- .statusCode() == 200
+ .statusCode() == 200
where:
modified | unmodified | match | noneMatch | leaseID
diff --git a/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/ProgressReporterTest.groovy b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/ProgressReporterTest.groovy
new file mode 100644
index 0000000000000..773d552ea3197
--- /dev/null
+++ b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/ProgressReporterTest.groovy
@@ -0,0 +1,109 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package com.azure.storage.blob
+
+import io.netty.buffer.ByteBuf
+import io.netty.buffer.Unpooled
+import reactor.core.publisher.Flux
+
+import java.nio.ByteBuffer
+import java.util.concurrent.atomic.AtomicLong
+import java.util.concurrent.locks.ReentrantLock
+
+class ProgressReporterTest extends APISpec {
+ def "Report progress sequential"() {
+ setup:
+ ByteBuffer buf1 = getRandomData(10)
+ ByteBuffer buf2 = getRandomData(15)
+ ByteBuffer buf3 = getRandomData(5)
+
+ IProgressReceiver mockReceiver = Mock(IProgressReceiver)
+
+ Flux data = Flux.just(buf1, buf2, buf3)
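+ // Wrap the Flux so cumulative byte counts are reported to the receiver as data flows through.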
+ data = ProgressReporter.addProgressReporting(data, mockReceiver)
+
+ when:
+ data.subscribe()
+ data.subscribe() // Subscribing twice enforces invocation of rewind
+
+ then:
+ // The same progress totals should be reported on each subscription (a retry rewinds); we should never exceed the total data size.
+ 2 * mockReceiver.reportProgress(10)
+ 2 * mockReceiver.reportProgress(25)
+ 2 * mockReceiver.reportProgress(30)
+ 0 * mockReceiver.reportProgress({it > 30})
+ }
+
+ def "Report progress sequential network test"() {
+ setup:
+ IProgressReceiver mockReceiver = Mock(IProgressReceiver)
+
+ ByteBuffer buffer = getRandomData(1 * 1024 * 1024)
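+ // Attach progress reporting first, then wrap each ByteBuffer in a Netty ByteBuf for the upload API.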
+ Flux data = ProgressReporter.addProgressReporting(Flux.just(buffer), mockReceiver)
+ .map({ it -> Unpooled.wrappedBuffer(it) })
+
+ when:
+ BlockBlobAsyncClient bu = new BlobClientBuilder()
+ .endpoint(cu.getContainerUrl().toString())
+ .blobName(generateBlobName())
+ .credential(primaryCreds)
+ .buildBlockBlobAsyncClient()
+ bu.upload(data, buffer.remaining()).block()
+
+ then:
+ /*
+ With the HTTP client, etc. involved, the best we can guarantee is that it's called once with the total. There
+ may or may not be any intermediary calls. This test mostly looks to validate that there is no interference
+ with actual network calls.
+ */
+ 1 * mockReceiver.reportProgress(1 * 1024 * 1024)
+ }
+
+ def "Report progress parallel"() {
+ setup:
+ ByteBuffer buf1 = getRandomData(10)
+ ByteBuffer buf2 = getRandomData(15)
+ ByteBuffer buf3 = getRandomData(5)
+
+ ReentrantLock lock = new ReentrantLock()
+ AtomicLong totalProgress = new AtomicLong(0)
+
+ IProgressReceiver mockReceiver = Mock(IProgressReceiver)
+ Flux data = Flux.just(buf1, buf2, buf3)
+ Flux data2 = Flux.just(buf3, buf2, buf1)
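+ // Parallel reporting aggregates progress from both Fluxes through the shared lock and running total.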
+ data = ProgressReporter.addParallelProgressReporting(data, mockReceiver, lock, totalProgress)
+ data2 = ProgressReporter.addParallelProgressReporting(data2, mockReceiver, lock, totalProgress)
+
+ when:
+ data.subscribe()
+ data2.subscribe()
+ data.subscribe()
+ data2.subscribe()
+
+ sleep(3000) // These Fluxes should complete quickly; blocking on each would serialize the subscriptions
+
+ then:
+ /*
+ There should be at least one call reporting the total length of the data. There may be two if both data and
+ data2 complete before the second batch of subscriptions.
+ */
+ (1..2) * mockReceiver.reportProgress(60)
+
+ /*
+ There should be 12 calls total, but either one or two of them could be reporting the total length, so we
+ can only guarantee ten calls with an unknown parameter. This test doesn't strictly mimic the network as
+ there would never be concurrent subscriptions to the same Flux as may be the case here, but it is good
+ enough.
+ */
+ (10..11) * mockReceiver.reportProgress(_)
+
+ /*
+ We should never report more progress than the 60 total (30 from each Flux; resubscribing is a retry and
+ therefore rewinds).
+ */
+ 0 * mockReceiver.reportProgress({it > 60})
+ }
+
+ // See TransferManagerTest for network tests of the parallel ProgressReporter.
+}
diff --git a/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/RequestRetryTestFactory.java b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/RequestRetryTestFactory.java
new file mode 100644
index 0000000000000..a0b3c2d69e847
--- /dev/null
+++ b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/RequestRetryTestFactory.java
@@ -0,0 +1,430 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package com.azure.storage.blob;
+
+import com.azure.core.http.HttpClient;
+import com.azure.core.http.HttpHeaders;
+import com.azure.core.http.HttpMethod;
+import com.azure.core.http.HttpPipelineBuilder;
+import com.azure.core.http.HttpRequest;
+import com.azure.core.http.HttpResponse;
+import com.azure.core.http.ProxyOptions;
+import com.azure.core.implementation.http.UrlBuilder;
+import com.azure.storage.common.policy.RequestRetryOptions;
+import com.azure.storage.common.policy.RequestRetryPolicy;
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.Unpooled;
+import reactor.core.Disposable;
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Mono;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.nio.charset.Charset;
+import java.time.Duration;
+import java.time.OffsetDateTime;
+import java.time.temporal.ChronoUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.function.Supplier;
+
+import static java.lang.StrictMath.pow;
+
+class RequestRetryTestFactory {
+ static final int RETRY_TEST_SCENARIO_RETRY_UNTIL_SUCCESS = 1;
+
+ static final int RETRY_TEST_SCENARIO_RETRY_UNTIL_MAX_RETRIES = 2;
+
+ static final int RETRY_TEST_SCENARIO_NON_RETRYABLE = 3;
+
+ static final int RETRY_TEST_SCENARIO_NON_RETRYABLE_SECONDARY = 4;
+
+ static final int RETRY_TEST_SCENARIO_NETWORK_ERROR = 5;
+
+ static final int RETRY_TEST_SCENARIO_EXPONENTIAL_TIMING = 6;
+
+ static final int RETRY_TEST_SCENARIO_FIXED_TIMING = 7;
+
+ static final int RETRY_TEST_SCENARIO_TRY_TIMEOUT = 8;
+
+ static final int RETRY_TEST_SCENARIO_NON_REPLAYABLE_FLOWABLE = 9;
+
+ // Cancelable
+
+ static final String RETRY_TEST_PRIMARY_HOST = "PrimaryDC";
+
+ static final String RETRY_TEST_SECONDARY_HOST = "SecondaryDC";
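+ // Every try must replay exactly this body; RetryTestClient verifies it on each attempt.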
+ static final ByteBuf RETRY_TEST_DEFAULT_DATA = Unpooled.wrappedBuffer("Default data".getBytes());
+ private static final String RETRY_TEST_HEADER = "TestHeader";
+ private static final String RETRY_TEST_QUERY_PARAM = "TestQueryParam";
+ private static final Mono RETRY_TEST_OK_RESPONSE = Mono.just(new RetryTestResponse(200));
+
+ /*
+ We wrap the response in a StorageErrorException to mock the HttpClient. Any response that the HttpClient
+ receives that is not an expected response is wrapped in a StorageErrorException.
+ */
+ private static final Mono RETRY_TEST_TEMPORARY_ERROR_RESPONSE = Mono.just(new RetryTestResponse(503));
+
+ private static final Mono RETRY_TEST_TIMEOUT_ERROR_RESPONSE = Mono.just(new RetryTestResponse(500));
+
+ private static final Mono RETRY_TEST_NON_RETRYABLE_ERROR = Mono.just(new RetryTestResponse(400));
+
+ private static final Mono RETRY_TEST_NOT_FOUND_RESPONSE = Mono.just(new RetryTestResponse(404));
+
+ private int retryTestScenario;
+
+ private RequestRetryOptions options;
+
+ /*
+ It is atypical and not recommended to have mutable state on the factory itself. However, the tests will need to
+ be able to validate the number of tries, and the tests will not have access to the policies, so we break our own
+ rule here.
+ */
+ private int tryNumber;
+
+ private OffsetDateTime time;
+
+ RequestRetryTestFactory(int scenario, RequestRetryOptions options) {
+ this.retryTestScenario = scenario;
+ this.options = options;
+ }
+
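+ // Builds a single-policy pipeline around the mock client and issues a GET carrying the replayable default body.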
+ Mono send(URL url) {
+ return new HttpPipelineBuilder()
+ .policies(new RequestRetryPolicy(this.options))
+ .httpClient(new RetryTestClient(this))
+ .build()
+ .send(new HttpRequest(HttpMethod.GET, url).body(Flux.just(RETRY_TEST_DEFAULT_DATA)));
+ }
+
+ int getTryNumber() {
+ return this.tryNumber;
+ }
+
+ // The retry factory only really cares about the status code.
+ private static final class RetryTestResponse extends HttpResponse {
+ int statusCode;
+
+ RetryTestResponse(int statusCode) {
+ this.statusCode = statusCode;
+ }
+
+ @Override
+ public int statusCode() {
+ return this.statusCode;
+ }
+
+ @Override
+ public String headerValue(String headerName) {
+ return null;
+ }
+
+ @Override
+ public HttpHeaders headers() {
+ return null;
+ }
+
+ @Override
+ public Flux body() {
+ return null;
+ }
+
+ @Override
+ public Mono bodyAsByteArray() {
+ return null;
+ }
+
+ @Override
+ public Mono bodyAsString() {
+ return null;
+ }
+
+ @Override
+ public Mono bodyAsString(Charset charset) {
+ return null;
+ }
+ }
+
+ private final class RetryTestClient implements HttpClient {
+ private RequestRetryTestFactory factory;
+
+ RetryTestClient(RequestRetryTestFactory parent) {
+ this.factory = parent;
+ }
+
+ @Override
+ public Mono send(HttpRequest request) {
+ this.factory.tryNumber++;
+ if (this.factory.tryNumber > this.factory.options.maxTries()) {
+ throw new IllegalArgumentException("Try number has exceeded max tries");
+ }
+
+ // Validate the expected preconditions for each try: The correct host is used.
+ String expectedHost = RETRY_TEST_PRIMARY_HOST;
+ if (this.factory.tryNumber % 2 == 0) {
+ /*
+ Special cases: the retry-until-success scenario fails on the 4th try with a 404 on the secondary, so we
+ never expect it to check the secondary after that. All other tests should continue to check the
+ secondary.
+ Exponential timing only tests secondary backoff once but uses the rest of the retries to hit the max
+ delay.
+ */
+ if (!((this.factory.retryTestScenario == RequestRetryTestFactory.RETRY_TEST_SCENARIO_RETRY_UNTIL_SUCCESS && this.factory.tryNumber > 4)
+ || (this.factory.retryTestScenario == RequestRetryTestFactory.RETRY_TEST_SCENARIO_EXPONENTIAL_TIMING && this.factory.tryNumber > 2))) {
+ expectedHost = RETRY_TEST_SECONDARY_HOST;
+ }
+ }
+
+ if (!request.url().getHost().equals(expectedHost)) {
+ throw new IllegalArgumentException("The host does not match the expected host");
+ }
+
+ /*
+ This client adds test headers and query parameters (mimicking policies downstream of the retry policy).
+ Ensure they have been removed/reset for each retry; the retry policy should start every try with a fresh copy of the request.
+ */
+ if (request.headers().value(RETRY_TEST_HEADER) != null) {
+ throw new IllegalArgumentException("Headers not reset.");
+ }
+ if ((request.url().getQuery() != null && request.url().getQuery().contains(RETRY_TEST_QUERY_PARAM))) {
+ throw new IllegalArgumentException("Query params not reset.");
+ }
+
+ // Subscribe and block until all information is read to prevent a "blocking on another thread" exception from Reactor.
+ ByteBuf buf = Unpooled.buffer();
+ Disposable disposable = request.body().subscribe(buf::writeBytes);
+ while (!disposable.isDisposed()) {
+ System.out.println("Waiting for Flux to finish to prevent blocking on another thread exception");
+ }
+ if (RETRY_TEST_DEFAULT_DATA.compareTo(buf) != 0) {
+ throw new IllegalArgumentException(("Body not reset."));
+ }
+
+ /*
+ Modify the request as policies downstream of the retry policy are likely to do. These must be reset on each
+ try.
+ */
+ request.headers().put(RETRY_TEST_HEADER, "testheader");
+ UrlBuilder builder = UrlBuilder.parse(request.url());
+ builder.setQueryParameter(RETRY_TEST_QUERY_PARAM, "testquery");
+ try {
+ request.url(builder.toURL());
+ } catch (MalformedURLException e) {
+ throw new IllegalArgumentException("The URL has been mangled");
+ }
+
+ switch (this.factory.retryTestScenario) {
+ case RETRY_TEST_SCENARIO_RETRY_UNTIL_SUCCESS:
+ switch (this.factory.tryNumber) {
+ case 1:
+ /*
+ The timer is set with a timeout on the Mono used to make the request. If the Mono
+ doesn't return success fast enough, it will throw a TimeoutException. We can short circuit
+ the waiting by simply returning an error. We will validate the time parameter later. Here,
+ we just test that a timeout is retried.
+ */
+ return Mono.error(new TimeoutException());
+ case 2:
+ return RETRY_TEST_TEMPORARY_ERROR_RESPONSE;
+ case 3:
+ return RETRY_TEST_TIMEOUT_ERROR_RESPONSE;
+ case 4:
+ /*
+ By returning 404 when we should be testing against the secondary, we exercise the logic
+ that should prevent further tries to secondary when the secondary evidently doesn't have the
+ data.
+ */
+ return RETRY_TEST_NOT_FOUND_RESPONSE;
+ case 5:
+ // Just to get to a sixth try where we ensure we should not be trying the secondary again.
+ return RETRY_TEST_TEMPORARY_ERROR_RESPONSE;
+ case 6:
+ return RETRY_TEST_OK_RESPONSE;
+ default:
+ throw new IllegalArgumentException("Continued trying after success.");
+ }
+
+ case RETRY_TEST_SCENARIO_RETRY_UNTIL_MAX_RETRIES:
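+ // Every try returns 503, so the policy keeps retrying until maxTries is exhausted.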
+ return RETRY_TEST_TEMPORARY_ERROR_RESPONSE;
+
+ case RETRY_TEST_SCENARIO_NON_RETRYABLE:
+ if (this.factory.tryNumber == 1) {
+ return RETRY_TEST_NON_RETRYABLE_ERROR;
+ } else {
+ throw new IllegalArgumentException("Continued trying after non retryable error.");
+ }
+
+ case RETRY_TEST_SCENARIO_NON_RETRYABLE_SECONDARY:
+ switch (this.factory.tryNumber) {
+ case 1:
+ return RETRY_TEST_TEMPORARY_ERROR_RESPONSE;
+ case 2:
+ return RETRY_TEST_NON_RETRYABLE_ERROR;
+ default:
+ throw new IllegalArgumentException("Continued trying after non retryable error.");
+ }
+
+ case RETRY_TEST_SCENARIO_NETWORK_ERROR:
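+ // IOExceptions model network failures, which are retryable.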
+ switch (this.factory.tryNumber) {
+ case 1:
+ // fall through
+ case 2:
+ return Mono.error(new IOException());
+ case 3:
+ return RETRY_TEST_OK_RESPONSE;
+ default:
+ throw new IllegalArgumentException("Continued retrying after success.");
+ }
+
+ case RETRY_TEST_SCENARIO_TRY_TIMEOUT:
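+ // The first two tries outlast the per-try timeout and are retried; the third completes in time.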
+ switch (this.factory.tryNumber) {
+ case 1:
+ case 2:
+ return RETRY_TEST_OK_RESPONSE.delaySubscription(Duration.ofSeconds(options.tryTimeout() + 1));
+ case 3:
+ return RETRY_TEST_OK_RESPONSE.delaySubscription(Duration.ofSeconds(options.tryTimeout() - 1));
+ default:
+ throw new IllegalArgumentException("Continued retrying after success");
+ }
+
+ case RETRY_TEST_SCENARIO_EXPONENTIAL_TIMING:
+ switch (this.factory.tryNumber) {
+ case 1:
+ this.factory.time = OffsetDateTime.now();
+ return RETRY_TEST_TEMPORARY_ERROR_RESPONSE;
+ case 2:
+ /*
+ Calculation for secondary is always the same, so we don't need to keep testing it. Not
+ trying the secondary any more will also speed up the test.
+ */
+ return testDelayBounds(1, false, RETRY_TEST_NOT_FOUND_RESPONSE);
+ case 3:
+ return testDelayBounds(2, true, RETRY_TEST_TEMPORARY_ERROR_RESPONSE);
+ case 4:
+ return testDelayBounds(3, true, RETRY_TEST_TEMPORARY_ERROR_RESPONSE);
+ case 5:
+ /*
+ With the current configuration in RetryTest, the maxRetryDelay should be reached upon the
+ fourth try to the primary.
+ */
+ return testMaxDelayBounds(RETRY_TEST_TEMPORARY_ERROR_RESPONSE);
+ case 6:
+ return testMaxDelayBounds(RETRY_TEST_OK_RESPONSE);
+ default:
+ throw new IllegalArgumentException("Max retries exceeded/continued retrying after success");
+ }
+
+ case RETRY_TEST_SCENARIO_FIXED_TIMING:
+ switch (this.factory.tryNumber) {
+ case 1:
+ this.factory.time = OffsetDateTime.now();
+ return RETRY_TEST_TEMPORARY_ERROR_RESPONSE;
+ case 2:
+ return testDelayBounds(1, false, RETRY_TEST_TEMPORARY_ERROR_RESPONSE);
+ case 3:
+ return testDelayBounds(2, true, RETRY_TEST_TEMPORARY_ERROR_RESPONSE);
+ case 4:
+ /*
+ Fixed backoff means it's always the same and we never hit the max, no need to keep testing.
+ */
+ return RETRY_TEST_OK_RESPONSE;
+ default:
+ throw new IllegalArgumentException("Retries continued after success.");
+ }
+
+ case RETRY_TEST_SCENARIO_NON_REPLAYABLE_FLOWABLE:
+ switch (this.factory.tryNumber) {
+ case 1:
+ return RETRY_TEST_TEMPORARY_ERROR_RESPONSE;
+ case 2:
+ return Mono.error(new UnexpectedLengthException("Unexpected length", 5, 6));
+ default:
+ throw new IllegalArgumentException("Retries continued on non retryable error.");
+ }
+ default:
+ throw new IllegalArgumentException("Invalid retry test scenario.");
+ }
+ }
+
+ @Override
+ public HttpClient proxy(Supplier supplier) {
+ return null;
+ }
+
+ @Override
+ public HttpClient wiretap(boolean b) {
+ return null;
+ }
+
+ @Override
+ public HttpClient port(int i) {
+ return null;
+ }
+
+ /*
+ Calculate the delay in seconds. Round up to ensure we include the maximum value and some offset for the code
+ executing between the original calculation in the retry policy and this check.
+ */
+ private long calcPrimaryDelay(int tryNumber) {
+ switch (this.factory.retryTestScenario) {
+ case RETRY_TEST_SCENARIO_EXPONENTIAL_TIMING:
+ return (long) Math.ceil(
+ ((pow(2L, tryNumber - 1) - 1L) * this.factory.options.retryDelayInMs()) / 1000);
+ case RETRY_TEST_SCENARIO_FIXED_TIMING:
+ return (long) Math.ceil(this.factory.options.retryDelayInMs() / 1000.0); // Floating-point division so sub-second delays round up instead of truncating.
+ default:
+ throw new IllegalArgumentException("Invalid test scenario");
+ }
+ }
+
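+ // Primary tries get half a second of slack on either side of the computed delay; secondary tries are
+ // checked against a fixed 700-1400ms window.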
+ private OffsetDateTime calcUpperBound(OffsetDateTime start, int primaryTryNumber, boolean tryingPrimary) {
+ if (tryingPrimary) {
+ return start.plus(calcPrimaryDelay(primaryTryNumber) * 1000 + 500, ChronoUnit.MILLIS);
+ } else {
+ return start.plus(1400, ChronoUnit.MILLIS);
+ }
+ }
+
+ private OffsetDateTime calcLowerBound(OffsetDateTime start, int primaryTryNumber, boolean tryingPrimary) {
+ if (tryingPrimary) {
+ return start.plus(calcPrimaryDelay(primaryTryNumber) * 1000 - 500, ChronoUnit.MILLIS);
+ } else {
+ return start.plus(700, ChronoUnit.MILLIS);
+ }
+ }
+
+ private Mono testDelayBounds(int primaryTryNumber, boolean tryingPrimary, Mono response) {
+ /*
+ We have to return a new Mono so that the calculation for time is performed at the correct time, i.e. when
+ the Mono is actually subscribed to. This mocks an HttpClient because the requests are made only when
+ the Mono is subscribed to, not when all the infrastructure around it is put in place, and we care about
+ the delay before the request itself.
+ */
+ return Mono.defer(() -> Mono.fromCallable(() -> {
+ OffsetDateTime now = OffsetDateTime.now();
+ if (now.isAfter(calcUpperBound(factory.time, primaryTryNumber, tryingPrimary))
+ || now.isBefore(calcLowerBound(factory.time, primaryTryNumber, tryingPrimary))) {
+ throw new IllegalArgumentException("Delay was not within jitter bounds");
+ }
+
+ factory.time = now;
+ return response.block();
+ }));
+ }
+
+ private Mono testMaxDelayBounds(Mono response) {
+ return Mono.defer(() -> Mono.fromCallable(() -> {
+ OffsetDateTime now = OffsetDateTime.now();
+ if (now.isAfter(factory.time.plusSeconds((long) Math.ceil((factory.options.maxRetryDelayInMs() / 1000.0) + 1)))) {
+ throw new IllegalArgumentException("Max retry delay exceeded");
+ } else if (now.isBefore(factory.time.plusSeconds((long) Math.ceil((factory.options.maxRetryDelayInMs() / 1000.0) - 1)))) {
+ throw new IllegalArgumentException("Retry did not delay long enough");
+ }
+
+ factory.time = now;
+ return response.block();
+ }));
+ }
+ }
+}
diff --git a/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/RetryTest.groovy b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/RetryTest.groovy
new file mode 100644
index 0000000000000..ab5f9aabd8726
--- /dev/null
+++ b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/RetryTest.groovy
@@ -0,0 +1,144 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package com.azure.storage.blob
+
+import com.azure.core.http.HttpResponse
+import com.azure.storage.common.policy.RequestRetryOptions
+import com.azure.storage.common.policy.RetryPolicyType
+import spock.lang.Unroll
+
+// Tests for package-private functionality.
+class RetryTest extends APISpec {
+ static URL retryTestURL = new URL("https://" + RequestRetryTestFactory.RETRY_TEST_PRIMARY_HOST)
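+ // Exponential policy: 6 max tries, 2s try timeout, 1s base delay, 4s max delay, with a secondary host configured.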
+ static RequestRetryOptions retryTestOptions = new RequestRetryOptions(RetryPolicyType.EXPONENTIAL, 6, 2,
+ 1000L, 4000L, RequestRetryTestFactory.RETRY_TEST_SECONDARY_HOST)
+
+ def "Retries until success"() {
+ setup:
+ RequestRetryTestFactory retryTestFactory = new RequestRetryTestFactory(RequestRetryTestFactory.RETRY_TEST_SCENARIO_RETRY_UNTIL_SUCCESS, retryTestOptions)
+
+ when:
+ HttpResponse response = retryTestFactory.send(retryTestURL).block()
+
+ then:
+ response.statusCode() == 200
+ retryTestFactory.getTryNumber() == 6
+ }
+
+ def "Retries until max retries"() {
+ setup:
+ RequestRetryTestFactory retryTestFactory = new RequestRetryTestFactory(RequestRetryTestFactory.RETRY_TEST_SCENARIO_RETRY_UNTIL_MAX_RETRIES, retryTestOptions)
+
+ when:
+ HttpResponse response = retryTestFactory.send(retryTestURL).block()
+
+ then:
+ response.statusCode() == 503
+ retryTestFactory.getTryNumber() == retryTestOptions.maxTries()
+ }
+
+ def "Retries non retryable"() {
+ setup:
+ RequestRetryTestFactory retryTestFactory = new RequestRetryTestFactory(RequestRetryTestFactory.RETRY_TEST_SCENARIO_NON_RETRYABLE, retryTestOptions)
+
+ when:
+ HttpResponse response = retryTestFactory.send(retryTestURL).block()
+
+ then:
+ response.statusCode() == 400
+ retryTestFactory.getTryNumber() == 1
+ }
+
+ def "Retries non retryable secondary"() {
+ setup:
+ RequestRetryTestFactory retryTestFactory = new RequestRetryTestFactory(RequestRetryTestFactory.RETRY_TEST_SCENARIO_NON_RETRYABLE_SECONDARY, retryTestOptions)
+
+ when:
+ HttpResponse response = retryTestFactory.send(retryTestURL).block()
+
+ then:
+ response.statusCode() == 400
+ retryTestFactory.getTryNumber() == 2
+ }
+
+ def "Retries network error"() {
+ setup:
+ RequestRetryTestFactory retryTestFactory = new RequestRetryTestFactory(RequestRetryTestFactory.RETRY_TEST_SCENARIO_NETWORK_ERROR, retryTestOptions)
+
+ when:
+ HttpResponse response = retryTestFactory.send(retryTestURL).block()
+
+ then:
+ response.statusCode() == 200
+ retryTestFactory.getTryNumber() == 3
+ }
+
+ def "Retries try timeout"() {
+ setup:
+ RequestRetryTestFactory retryTestFactory = new RequestRetryTestFactory(RequestRetryTestFactory.RETRY_TEST_SCENARIO_TRY_TIMEOUT, retryTestOptions)
+
+ when:
+ HttpResponse response = retryTestFactory.send(retryTestURL).block()
+
+ then:
+ response.statusCode() == 200
+ retryTestFactory.getTryNumber() == 3
+ }
+
+ def "Retries exponential delay"() {
+ setup:
+ RequestRetryTestFactory retryTestFactory = new RequestRetryTestFactory(RequestRetryTestFactory.RETRY_TEST_SCENARIO_EXPONENTIAL_TIMING, retryTestOptions)
+
+ when:
+ HttpResponse response = retryTestFactory.send(retryTestURL).block()
+
+ then:
+ response.statusCode() == 200
+ retryTestFactory.getTryNumber() == 6
+ }
+
+ def "Retries fixed delay"() {
+ setup:
+ RequestRetryTestFactory retryTestFactory = new RequestRetryTestFactory(RequestRetryTestFactory.RETRY_TEST_SCENARIO_FIXED_TIMING, retryTestOptions)
+
+ when:
+ HttpResponse response = retryTestFactory.send(retryTestURL).block()
+
+ then:
+ response.statusCode() == 200
+ retryTestFactory.getTryNumber() == 4
+ }
+
+ def "Retries non replyable flux"() {
+ setup:
+ RequestRetryTestFactory retryTestFactory = new RequestRetryTestFactory(RequestRetryTestFactory.RETRY_TEST_SCENARIO_NON_REPLAYABLE_FLOWABLE, retryTestOptions)
+
+ when:
+ retryTestFactory.send(retryTestURL).block()
+
+ then:
+ def e = thrown(IllegalStateException)
+ e.getMessage().startsWith("The request failed because")
+ e.getCause() instanceof UnexpectedLengthException
+ }
+
+ @Unroll
+ def "Retries options invalid"() {
+ when:
+ new RequestRetryOptions(null, maxTries, tryTimeout, retryDelayInMs, maxRetryDelayInMs, null)
+
+ then:
+ thrown(IllegalArgumentException)
+
+ where:
+ maxTries | tryTimeout | retryDelayInMs | maxRetryDelayInMs
+ 0 | null | null | null
+ null | 0 | null | null
+ null | null | 0 | 1
+ null | null | 1 | 0
+ null | null | null | 1
+ null | null | 1 | null
+ null | null | 5 | 4
+ }
+}
diff --git a/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/SASTest.groovy b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/SASTest.groovy
index 705453b54968d..9a7cab385f2e6 100644
--- a/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/SASTest.groovy
+++ b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/SASTest.groovy
@@ -3,10 +3,10 @@
package com.azure.storage.blob
+import com.azure.core.http.policy.HttpLogDetailLevel
import com.azure.storage.blob.models.AccessPolicy
import com.azure.storage.blob.models.BlobRange
import com.azure.storage.blob.models.SignedIdentifier
-import com.azure.storage.blob.models.StorageErrorCode
import com.azure.storage.blob.models.UserDelegationKey
import com.azure.storage.common.credentials.SASTokenCredential
import com.azure.storage.common.credentials.SharedKeyCredential
@@ -17,24 +17,10 @@ import java.time.OffsetDateTime
import java.time.ZoneOffset
class SASTest extends APISpec {
-
- def "responseError"() {
- when:
- cu.listBlobsFlat()
-
- then:
- def e = thrown(StorageException)
- e.errorCode() == StorageErrorCode.INVALID_QUERY_PARAMETER_VALUE
- e.statusCode() == 400
- e.message().contains("Value for one of the query parameters specified in the request URI is invalid.")
- e.getMessage().contains("